blob: a3f35636040a1160996051a68ae71d8f78472a9b [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum.  All rights reserved.
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliaea370122011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020061int disable_ertm;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062int enable_reconfig;
Marcel Holtmannf0709e02007-10-20 13:38:51 +020063
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -070064static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };
Linus Torvalds1da177e2005-04-16 15:20:36 -070066
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067struct workqueue_struct *_l2cap_wq;
68
69struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71};
72
73static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
74 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
75static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
76 struct l2cap_pinfo *pi, u16 icid, u16 result);
77static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
78 u16 icid, u16 result);
79
80static void l2cap_amp_move_setup(struct sock *sk);
81static void l2cap_amp_move_success(struct sock *sk);
82static void l2cap_amp_move_revert(struct sock *sk);
83
84static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
87 u8 code, u8 ident, u16 dlen, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088static int l2cap_answer_move_poll(struct sock *sk);
89static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
90static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
91static void l2cap_chan_ready(struct sock *sk);
92static void l2cap_conn_del(struct hci_conn *hcon, int err);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -030093
Marcel Holtmann01394182006-07-03 10:02:46 +020094/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Gustavo F. Padovan4a6aa522011-05-17 14:34:52 -030096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200101 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700102 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200103}
104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105/* Find channel with given DCID.
106 * Returns locked socket */
107static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
108 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200109{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700110 struct sock *s;
111 read_lock(&l->lock);
112 s = __l2cap_get_chan_by_dcid(l, cid);
113 if (s)
114 bh_lock_sock(s);
115 read_unlock(&l->lock);
116 return s;
117}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120{
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
124 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200127}
128
129/* Find channel with given SCID.
130 * Returns locked socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700131static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200132{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133 struct sock *s;
134 read_lock(&l->lock);
135 s = __l2cap_get_chan_by_scid(l, cid);
136 if (s)
137 bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200140}
141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200143{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200148 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 return s;
150}
151
152static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153{
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s)
158 bh_lock_sock(s);
159 read_unlock(&l->lock);
160 return s;
161}
162
163static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
164 u16 seq)
165{
166 struct sk_buff *skb;
167
168 skb_queue_walk(head, skb) {
169 if (bt_cb(skb)->control.txseq == seq)
170 return skb;
171 }
172
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300173 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200174}
175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 u16 allocSize = 1;
179 int err = 0;
180 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300181
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 /* Actual allocated size must be a power of 2 */
183 while (allocSize && allocSize <= size)
184 allocSize <<= 1;
185 if (!allocSize)
186 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
189 if (!seq_list->list)
190 return -ENOMEM;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192 seq_list->size = allocSize;
193 seq_list->mask = allocSize - 1;
194 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
195 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
196 for (i = 0; i < allocSize; i++)
197 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300198
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300199 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300200}
201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300203{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204 kfree(seq_list->list);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300205}
206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
208 u16 seq)
209{
210 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
211}
212
/* Remove @seq from @seq_list and return it.
 *
 * The list is a singly linked chain threaded through the slot array:
 * each occupied slot stores the next element's sequence number, with
 * L2CAP_SEQ_LIST_TAIL marking the last element.  Removing the head is
 * O(1); any other element requires walking the chain from the head.
 *
 * Returns L2CAP_SEQ_LIST_CLEAR when the list is empty or @seq is not
 * present, otherwise @seq.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removed the only element: reset tail as well */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Splice @seq out; fix up tail if @seq was the last node */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
252
253static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
254{
255 return l2cap_seq_list_remove(seq_list, seq_list->head);
256}
257
258static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
259{
260 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
261 u16 i;
262 for (i = 0; i < seq_list->size; i++)
263 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
264
265 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
266 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
267 }
268}
269
270static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
271{
272 u16 mask = seq_list->mask;
273
274 BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
275
276 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
277 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
278 seq_list->head = seq;
279 else
280 seq_list->list[seq_list->tail & mask] = seq;
281
282 seq_list->tail = seq;
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
284 }
285}
286
287static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
288{
289 u16 packed;
290
291 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
292 L2CAP_CTRL_REQSEQ;
293 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
294 L2CAP_CTRL_FINAL;
295
296 if (control->frame_type == 's') {
297 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
298 L2CAP_CTRL_POLL;
299 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
300 L2CAP_CTRL_SUPERVISE;
301 packed |= L2CAP_CTRL_FRAME_TYPE;
302 } else {
303 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
304 L2CAP_CTRL_SAR;
305 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
306 L2CAP_CTRL_TXSEQ;
307 }
308
309 return packed;
310}
311
312static void __get_enhanced_control(u16 enhanced,
313 struct bt_l2cap_control *control)
314{
315 control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
316 L2CAP_CTRL_REQSEQ_SHIFT;
317 control->final = (enhanced & L2CAP_CTRL_FINAL) >>
318 L2CAP_CTRL_FINAL_SHIFT;
319
320 if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
321 control->frame_type = 's';
322 control->poll = (enhanced & L2CAP_CTRL_POLL) >>
323 L2CAP_CTRL_POLL_SHIFT;
324 control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
325 L2CAP_CTRL_SUPERVISE_SHIFT;
326
327 control->sar = 0;
328 control->txseq = 0;
329 } else {
330 control->frame_type = 'i';
331 control->sar = (enhanced & L2CAP_CTRL_SAR) >>
332 L2CAP_CTRL_SAR_SHIFT;
333 control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
334 L2CAP_CTRL_TXSEQ_SHIFT;
335
336 control->poll = 0;
337 control->super = 0;
338 }
339}
340
341static u32 __pack_extended_control(struct bt_l2cap_control *control)
342{
343 u32 packed;
344
345 packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
346 L2CAP_EXT_CTRL_REQSEQ;
347 packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
348 L2CAP_EXT_CTRL_FINAL;
349
350 if (control->frame_type == 's') {
351 packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
352 L2CAP_EXT_CTRL_POLL;
353 packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
354 L2CAP_EXT_CTRL_SUPERVISE;
355 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
356 } else {
357 packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
358 L2CAP_EXT_CTRL_SAR;
359 packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
360 L2CAP_EXT_CTRL_TXSEQ;
361 }
362
363 return packed;
364}
365
366static void __get_extended_control(u32 extended,
367 struct bt_l2cap_control *control)
368{
369 control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
370 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
371 control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
372 L2CAP_EXT_CTRL_FINAL_SHIFT;
373
374 if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
375 control->frame_type = 's';
376 control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
377 L2CAP_EXT_CTRL_POLL_SHIFT;
378 control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
379 L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
380
381 control->sar = 0;
382 control->txseq = 0;
383 } else {
384 control->frame_type = 'i';
385 control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
386 L2CAP_EXT_CTRL_SAR_SHIFT;
387 control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
388 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
389
390 control->poll = 0;
391 control->super = 0;
392 }
393}
394
/* Cancel a pending delayed ack; safe to call when none is queued. */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
400
401static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
402{
403 BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
404 if (!delayed_work_pending(&pi->ack_work)) {
405 queue_delayed_work(_l2cap_wq, &pi->ack_work,
406 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
407 }
408}
409
/* Cancel a pending retransmission timeout; safe when none is queued. */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
415
/* (Re)arm the retransmission timer with the negotiated timeout.
 * Skipped while the monitor timer is pending (a poll is outstanding)
 * or when no retransmission timeout has been configured.
 */
static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
		/* Restart cleanly: cancel any already-queued instance */
		__cancel_delayed_work(&pi->retrans_work);
		queue_delayed_work(_l2cap_wq, &pi->retrans_work,
			msecs_to_jiffies(pi->retrans_timeout));
	}
}
425
/* Cancel a pending monitor timeout; safe when none is queued. */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
431
/* Start the monitor timer.  The retransmission timer is stopped first
 * since only one of the two runs at a time, and any already-queued
 * monitor work is cancelled before requeueing.  A monitor timeout of
 * zero leaves the timer disarmed.
 */
static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	l2cap_ertm_stop_retrans_timer(pi);
	__cancel_delayed_work(&pi->monitor_work);
	if (pi->monitor_timeout) {
		queue_delayed_work(_l2cap_wq, &pi->monitor_work,
			msecs_to_jiffies(pi->monitor_timeout));
	}
}
442
443static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200444{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300445 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200446
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300447 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200449 return cid;
450 }
451
452 return 0;
453}
454
/* Insert @sk at the head of channel list @l.
 * Takes a socket reference which is dropped by l2cap_chan_unlink().
 * Caller must hold the list lock.
 */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	/* The list owns a reference for as long as the socket is linked */
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
466
/* Remove @sk from channel list @l and drop the reference taken by
 * __l2cap_chan_link().  Takes the list write lock itself.
 */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
483
/* Attach @sk to @conn and assign CIDs/MTUs by socket type:
 *  - SOCK_SEQPACKET/SOCK_STREAM (non-fixed): dynamic CID, or the LE
 *    data CID with LE default MTUs enforced on an LE link
 *  - SOCK_DGRAM: connectionless CID
 *  - SOCK_RAW: signalling CID
 * Fixed channels are left untouched here (configured later by
 * l2cap_fixed_channel_config()).  Caller must hold the list lock.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason 0x13 — presumably HCI "remote user
	 * terminated connection"; NOTE(review): confirm against hci.h */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (!l2cap_pi(sk)->fixed_channel &&
		(sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
			if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;

			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else if (sk->sk_type == SOCK_RAW) {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}
	/* Otherwise, do not set scid/dcid/omtu. These will be set up
	 * by l2cap_fixed_channel_config()
	 */

	__l2cap_chan_link(l, sk);
}
528
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Unlinks the channel from its connection, releases any AMP
 * controller state, moves the socket to BT_CLOSED (recording @err in
 * sk_err when nonzero), notifies the parent or the socket itself, and
 * purges queued data plus ERTM reassembly/timer state.
 */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Fixed channels did not take an hci_conn reference */
		if (!l2cap_pi(sk)->fixed_channel)
			hci_conn_put(conn->hcon);
	}

	/* Drop AMP controller connection/channel state, if any */
	if (l2cap_pi(sk)->ampcon) {
		l2cap_pi(sk)->ampcon->l2cap_data = NULL;
		l2cap_pi(sk)->ampcon = NULL;
		if (l2cap_pi(sk)->ampchan) {
			/* presumably hci_chan_put() returns the remaining
			 * refcount, so deaggregate on last put —
			 * NOTE(review): confirm against hci_core */
			if (!hci_chan_put(l2cap_pi(sk)->ampchan))
				l2cap_deaggregate(l2cap_pi(sk)->ampchan,
						l2cap_pi(sk));
		}
		l2cap_pi(sk)->ampchan = NULL;
		l2cap_pi(sk)->amp_id = 0;
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept: detach from listener and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop any partially reassembled SDU and ERTM timers */
		if (l2cap_pi(sk)->sdu)
			kfree_skb(l2cap_pi(sk)->sdu);

		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}
}
585
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700586static inline u8 l2cap_get_auth_type(struct sock *sk)
Gustavo F. Padovan6e9e43f2011-04-28 17:55:53 -0300587{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700588 if (sk->sk_type == SOCK_RAW) {
589 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530590 case BT_SECURITY_HIGH:
591 return HCI_AT_DEDICATED_BONDING_MITM;
592 case BT_SECURITY_MEDIUM:
593 return HCI_AT_DEDICATED_BONDING;
594 default:
595 return HCI_AT_NO_BONDING;
596 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700597 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
598 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
599 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530600
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700601 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530602 return HCI_AT_NO_BONDING_MITM;
603 else
604 return HCI_AT_NO_BONDING;
605 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700606 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530607 case BT_SECURITY_HIGH:
608 return HCI_AT_GENERAL_BONDING_MITM;
609 case BT_SECURITY_MEDIUM:
610 return HCI_AT_GENERAL_BONDING;
611 default:
612 return HCI_AT_NO_BONDING;
613 }
614 }
615}
616
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200617/* Service level security */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700618static inline int l2cap_check_security(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200619{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700620 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100621 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200622
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700623 auth_type = l2cap_get_auth_type(sk);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100624
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700625 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
626 auth_type);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200627}
628
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-reserved range under conn->lock.
 */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
650
/* Compute the frame check sequence (CRC-16) over an outgoing frame
 * and store it little-endian in the last L2CAP_FCS_SIZE bytes of the
 * final fragment.
 *
 * For a fragmented skb the CRC covers the linear head plus every
 * fragment in the frag list, excluding the trailing FCS bytes
 * themselves.
 */
static void apply_fcs(struct sk_buff *skb)
{
	size_t len;
	u16 partial_crc;
	struct sk_buff *iter;
	struct sk_buff *final_frag = skb;

	/* Without fragments the FCS lives at the end of the head */
	if (skb_has_frag_list(skb))
		len = skb_headlen(skb);
	else
		len = skb->len - L2CAP_FCS_SIZE;

	partial_crc = crc16(0, (u8 *) skb->data, len);

	skb_walk_frags(skb, iter) {
		len = iter->len;
		if (!iter->next)
			len -= L2CAP_FCS_SIZE;	/* leave room for the FCS */

		partial_crc = crc16(partial_crc, iter->data, len);
		final_frag = iter;
	}

	put_unaligned_le16(partial_crc,
		final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
}
677
678void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200679{
680 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200681 u8 flags;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200682
683 BT_DBG("code 0x%2.2x", code);
684
685 if (!skb)
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -0300686 return;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200687
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200688 if (lmp_no_flush_capable(conn->hcon->hdev))
689 flags = ACL_START_NO_FLUSH;
690 else
691 flags = ACL_START;
692
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700693 bt_cb(skb)->force_active = 1;
Jaikumar Ganesh514abe62011-05-23 18:06:04 -0700694
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700695 hci_send_acl(conn->hcon, NULL, skb, flags);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200696}
697
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700698static inline int __l2cap_no_conn_pending(struct sock *sk)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300699{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700700 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300701}
702
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700703static void l2cap_send_conn_req(struct sock *sk)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300704{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700705 struct l2cap_conn_req req;
706 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
707 req.psm = l2cap_pi(sk)->psm;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300708
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700709 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
Gustavo F. Padovan2ab25cd2009-10-03 02:34:40 -0300710
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700711 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
712 L2CAP_CONN_REQ, sizeof(req), &req);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300713}
714
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700715static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300716{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700717 struct l2cap_create_chan_req req;
718 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
719 req.psm = l2cap_pi(sk)->psm;
720 req.amp_id = amp_id;
721
722 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
723 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
724
725 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
726 L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300727}
728
/* Kick off connection establishment for @sk.
 *
 * When the remote feature mask is already known and security checks
 * pass, send a Connect Request — or start an AMP physical link when
 * the socket prefers AMP and the peer advertises A2MP support.
 * Otherwise first issue an Information Request for the feature mask
 * and arm the info timer; connection setup resumes on its response.
 */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Request sent but response not in yet: wait */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			if (l2cap_pi(sk)->amp_pref ==
					BT_AMP_POLICY_PREFER_AMP &&
					conn->fc_mask & L2CAP_FC_A2MP)
				amp_create_physical(conn, sk);
			else
				l2cap_send_conn_req(sk);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
761
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300762static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
763{
764 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300765 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300766 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
767
768 switch (mode) {
769 case L2CAP_MODE_ERTM:
770 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
771 case L2CAP_MODE_STREAMING:
772 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
773 default:
774 return 0x00;
775 }
776}
777
/* Send an L2CAP disconnect request for channel @sk and move it to
 * BT_DISCONN, recording @err in sk_err for the socket layer.
 *
 * Pending transmit data is discarded first; for ERTM channels the SREJ
 * queue is purged and the ack/retransmission/monitor work items are
 * cancelled before the request goes out, so no stale ERTM traffic can
 * race with the disconnect.
 */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
803
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200805static void l2cap_conn_start(struct l2cap_conn *conn)
806{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700807 struct l2cap_chan_list *l = &conn->chan_list;
808 struct sock_del_list del, *tmp1, *tmp2;
809 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200810
811 BT_DBG("conn %p", conn);
812
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700813 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200814
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700815 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300816
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700817 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200818 bh_lock_sock(sk);
819
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700820 if (sk->sk_type != SOCK_SEQPACKET &&
821 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200822 bh_unlock_sock(sk);
823 continue;
824 }
825
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700826 if (sk->sk_state == BT_CONNECT) {
827 if (!l2cap_check_security(sk) ||
828 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300829 bh_unlock_sock(sk);
830 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200831 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
834 conn->feat_mask)
835 && l2cap_pi(sk)->conf_state &
836 L2CAP_CONF_STATE2_DEVICE) {
837 tmp1 = kzalloc(sizeof(struct sock_del_list),
838 GFP_ATOMIC);
839 tmp1->sk = sk;
840 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300841 bh_unlock_sock(sk);
842 continue;
843 }
844
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700845 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300846
Peter Krystadc446d212011-09-20 15:35:50 -0700847 if (l2cap_pi(sk)->amp_pref ==
848 BT_AMP_POLICY_PREFER_AMP &&
849 conn->fc_mask & L2CAP_FC_A2MP)
850 amp_create_physical(conn, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 else
852 l2cap_send_conn_req(sk);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300853
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200855 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300856 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700857 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
858 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200859
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700860 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100861 if (bt_sk(sk)->defer_setup) {
862 struct sock *parent = bt_sk(sk)->parent;
863 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
864 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Nick Pellyf86e4b02010-04-08 16:23:32 -0700865 if (parent)
866 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100867
868 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700869 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100870 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
871 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
872 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200873 } else {
874 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
875 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
876 }
877
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700878 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
879 l2cap_pi(sk)->amp_id) {
880 amp_accept_physical(conn,
881 l2cap_pi(sk)->amp_id, sk);
882 bh_unlock_sock(sk);
883 continue;
884 }
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300885
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700886 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
887 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
888
889 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300890 rsp.result != L2CAP_CR_SUCCESS) {
891 bh_unlock_sock(sk);
892 continue;
893 }
894
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700895 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300896 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700897 l2cap_build_conf_req(sk, buf), buf);
898 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200899 }
900
901 bh_unlock_sock(sk);
902 }
903
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700904 read_unlock(&l->lock);
905
906 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
907 bh_lock_sock(tmp1->sk);
908 __l2cap_sock_close(tmp1->sk, ECONNRESET);
909 bh_unlock_sock(tmp1->sk);
910 list_del(&tmp1->list);
911 kfree(tmp1);
912 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200913}
914
Inga Stotlandf214b6e2011-10-11 08:56:15 -0700915/* Find socket with fixed cid with given source and destination bdaddrs.
916 * Returns closest match, locked.
917 */
918static struct sock *l2cap_get_sock_by_fixed_scid(int state,
919 __le16 cid, bdaddr_t *src, bdaddr_t *dst)
920{
921 struct sock *sk = NULL, *sk1 = NULL;
922 struct hlist_node *node;
923
924 read_lock(&l2cap_sk_list.lock);
925
926 sk_for_each(sk, node, &l2cap_sk_list.head) {
927 if (state && sk->sk_state != state)
928 continue;
929
930 if (l2cap_pi(sk)->scid == cid && !bacmp(&bt_sk(sk)->dst, dst)) {
931 /* Exact match. */
932 if (!bacmp(&bt_sk(sk)->src, src))
933 break;
934
935 /* Closest match */
936 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
937 sk1 = sk;
938 }
939 }
940
941 read_unlock(&l2cap_sk_list.lock);
942
943 return node ? sk : sk1;
944}
945
Ville Tervob62f3282011-02-10 22:38:50 -0300946/* Find socket with cid and source bdaddr.
947 * Returns closest match, locked.
948 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700949static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
Ville Tervob62f3282011-02-10 22:38:50 -0300950{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700951 struct sock *sk = NULL, *sk1 = NULL;
952 struct hlist_node *node;
Ville Tervob62f3282011-02-10 22:38:50 -0300953
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700954 read_lock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300955
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700956 sk_for_each(sk, node, &l2cap_sk_list.head) {
957 if (state && sk->sk_state != state)
Ville Tervob62f3282011-02-10 22:38:50 -0300958 continue;
959
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700960 if (l2cap_pi(sk)->scid == cid) {
Ville Tervob62f3282011-02-10 22:38:50 -0300961 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700962 if (!bacmp(&bt_sk(sk)->src, src))
963 break;
Ville Tervob62f3282011-02-10 22:38:50 -0300964
965 /* Closest match */
966 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700967 sk1 = sk;
Ville Tervob62f3282011-02-10 22:38:50 -0300968 }
969 }
Gustavo F. Padovan280f2942011-04-13 19:01:22 -0300970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971 read_unlock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300972
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700973 return node ? sk : sk1;
Ville Tervob62f3282011-02-10 22:38:50 -0300974}
975
/* Handle an incoming LE link becoming ready: if a socket is listening on
 * the LE data fixed channel, clone it into a new child socket, attach the
 * child to @conn's channel list, mark it connected, and notify the
 * listener.  Bails out silently when there is no listener, the accept
 * backlog is full, or the atomic socket allocation fails.
 *
 * Locking: parent socket is BH-locked for the whole operation; the
 * channel-list write lock is nested inside it while the child is linked.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Hold the ACL/LE link for the lifetime of this channel. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

clean:
	bh_unlock_sock(parent);
}
1023
/* Drive all channels on @conn once the underlying link is fully up.
 *
 * For incoming LE links a listener child is spawned first.  Then, under
 * the channel-list read lock: LE channels are made ready once link
 * security reaches the required level (the larger of the channel's
 * sec_level and the link's pending level); raw sockets are simply marked
 * connected; connection-oriented channels in BT_CONNECT continue via
 * l2cap_do_start().  An LE link with no channels still gets its security
 * raised to BT_SECURITY_HIGH.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	if (l->head) {
		for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
			bh_lock_sock(sk);

			if (conn->hcon->type == LE_LINK) {
				u8 sec_level = l2cap_pi(sk)->sec_level;
				u8 pending_sec = conn->hcon->pending_sec_level;

				/* Honour the stricter of the two levels. */
				if (pending_sec > sec_level)
					sec_level = pending_sec;

				if (smp_conn_security(conn, sec_level)) {
					l2cap_chan_ready(sk);
					/* NOTE(review): drops a link reference
					 * here — presumed to balance a hold
					 * taken when security was requested;
					 * verify refcount balance. */
					hci_conn_put(conn->hcon);
				}

			} else if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				l2cap_sock_clear_timer(sk);
				sk->sk_state = BT_CONNECTED;
				sk->sk_state_change(sk);
			} else if (sk->sk_state == BT_CONNECT)
				l2cap_do_start(sk);

			bh_unlock_sock(sk);
		}
	} else if (conn->hcon->type == LE_LINK) {
		smp_conn_security(conn, BT_SECURITY_HIGH);
	}

	read_unlock(&l->lock);
}
1068
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001069/* Notify sockets that we cannot guaranty reliability anymore */
1070static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1071{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001072 struct l2cap_chan_list *l = &conn->chan_list;
1073 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001074
1075 BT_DBG("conn %p", conn);
1076
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001077 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001078
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001079 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1080 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001081 sk->sk_err = err;
1082 }
1083
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001084 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001085}
1086
1087static void l2cap_info_timeout(unsigned long arg)
1088{
1089 struct l2cap_conn *conn = (void *) arg;
1090
Marcel Holtmann984947d2009-02-06 23:35:19 +01001091 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001092 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001093
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001094 l2cap_conn_start(conn);
1095}
1096
/* Allocate and attach an l2cap_conn to HCI link @hcon.
 *
 * Returns the existing conn if one is already attached, NULL when
 * @status is nonzero (link setup failed) or the atomic allocation
 * fails.  MTU comes from the LE ACL MTU for LE links when the
 * controller reports one, else the BR/EDR ACL MTU.  LE links arm the
 * SMP timeout timer on the hci_conn; other links arm the L2CAP info
 * timer.  disc_reason defaults to 0x13 (remote user terminated).
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	if (hcon->type == LE_LINK)
		setup_timer(&hcon->smp_timer, smp_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	conn->disc_reason = 0x13;

	return conn;
}
1137
/* Tear down L2CAP state associated with HCI link @hcon, failing all
 * affected channels with @err.
 *
 * With AMP, a single l2cap_conn can span the BR/EDR link plus AMP
 * links, so a channel is killed either when @hcon is the main link
 * (conn->hcon == hcon) or when the channel's AMP link is @hcon.  The
 * conn itself (rx_skb, info timer, the struct) is only freed when the
 * main link goes down.
 *
 * NOTE(review): the channel list is walked without taking
 * chan_list.lock here — presumably safe in this teardown context;
 * confirm against the callers.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			/* l2cap_chan_del() unlinks sk, so grab the next
			 * pointer before deleting. */
			next = l2cap_pi(sk)->next_c;
			bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	if (conn->hcon == hcon) {
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		hcon->l2cap_data = NULL;

		kfree(conn);
	}
}
1177
1178static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
1179{
1180 struct l2cap_chan_list *l = &conn->chan_list;
1181 write_lock_bh(&l->lock);
1182 __l2cap_chan_add(conn, sk);
1183 write_unlock_bh(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184}
1185
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
1188/* Find socket with psm and source bdaddr.
1189 * Returns closest match.
1190 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001191static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001193 struct sock *sk = NULL, *sk1 = NULL;
1194 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001196 read_lock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198 sk_for_each(sk, node, &l2cap_sk_list.head) {
1199 if (state && sk->sk_state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 continue;
1201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001202 if (l2cap_pi(sk)->psm == psm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001204 if (!bacmp(&bt_sk(sk)->src, src))
1205 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
1207 /* Closest match */
1208 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001209 sk1 = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 }
1211 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001213 read_unlock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001214
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001215 return node ? sk : sk1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216}
1217
/* Establish an outgoing L2CAP channel for @sk toward bt_sk(sk)->dst.
 *
 * Fixed channels reuse an existing ACL link (fail with -ENOTCONN when
 * none exists); otherwise an LE or ACL connection is created depending
 * on whether the channel targets the LE data CID.  The socket is then
 * attached to the conn and either marked connected immediately (fixed
 * channel, or LE data channel on an already-connected link) or moved to
 * BT_CONNECT with the connect timer armed, continuing via
 * l2cap_do_start() once/if the link is up.
 *
 * Returns 0 on success or a negative errno; the hci_dev is locked for
 * the whole operation and always released via the done: path.
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		/* LE data channels connect over LE, everything else ACL. */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_connect(hdev, LE_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	if ((l2cap_pi(sk)->fixed_channel) ||
			(l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
				hcon->state == BT_CONNECTED)) {
		/* No L2CAP connect handshake needed on these paths. */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
		sk->sk_state_change(sk);

		if (hcon->state == BT_CONNECTED) {
			/* Raw sockets become connected as soon as security
			 * passes; SEQPACKET/STREAM run the full handshake. */
			if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1304
/* Block until all transmitted ERTM frames have been acknowledged and
 * drained from the local queue, or until a signal/socket error occurs.
 *
 * Classic waitqueue sleep loop: the socket lock is released around each
 * schedule_timeout() so the receive path can process acknowledgements.
 * Returns 0 on success, a signal errno, or the pending socket error.
 * Caller must hold the socket lock.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after a full timeout elapsed. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1336
/* Workqueue handler that resumes ERTM transmission for a channel.
 * Scheduled from l2cap_skb_destructor() when the in-flight frame count
 * drops below L2CAP_MIN_ERTM_QUEUED; the socket reference held by the
 * queued work is dropped here via sock_put().
 */
static void l2cap_ertm_tx_worker(struct work_struct *work)
{
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, tx_work);
	struct sock *sk = (struct sock *)pi;
	BT_DBG("%p", pi);

	lock_sock(sk);
	l2cap_ertm_send(sk);
	release_sock(sk);
	sock_put(sk);
}
1349
/* Destructor for cloned ERTM tx skbs (see l2cap_ertm_send(), which takes
 * a sock_hold() per queued clone).  Decrements the in-flight counter;
 * when it falls below the low-water mark, the tx worker is queued to
 * refill the pipeline.  A successful queue_work() transfers our socket
 * reference to the worker (which does the sock_put()); otherwise the
 * reference is dropped here.
 */
static void l2cap_skb_destructor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queued;
	int keep_sk = 0;

	queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
	if (queued < L2CAP_MIN_ERTM_QUEUED)
		keep_sk = queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);

	if (!keep_sk)
		sock_put(sk);
}
1363
/* Hand one outbound frame to the HCI layer, choosing the transport.
 *
 * If the channel has an AMP connection and its move state permits
 * (stable or waiting-to-prepare), the frame goes out on the AMP link —
 * or is dropped if the AMP logical channel is gone.  Otherwise it goes
 * out on the BR/EDR link, with ACL_START_NO_FLUSH when the controller
 * supports non-flushable packets and the channel is not flushable.
 * Consumes @skb on every path.
 */
void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
			pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
		BT_DBG("Sending on AMP connection %p %p",
			pi->ampcon, pi->ampchan);
		if (pi->ampchan)
			hci_send_acl(pi->ampcon, pi->ampchan, skb,
					ACL_COMPLETE);
		else
			/* AMP channel vanished under us: drop the frame. */
			kfree_skb(skb);
	} else {
		u16 flags;

		bt_cb(skb)->force_active = pi->force_active;
		BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);

		if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
				!l2cap_pi(sk)->flushable)
			flags = ACL_START_NO_FLUSH;
		else
			flags = ACL_START;

		hci_send_acl(pi->conn->hcon, NULL, skb, flags);
	}
}
1394
/* Transmit as many pending ERTM I-frames as the window allows.
 *
 * Frames are sent while there is data at sk_send_head, the peer's tx
 * window has room, fewer than L2CAP_MAX_ERTM_QUEUED clones are in
 * flight, and the tx state machine is in XMIT.  For each frame the
 * control field (reqseq/txseq, and the F-bit if one is owed) is written
 * into the skb before cloning, the FCS is applied if negotiated, and a
 * clone carrying a socket reference (released by l2cap_skb_destructor)
 * is handed to l2cap_do_send().  The retransmission timer is restarted
 * per frame.
 *
 * Returns the number of frames sent, 0 when blocked (remote busy, AMP
 * move in progress), or -ENOTCONN.  Caller must hold the socket lock.
 */
int l2cap_ertm_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p", sk);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return 0;

	/* Hold off while an AMP channel move is in progress. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
		atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
		(pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {

		skb = sk->sk_send_head;

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback the pending F-bit on this frame, once. */
		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
			control->final = 1;
			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		}
		control->reqseq = pi->buffer_seq;
		pi->last_acked_seq = pi->buffer_seq;
		control->txseq = pi->next_tx_seq;

		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		/* Clone after data has been modified. Data is assumed to be
			read-only (for locking purposes) on cloned sk_buffs.
			*/
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		if (!tx_skb)
			break;

		/* Reference dropped by l2cap_skb_destructor (or handed to
		 * the tx worker it queues). */
		sock_hold(sk);
		tx_skb->sk = sk;
		tx_skb->destructor = l2cap_skb_destructor;
		atomic_inc(&pi->ertm_queued);

		l2cap_do_send(sk, tx_skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		l2cap_ertm_start_retrans_timer(pi);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->unacked_frames += 1;
		pi->frames_sent += 1;
		sent += 1;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
		(int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
		atomic_read(&pi->ertm_queued));

	return sent;
}
1478
/* Transmit frames in streaming mode (no acknowledgements, no window).
 *
 * @skbs is spliced onto the channel's tx queue, then every queued frame
 * gets its control field stamped (reqseq 0, next txseq), the FCS applied
 * if negotiated, and is sent immediately — streaming mode never waits
 * for acks and keeps no copy for retransmission.
 *
 * Returns 0 on success, -ENOTCONN if the channel is not connected, or 0
 * without sending while an AMP move is in progress (frames stay queued).
 * Caller must hold the socket lock.
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* Streaming frames carry no acknowledgement state. */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1536
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001537static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001538{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001539 while (len > 0) {
1540 if (iv->iov_len) {
1541 int copy = min_t(unsigned int, len, iv->iov_len);
1542 memcpy(kdata, iv->iov_base, copy);
1543 len -= copy;
1544 kdata += copy;
1545 iv->iov_base += copy;
1546 iv->iov_len -= copy;
1547 }
1548 iv++;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001549 }
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001552}
1553
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001554static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
1555 int len, int count, struct sk_buff *skb,
1556 int reseg)
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03001557{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001559 struct sk_buff **frag;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001560 struct sk_buff *final;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001561 int err, sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001563 BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
1564 msg, (int)len, (int)count, skb);
1565
1566 if (!conn)
1567 return -ENOTCONN;
1568
1569 /* When resegmenting, data is copied from kernel space */
1570 if (reseg) {
1571 err = memcpy_fromkvec(skb_put(skb, count),
1572 (struct kvec *) msg->msg_iov, count);
1573 } else {
1574 err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
1575 count);
1576 }
1577
1578 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001579 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
1581 sent += count;
1582 len -= count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001583 final = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584
1585 /* Continuation fragments (no L2CAP header) */
1586 frag = &skb_shinfo(skb)->frag_list;
1587 while (len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 int skblen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 count = min_t(unsigned int, conn->mtu, len);
1590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001591 /* Add room for the FCS if it fits */
1592 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
1593 len + L2CAP_FCS_SIZE <= conn->mtu)
1594 skblen = count + L2CAP_FCS_SIZE;
1595 else
1596 skblen = count;
1597
1598 /* Don't use bt_skb_send_alloc() while resegmenting, since
1599 * it is not ok to block.
1600 */
1601 if (reseg) {
1602 *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
1603 if (*frag)
1604 skb_set_owner_w(*frag, sk);
1605 } else {
1606 *frag = bt_skb_send_alloc(sk, skblen,
1607 msg->msg_flags & MSG_DONTWAIT, &err);
1608 }
1609
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 if (!*frag)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001611 return -EFAULT;
1612
1613 /* When resegmenting, data is copied from kernel space */
1614 if (reseg) {
1615 err = memcpy_fromkvec(skb_put(*frag, count),
1616 (struct kvec *) msg->msg_iov,
1617 count);
1618 } else {
1619 err = memcpy_fromiovec(skb_put(*frag, count),
1620 msg->msg_iov, count);
1621 }
1622
1623 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001624 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
1626 sent += count;
1627 len -= count;
1628
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001629 final = *frag;
1630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 frag = &(*frag)->next;
1632 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001634 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
1635 if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
1636 if (reseg) {
1637 *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
1638 GFP_ATOMIC);
1639 if (*frag)
1640 skb_set_owner_w(*frag, sk);
1641 } else {
1642 *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
1643 msg->msg_flags & MSG_DONTWAIT,
1644 &err);
1645 }
1646
1647 if (!*frag)
1648 return -EFAULT;
1649
1650 final = *frag;
1651 }
1652
1653 skb_put(final, L2CAP_FCS_SIZE);
1654 }
1655
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001657}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001659struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001660{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001661 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001662 struct sk_buff *skb;
1663 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1664 struct l2cap_hdr *lh;
1665
1666 BT_DBG("sk %p len %d", sk, (int)len);
1667
1668 count = min_t(unsigned int, (conn->mtu - hlen), len);
1669 skb = bt_skb_send_alloc(sk, count + hlen,
1670 msg->msg_flags & MSG_DONTWAIT, &err);
1671 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001672 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001673
1674 /* Create L2CAP header */
1675 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001676 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001677 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001679
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001680 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001681 if (unlikely(err < 0)) {
1682 kfree_skb(skb);
1683 return ERR_PTR(err);
1684 }
1685 return skb;
1686}
1687
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001688struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001689{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001690 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001691 struct sk_buff *skb;
1692 int err, count, hlen = L2CAP_HDR_SIZE;
1693 struct l2cap_hdr *lh;
1694
1695 BT_DBG("sk %p len %d", sk, (int)len);
1696
1697 count = min_t(unsigned int, (conn->mtu - hlen), len);
1698 skb = bt_skb_send_alloc(sk, count + hlen,
1699 msg->msg_flags & MSG_DONTWAIT, &err);
1700 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001701 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001702
1703 /* Create L2CAP header */
1704 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001705 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001706 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1707
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001708 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001709 if (unlikely(err < 0)) {
1710 kfree_skb(skb);
1711 return ERR_PTR(err);
1712 }
1713 return skb;
1714}
1715
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001716struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1717 struct msghdr *msg, size_t len,
1718 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001719{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001720 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001721 int err, count, hlen;
1722 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001723 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001724 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001725
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001726 if (l2cap_pi(sk)->extended_control)
1727 hlen = L2CAP_EXTENDED_HDR_SIZE;
1728 else
1729 hlen = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03001730
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001731 if (sdulen)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001732 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001733
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001734 if (fcs == L2CAP_FCS_CRC16)
1735 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001736
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001737 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1738 sk, msg, (int)len, (int)sdulen, hlen);
1739
1740 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1741
1742 /* Allocate extra headroom for Qualcomm PAL. This is only
1743 * necessary in two places (here and when creating sframes)
1744 * because only unfragmented iframes and sframes are sent
1745 * using AMP controllers.
1746 */
1747 if (l2cap_pi(sk)->ampcon &&
1748 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1749 reserve = BT_SKB_RESERVE_80211;
1750
1751 /* Don't use bt_skb_send_alloc() while resegmenting, since
1752 * it is not ok to block.
1753 */
1754 if (reseg) {
1755 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1756 if (skb)
1757 skb_set_owner_w(skb, sk);
1758 } else {
1759 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
1760 msg->msg_flags & MSG_DONTWAIT, &err);
1761 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001762 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001763 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001764
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001765 if (reserve)
1766 skb_reserve(skb, reserve);
1767
1768 bt_cb(skb)->control.fcs = fcs;
1769
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001770 /* Create L2CAP header */
1771 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001772 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1773 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001774
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001775 /* Control header is populated later */
1776 if (l2cap_pi(sk)->extended_control)
1777 put_unaligned_le32(0, skb_put(skb, 4));
1778 else
1779 put_unaligned_le16(0, skb_put(skb, 2));
1780
1781 if (sdulen)
1782 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1783
1784 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001785 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001787 kfree_skb(skb);
1788 return ERR_PTR(err);
1789 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001790
1791 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001792 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793}
1794
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001795static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001796{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001797 struct l2cap_pinfo *pi;
1798 struct sk_buff *acked_skb;
1799 u16 ackseq;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001800
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001801 BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001802
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001803 pi = l2cap_pi(sk);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001804
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001805 if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
1806 return;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001807
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001808 BT_DBG("expected_ack_seq %d, unacked_frames %d",
1809 (int) pi->expected_ack_seq, (int) pi->unacked_frames);
1810
1811 for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
1812 ackseq = __next_seq(ackseq, pi)) {
1813
1814 acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
1815 if (acked_skb) {
1816 skb_unlink(acked_skb, TX_QUEUE(sk));
1817 kfree_skb(acked_skb);
1818 pi->unacked_frames--;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001819 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001820 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001821
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001822 pi->expected_ack_seq = reqseq;
1823
1824 if (pi->unacked_frames == 0)
1825 l2cap_ertm_stop_retrans_timer(pi);
1826
1827 BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001828}
1829
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001831{
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001832 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001833 int len;
1834 int reserve = 0;
1835 struct l2cap_hdr *lh;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001836
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837 if (l2cap_pi(sk)->extended_control)
1838 len = L2CAP_EXTENDED_HDR_SIZE;
1839 else
1840 len = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001842 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1843 len += L2CAP_FCS_SIZE;
1844
1845 /* Allocate extra headroom for Qualcomm PAL */
1846 if (l2cap_pi(sk)->ampcon &&
1847 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1848 reserve = BT_SKB_RESERVE_80211;
1849
1850 skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
1851
1852 if (!skb)
1853 return ERR_PTR(-ENOMEM);
1854
1855 if (reserve)
1856 skb_reserve(skb, reserve);
1857
1858 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1859 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1860 lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
1861
1862 if (l2cap_pi(sk)->extended_control)
1863 put_unaligned_le32(control, skb_put(skb, 4));
1864 else
1865 put_unaligned_le16(control, skb_put(skb, 2));
1866
1867 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1868 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1869 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001870 }
1871
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001872 return skb;
1873}
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001874
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001875static void l2cap_ertm_send_sframe(struct sock *sk,
1876 struct bt_l2cap_control *control)
1877{
1878 struct l2cap_pinfo *pi;
1879 struct sk_buff *skb;
1880 u32 control_field;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001881
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001882 BT_DBG("sk %p, control %p", sk, control);
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001883
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001884 if (control->frame_type != 's')
1885 return;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001886
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001887 pi = l2cap_pi(sk);
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001889 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
1890 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
1891 pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
1892 BT_DBG("AMP error - attempted S-Frame send during AMP move");
1893 return;
1894 }
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001895
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001896 if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
1897 control->final = 1;
1898 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1899 }
1900
1901 if (control->super == L2CAP_SFRAME_RR)
1902 pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
1903 else if (control->super == L2CAP_SFRAME_RNR)
1904 pi->conn_state |= L2CAP_CONN_SENT_RNR;
1905
1906 if (control->super != L2CAP_SFRAME_SREJ) {
1907 pi->last_acked_seq = control->reqseq;
1908 l2cap_ertm_stop_ack_timer(pi);
1909 }
1910
1911 BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
1912 (int) control->final, (int) control->poll,
1913 (int) control->super);
1914
1915 if (pi->extended_control)
1916 control_field = __pack_extended_control(control);
1917 else
1918 control_field = __pack_enhanced_control(control);
1919
1920 skb = l2cap_create_sframe_pdu(sk, control_field);
1921 if (!IS_ERR(skb))
1922 l2cap_do_send(sk, skb);
1923}
1924
/* Acknowledge received I-frames.  If local-busy, send RNR.  Otherwise
 * try to piggyback the ack on pending I-frames; if unacked received
 * frames still remain and the count has reached 3/4 of the TX window,
 * send an explicit RR now, else (re)arm the ack timer to batch more.
 */
static void l2cap_ertm_send_ack(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control control;
	u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
	int threshold;

	BT_DBG("sk %p", sk);
	BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
		(int)pi->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';

	if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
		/* Local busy: advertise RNR instead of RR */
		l2cap_ertm_stop_ack_timer(pi);
		control.super = L2CAP_SFRAME_RNR;
		control.reqseq = pi->buffer_seq;
		l2cap_ertm_send_sframe(sk, &control);
	} else {
		if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
			l2cap_ertm_send(sk);
			/* If any i-frames were sent, they included an ack */
			if (pi->buffer_seq == pi->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = pi->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			l2cap_ertm_stop_ack_timer(pi);
			control.super = L2CAP_SFRAME_RR;
			control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer */
		if (frames_to_ack)
			l2cap_ertm_start_ack_timer(pi);
	}
}
1975
1976static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
1977{
1978 struct l2cap_pinfo *pi;
1979 struct bt_l2cap_control control;
1980
1981 BT_DBG("sk %p, poll %d", sk, (int) poll);
1982
1983 pi = l2cap_pi(sk);
1984
1985 memset(&control, 0, sizeof(control));
1986 control.frame_type = 's';
1987 control.poll = poll;
1988
1989 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
1990 control.super = L2CAP_SFRAME_RNR;
1991 else
1992 control.super = L2CAP_SFRAME_RR;
1993
1994 control.reqseq = pi->buffer_seq;
1995 l2cap_ertm_send_sframe(sk, &control);
1996}
1997
/* Respond to a poll (P-bit) from the peer.  The F-bit must be sent
 * exactly once: it is flagged via L2CAP_CONN_SEND_FBIT and consumed by
 * the first RNR, I-frame, or trailing RR sent below.
 */
static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.final = 1;
	control.reqseq = pi->buffer_seq;
	/* Mark the F-bit pending; l2cap_ertm_send_sframe() clears it */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control.super = L2CAP_SFRAME_RNR;
		l2cap_ertm_send_sframe(sk, &control);
	}

	if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
			(pi->unacked_frames > 0))
		l2cap_ertm_start_retrans_timer(pi);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Send pending iframes */
	l2cap_ertm_send(sk);

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SFRAME_RR;
		l2cap_ertm_send_sframe(sk, &control);
	}
}
2035
/* A gap was detected in received I-frames: send an SREJ for every
 * missing sequence number from expected_tx_seq up to (but excluding)
 * the out-of-order frame 'txseq', recording each request in srej_list.
 */
static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	for (seq = pi->expected_tx_seq; seq != txseq;
			seq = __next_seq(seq, pi)) {
		/* Skip frames already buffered out of order */
		if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
			control.reqseq = seq;
			l2cap_ertm_send_sframe(sk, &control);
			l2cap_seq_list_append(&pi->srej_list, seq);
		}
	}

	/* Resume expecting the frame after the one just received */
	pi->expected_tx_seq = __next_seq(txseq, pi);
}
2060
2061static void l2cap_ertm_send_srej_tail(struct sock *sk)
2062{
2063 struct bt_l2cap_control control;
2064 struct l2cap_pinfo *pi;
2065
2066 BT_DBG("sk %p", sk);
2067
2068 pi = l2cap_pi(sk);
2069
2070 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2071 return;
2072
2073 memset(&control, 0, sizeof(control));
2074 control.frame_type = 's';
2075 control.super = L2CAP_SFRAME_SREJ;
2076 control.reqseq = pi->srej_list.tail;
2077 l2cap_ertm_send_sframe(sk, &control);
2078}
2079
/* Re-send SREJs for every outstanding missing frame except 'txseq'
 * (which has just arrived).  Each popped entry that is still missing
 * is re-requested and appended back to the list; the saved head bounds
 * the rotation to a single pass.
 */
static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 initial_head;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int) txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = pi->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&pi->srej_list);
		if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
			break;

		control.reqseq = seq;
		l2cap_ertm_send_sframe(sk, &control);
		l2cap_seq_list_append(&pi->srej_list, seq);
	} while (pi->srej_list.head != initial_head);
}
2107
2108static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2109{
2110 struct l2cap_pinfo *pi = l2cap_pi(sk);
2111 BT_DBG("sk %p", sk);
2112
2113 pi->expected_tx_seq = pi->buffer_seq;
2114 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2115 skb_queue_purge(SREJ_QUEUE(sk));
2116 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2117}
2118
/* ERTM transmitter state machine: XMIT state handler.
 *
 * In XMIT, new data is sent immediately, polls and retransmission
 * timeouts transition to WAIT_F, and local-busy transitions update the
 * advertised receiver status.  Returns 0 (err is reserved for future
 * event handlers).
 */
static int l2cap_ertm_tx_state_xmit(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		/* Queue the new frames and transmit as the window allows */
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		l2cap_ertm_send(sk);
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Advertises RNR now that LOCAL_BUSY is set */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		/* If an AMP move was waiting for local-busy to clear,
		 * resume the move handshake for our role.
		 */
		if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
			if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
						pi->amp_move_cmd_ident,
						pi->dcid,
						L2CAP_MOVE_CHAN_SUCCESS);
			}
			break;
		}

		/* We previously advertised RNR: poll the peer so it
		 * learns we are ready again, and await the F-bit.
		 */
		if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
				(pi->conn_state & L2CAP_CONN_SENT_RNR)) {
			struct bt_l2cap_control local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		/* Solicit an F-bit response and enter WAIT_F */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		l2cap_ertm_stop_ack_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
		/* Ack never arrived: poll the peer and enter WAIT_F */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2217
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002218static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
2219 struct bt_l2cap_control *control,
2220 struct sk_buff_head *skbs, u8 event)
2221{
2222 struct l2cap_pinfo *pi;
2223 int err = 0;
2224
2225 BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
2226 (int)event);
2227 pi = l2cap_pi(sk);
2228
2229 switch (event) {
2230 case L2CAP_ERTM_EVENT_DATA_REQUEST:
2231 if (sk->sk_send_head == NULL)
2232 sk->sk_send_head = skb_peek(skbs);
2233 /* Queue data, but don't send. */
2234 skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
2235 break;
2236 case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
2237 BT_DBG("Enter LOCAL_BUSY");
2238 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2239
2240 if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
2241 /* The SREJ_SENT state must be aborted if we are to
2242 * enter the LOCAL_BUSY state.
2243 */
2244 l2cap_ertm_abort_rx_srej_sent(sk);
2245 }
2246
2247 l2cap_ertm_send_ack(sk);
2248
2249 break;
2250 case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
2251 BT_DBG("Exit LOCAL_BUSY");
2252 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2253
2254 if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
2255 struct bt_l2cap_control local_control;
2256 memset(&local_control, 0, sizeof(local_control));
2257 local_control.frame_type = 's';
2258 local_control.super = L2CAP_SFRAME_RR;
2259 local_control.poll = 1;
2260 local_control.reqseq = pi->buffer_seq;
2261 l2cap_ertm_send_sframe(sk, &local_control);
2262
2263 pi->retry_count = 1;
2264 l2cap_ertm_start_monitor_timer(pi);
2265 pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
2266 }
2267 break;
2268 case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
2269 l2cap_ertm_process_reqseq(sk, control->reqseq);
2270
2271 /* Fall through */
2272
2273 case L2CAP_ERTM_EVENT_RECV_FBIT:
2274 if (control && control->final) {
2275 l2cap_ertm_stop_monitor_timer(pi);
2276 if (pi->unacked_frames > 0)
2277 l2cap_ertm_start_retrans_timer(pi);
2278 pi->retry_count = 0;
2279 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2280 BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
2281 }
2282 break;
2283 case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
2284 /* Ignore */
2285 break;
2286 case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
2287 if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
2288 l2cap_ertm_send_rr_or_rnr(sk, 1);
2289 l2cap_ertm_start_monitor_timer(pi);
2290 pi->retry_count += 1;
2291 } else
2292 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
2293 break;
2294 default:
2295 break;
2296 }
2297
2298 return err;
2299}
2300
2301int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2302 struct sk_buff_head *skbs, u8 event)
2303{
2304 struct l2cap_pinfo *pi;
2305 int err = 0;
2306
2307 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2308 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2309
2310 pi = l2cap_pi(sk);
2311
2312 switch (pi->tx_state) {
2313 case L2CAP_ERTM_TX_STATE_XMIT:
2314 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2315 break;
2316 case L2CAP_ERTM_TX_STATE_WAIT_F:
2317 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2318 break;
2319 default:
2320 /* Ignore event */
2321 break;
2322 }
2323
2324 return err;
2325}
2326
/* Segment an SDU of 'len' bytes into I-frame PDUs appended to
 * 'seg_queue'.
 *
 * PDU payload size is bounded by the HCI MTU (minus worst-case L2CAP
 * overhead), the BR/EDR radio packet limit when no AMP controller is
 * in use, and the remote's MPS.  The first segment of a multi-PDU SDU
 * carries the SDU length and is marked SAR_START; intermediate and
 * last segments are SAR_CONTINUE/SAR_END; a single-PDU SDU is
 * SAR_UNSEGMENTED.
 *
 * On PDU-creation failure the queue is purged and the error returned;
 * otherwise returns 0.
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		/* Entire SDU fits in one PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first segment carries the SDU length;
			 * later segments regain that header space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2395
2396static inline int is_initial_frame(u8 sar)
2397{
2398 return (sar == L2CAP_SAR_UNSEGMENTED ||
2399 sar == L2CAP_SAR_START);
2400}
2401
2402static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2403 size_t veclen)
2404{
2405 struct sk_buff *frag_iter;
2406
2407 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2408
2409 if (iv->iov_len + skb->len > veclen)
2410 return -ENOMEM;
2411
2412 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2413 iv->iov_len += skb->len;
2414
2415 skb_walk_frags(skb, frag_iter) {
2416 if (iv->iov_len + skb->len > veclen)
2417 return -ENOMEM;
2418
2419 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2420 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2421 frag_iter->len);
2422 iv->iov_len += frag_iter->len;
2423 }
2424
2425 return 0;
2426}
2427
/* Rebuild every PDU on @queue to match the channel's current PDU size
 * and header format (used after an AMP channel move changes the MTU or
 * control-field format).  Each SDU is reassembled into a flat buffer
 * via l2cap_skbuff_to_kvec(), then re-segmented with
 * l2cap_segment_sdu().
 *
 * Returns 0 on success or a negative errno; on failure, both the
 * original frames and any partially rebuilt frames are purged from
 * @queue.
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Reassembly buffer must hold the largest SDU plus a trailing FCS */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		/* Start frames additionally carry the 2-byte SDU length */
		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* Drop the trailing FCS; it is regenerated on re-send */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Gather continuation/end frames of the same SDU */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			/* A new SDU begins here; current SDU is complete */
			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU. The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
					msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
				err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits: if the original first frame was
		 * mid-SDU (continue/end), the rebuilt frames must not claim
		 * to start a new SDU.
		 */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
				skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
					skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
						L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	/* On error, old_frames may still hold frames of the failed SDU */
	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2600
/* Workqueue handler that resegments the ERTM transmit queue after an
 * AMP channel move.  Runs in process context so the socket lock may be
 * taken.  Drops the socket reference taken by l2cap_setup_resegment()
 * on every exit path.
 */
static void l2cap_resegment_worker(struct work_struct *work)
{
	int err = 0;
	struct l2cap_resegment_work *seg_work =
		container_of(work, struct l2cap_resegment_work, work);
	struct sock *sk = seg_work->sk;

	/* Work item carried only the socket pointer; free it up front */
	kfree(seg_work);

	BT_DBG("sk %p", sk);
	lock_sock(sk);

	/* Bail out if the move state changed while the work was queued */
	if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		release_sock(sk);
		sock_put(sk);
		return;
	}

	err = l2cap_resegment_queue(sk, TX_QUEUE(sk));

	l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;

	/* The queue was rebuilt, so resync the send pointer */
	if (skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));

	if (err)
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else
		l2cap_ertm_send(sk);

	release_sock(sk);
	sock_put(sk);
}
2636
2637static int l2cap_setup_resegment(struct sock *sk)
2638{
2639 struct l2cap_resegment_work *seg_work;
2640
2641 BT_DBG("sk %p", sk);
2642
2643 if (skb_queue_empty(TX_QUEUE(sk)))
2644 return 0;
2645
2646 seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
2647 if (!seg_work)
2648 return -ENOMEM;
2649
2650 INIT_WORK(&seg_work->work, l2cap_resegment_worker);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002651 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002652 seg_work->sk = sk;
2653
2654 if (!queue_work(_l2cap_wq, &seg_work->work)) {
2655 kfree(seg_work);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002656 sock_put(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002657 return -ENOMEM;
2658 }
2659
2660 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
2661
2662 return 0;
2663}
2664
2665static inline int l2cap_rmem_available(struct sock *sk)
2666{
2667 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2668 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2669 return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
2670}
2671
2672static inline int l2cap_rmem_full(struct sock *sk)
2673{
2674 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2675 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2676 return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
2677}
2678
2679void l2cap_amp_move_init(struct sock *sk)
2680{
2681 BT_DBG("sk %p", sk);
2682
2683 if (!l2cap_pi(sk)->conn)
2684 return;
2685
2686 if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
2687 return;
2688
2689 if (l2cap_pi(sk)->amp_id == 0) {
2690 if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
2691 return;
2692 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2693 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
2694 amp_create_physical(l2cap_pi(sk)->conn, sk);
2695 } else {
2696 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2697 l2cap_pi(sk)->amp_move_state =
2698 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
2699 l2cap_pi(sk)->amp_move_id = 0;
2700 l2cap_amp_move_setup(sk);
2701 l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
2702 l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
2703 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2704 }
2705}
2706
2707static void l2cap_chan_ready(struct sock *sk)
2708{
2709 struct sock *parent = bt_sk(sk)->parent;
2710
2711 BT_DBG("sk %p, parent %p", sk, parent);
2712
2713 l2cap_pi(sk)->conf_state = 0;
2714 l2cap_sock_clear_timer(sk);
2715
2716 if (!parent) {
2717 /* Outgoing channel.
2718 * Wake up socket sleeping on connect.
2719 */
2720 sk->sk_state = BT_CONNECTED;
2721 sk->sk_state_change(sk);
2722 } else {
2723 /* Incoming channel.
2724 * Wake up socket sleeping on accept.
2725 */
2726 parent->sk_data_ready(parent, 0);
2727 }
2728}
2729
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730/* Copy frame to all raw sockets on that connection */
2731static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2732{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002733 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002735 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
2737 BT_DBG("conn %p", conn);
2738
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002739 read_lock(&l->lock);
2740 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2741 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 continue;
2743
2744 /* Don't send frame to the socket it came from */
2745 if (skb->sk == sk)
2746 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002747 nskb = skb_clone(skb, GFP_ATOMIC);
2748 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 continue;
2750
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002751 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 kfree_skb(nskb);
2753 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002754 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755}
2756
/* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one signalling command: L2CAP
 * header (CID chosen by link type, LE vs BR/EDR), command header
 * (code/ident/len), then dlen bytes of payload.  Payload beyond the
 * controller's ACL MTU is chained as continuation fragments on
 * frag_list.  Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment shares space with the two headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees any chained fragments */
	kfree_skb(skb);
	return NULL;
}
2821
2822static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2823{
2824 struct l2cap_conf_opt *opt = *ptr;
2825 int len;
2826
2827 len = L2CAP_CONF_OPT_SIZE + opt->len;
2828 *ptr += len;
2829
2830 *type = opt->type;
2831 *olen = opt->len;
2832
2833 switch (opt->len) {
2834 case 1:
2835 *val = *((u8 *) opt->val);
2836 break;
2837
2838 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002839 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 break;
2841
2842 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002843 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 break;
2845
2846 default:
2847 *val = (unsigned long) opt->val;
2848 break;
2849 }
2850
2851 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2852 return len;
2853}
2854
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2856{
2857 struct l2cap_conf_opt *opt = *ptr;
2858
2859 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2860
2861 opt->type = type;
2862 opt->len = len;
2863
2864 switch (len) {
2865 case 1:
2866 *((u8 *) opt->val) = val;
2867 break;
2868
2869 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002870 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 break;
2872
2873 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002874 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 break;
2876
2877 default:
2878 memcpy(opt->val, (void *) val, len);
2879 break;
2880 }
2881
2882 *ptr += L2CAP_CONF_OPT_SIZE + len;
2883}
2884
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002885static void l2cap_ertm_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002886{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002887 struct delayed_work *delayed =
2888 container_of(work, struct delayed_work, work);
2889 struct l2cap_pinfo *pi =
2890 container_of(delayed, struct l2cap_pinfo, ack_work);
2891 struct sock *sk = (struct sock *)pi;
2892 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002893
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002894 BT_DBG("sk %p", sk);
2895
2896 if (!sk)
2897 return;
2898
2899 lock_sock(sk);
2900
2901 if (!l2cap_pi(sk)->conn) {
2902 release_sock(sk);
2903 return;
2904 }
2905
2906 frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
2907 l2cap_pi(sk)->last_acked_seq,
2908 l2cap_pi(sk));
2909
2910 if (frames_to_ack)
2911 l2cap_ertm_send_rr_or_rnr(sk, 0);
2912
2913 release_sock(sk);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002914}
2915
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002916static void l2cap_ertm_retrans_timeout(struct work_struct *work)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002917{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002918 struct delayed_work *delayed =
2919 container_of(work, struct delayed_work, work);
2920 struct l2cap_pinfo *pi =
2921 container_of(delayed, struct l2cap_pinfo, retrans_work);
2922 struct sock *sk = (struct sock *)pi;
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03002923
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002924 BT_DBG("sk %p", sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002925
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002926 if (!sk)
2927 return;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002928
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002929 lock_sock(sk);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03002930
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002931 if (!l2cap_pi(sk)->conn) {
2932 release_sock(sk);
2933 return;
2934 }
Gustavo F. Padovan39d5a3e2011-04-04 15:40:12 -03002935
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002936 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
2937 release_sock(sk);
2938}
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03002939
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002940static void l2cap_ertm_monitor_timeout(struct work_struct *work)
2941{
2942 struct delayed_work *delayed =
2943 container_of(work, struct delayed_work, work);
2944 struct l2cap_pinfo *pi =
2945 container_of(delayed, struct l2cap_pinfo, monitor_work);
2946 struct sock *sk = (struct sock *)pi;
2947
2948 BT_DBG("sk %p", sk);
2949
2950 if (!sk)
2951 return;
2952
2953 lock_sock(sk);
2954
2955 if (!l2cap_pi(sk)->conn) {
2956 release_sock(sk);
2957 return;
2958 }
2959
2960 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
2961
2962 release_sock(sk);
2963}
2964
2965static inline void l2cap_ertm_init(struct sock *sk)
2966{
2967 l2cap_pi(sk)->next_tx_seq = 0;
2968 l2cap_pi(sk)->expected_tx_seq = 0;
2969 l2cap_pi(sk)->expected_ack_seq = 0;
2970 l2cap_pi(sk)->unacked_frames = 0;
2971 l2cap_pi(sk)->buffer_seq = 0;
2972 l2cap_pi(sk)->frames_sent = 0;
2973 l2cap_pi(sk)->last_acked_seq = 0;
2974 l2cap_pi(sk)->sdu = NULL;
2975 l2cap_pi(sk)->sdu_last_frag = NULL;
2976 l2cap_pi(sk)->sdu_len = 0;
2977 atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
2978
2979 l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2980 l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2981
2982 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
2983 l2cap_pi(sk)->rx_state);
2984
2985 l2cap_pi(sk)->amp_id = 0;
2986 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2987 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
2988 l2cap_pi(sk)->amp_move_reqseq = 0;
2989 l2cap_pi(sk)->amp_move_event = 0;
2990
2991 INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
2992 INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
2993 l2cap_ertm_retrans_timeout);
2994 INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
2995 l2cap_ertm_monitor_timeout);
2996 INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
2997 skb_queue_head_init(SREJ_QUEUE(sk));
2998 skb_queue_head_init(TX_QUEUE(sk));
2999
3000 l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
3001 l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
3002 l2cap_pi(sk)->remote_tx_win);
3003}
3004
3005void l2cap_ertm_destruct(struct sock *sk)
3006{
3007 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
3008 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
3009}
3010
/* Cancel all ERTM timers (ack, retransmit, monitor) for the channel */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
3017
/* Called after received data has been consumed by the application.
 * For ERTM channels, process I-frames deferred during SREJ recovery
 * (disconnecting on failure) and clear the local-busy condition once
 * receive buffer space is available again.
 */
void l2cap_ertm_recv_done(struct sock *sk)
{
	lock_sock(sk);

	if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) {
		release_sock(sk);
		return;
	}

	/* Consume any queued incoming frames and update local busy status */
	if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
			l2cap_ertm_rx_queued_iframes(sk))
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

	release_sock(sk);
}
3037
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003038static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3039{
3040 switch (mode) {
3041 case L2CAP_MODE_STREAMING:
3042 case L2CAP_MODE_ERTM:
3043 if (l2cap_mode_supported(mode, remote_feat_mask))
3044 return mode;
3045 /* fall through */
3046 default:
3047 return L2CAP_MODE_BASIC;
3048 }
3049}
3050
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003051static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003053 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3054 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3055 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3056 pi->extended_control = 1;
3057 } else {
3058 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3059 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3060
3061 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3062 pi->extended_control = 0;
3063 }
3064}
3065
/* Combine the current aggregate flow spec (cur) with a joining
 * channel's flow spec (new), writing the result to agg.  max_sdu of
 * 0xFFFF or sdu_arr_time of 0xFFFFFFFF means "unknown rate"; if either
 * input rate is unknown the aggregate is marked unknown.  Otherwise
 * the SDU inter-arrival time is recomputed from the summed data rates
 * (rate = max_sdu * 1e6 / sdu_arr_time, arrival times being in
 * microseconds).
 *
 * NOTE(review): in the known-rate branch agg->max_sdu keeps
 * cur->max_sdu (only the arrival time is recomputed) -- confirm the
 * max_sdu component is not meant to be aggregated as well.
 */
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *new,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		/* current flow spec has known rate */
		if ((new->max_sdu == 0xFFFF) ||
				(new->sdu_arr_time == 0xFFFFFFFF)) {
			/* new fs has unknown rate, so aggregate is unknown */
			agg->max_sdu = 0xFFFF;
			agg->sdu_arr_time = 0xFFFFFFFF;
		} else {
			/* new fs has known rate, so aggregate is known */
			u64 cur_rate;
			u64 new_rate;
			cur_rate = cur->max_sdu * 1000000ULL;
			if (cur->sdu_arr_time)
				cur_rate = div_u64(cur_rate,
						cur->sdu_arr_time);
			new_rate = new->max_sdu * 1000000ULL;
			if (new->sdu_arr_time)
				new_rate = div_u64(new_rate,
						new->sdu_arr_time);
			cur_rate = cur_rate + new_rate;
			/* Guard div64_u64 against a zero combined rate */
			if (cur_rate)
				agg->sdu_arr_time = div64_u64(
					agg->max_sdu * 1000000ULL, cur_rate);
		}
	}
}
3095
3096static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3097{
3098 struct hci_ext_fs tx_fs;
3099 struct hci_ext_fs rx_fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003101 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003103 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3104 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3105 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3106 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3107 return 0;
3108
3109 l2cap_aggregate_fs(&chan->tx_fs,
3110 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3111 l2cap_aggregate_fs(&chan->rx_fs,
3112 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3113 hci_chan_modify(chan, &tx_fs, &rx_fs);
3114 return 1;
3115}
3116
/* Remove a departing channel's flow spec (old) from the current
 * aggregate (cur), writing the result to agg.  When the current rate
 * is known, the aggregate SDU inter-arrival time is recomputed from
 * the subtracted data rates (rate = max_sdu * 1e6 / sdu_arr_time,
 * arrival times in microseconds).
 *
 * NOTE(review): agg->max_sdu is left as cur->max_sdu -- confirm the
 * max_sdu component is not meant to shrink on deaggregation.
 */
static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *old,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		u64 cur_rate;
		u64 old_rate;
		cur_rate = cur->max_sdu * 1000000ULL;
		if (cur->sdu_arr_time)
			cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
		old_rate = old->max_sdu * 1000000ULL;
		if (old->sdu_arr_time)
			old_rate = div_u64(old_rate, old->sdu_arr_time);
		cur_rate = cur_rate - old_rate;
		/* Guard div64_u64 against a zero remaining rate */
		if (cur_rate)
			agg->sdu_arr_time = div64_u64(
				agg->max_sdu * 1000000ULL, cur_rate);
	}
}
3137
3138static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3139{
3140 struct hci_ext_fs tx_fs;
3141 struct hci_ext_fs rx_fs;
3142
3143 BT_DBG("chan %p", chan);
3144
3145 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3146 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3147 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3148 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3149 return 0;
3150
3151 l2cap_deaggregate_fs(&chan->tx_fs,
3152 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3153 l2cap_deaggregate_fs(&chan->rx_fs,
3154 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3155 hci_chan_modify(chan, &tx_fs, &rx_fs);
3156 return 1;
3157}
3158
/* Look up or create an HCI logical channel on the AMP controller
 * identified by amp_id for this L2CAP channel.  An existing channel to
 * the peer is reused (flow specs aggregated, reference taken);
 * otherwise a new one is accepted (incoming) or created (outgoing).
 * Returns the hci_chan, or NULL if the controller or its ACL link
 * cannot be found.  The temporary hdev reference is dropped on all
 * paths.
 */
static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct l2cap_pinfo *pi)
{
	struct hci_dev *hdev;
	struct hci_conn *hcon;
	struct hci_chan *chan;

	hdev = hci_dev_get(amp_id);
	if (!hdev)
		return NULL;

	BT_DBG("hdev %s", hdev->name);

	hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
	if (!hcon) {
		chan = NULL;
		goto done;
	}

	chan = hci_chan_list_lookup_id(hdev, hcon->handle);
	if (chan) {
		/* Reuse the existing logical channel */
		l2cap_aggregate(chan, pi);
		hci_chan_hold(chan);
		goto done;
	}

	if (bt_sk(pi)->parent) {
		/* Incoming connection */
		chan = hci_chan_accept(hcon,
			(struct hci_ext_fs *) &pi->local_fs,
			(struct hci_ext_fs *) &pi->remote_fs);
	} else {
		/* Outgoing connection */
		chan = hci_chan_create(hcon,
			(struct hci_ext_fs *) &pi->local_fs,
			(struct hci_ext_fs *) &pi->remote_fs);
	}
done:
	hci_dev_put(hdev);
	return chan;
}
3199
/* Build a Configure Request payload into @data for this channel,
 * advertising MTU, operating mode (RFC option), FCS, extended window,
 * and extended flow spec options as appropriate for the negotiated
 * mode.  Returns the number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection only happens on the first request */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode pinned by socket option; do not downgrade */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		/* Fall back to a mode the remote's features support */
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		/* All-zero RFC option explicitly requests basic mode */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		l2cap_setup_txwin(pi);
		/* The RFC option field caps at the enhanced window size;
		 * larger windows go in the extended window option below.
		 */
		if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
			rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
		else
			rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
			pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->tx_win);
		}

		if (pi->amp_id) {
			/* default best effort extended flow spec */
			struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request no FCS when allowed and either side opted out */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		/* Window/retransmission fields are unused in streaming */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
			pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3317

/* Build a Configure Request used to renegotiate ERTM timeouts and FCS
 * after an AMP channel move.  When moving to an AMP controller
 * (amp_move_id != 0), retransmit/monitor timeouts are derived from the
 * controller's best-effort flush timeout and FCS is dropped; moving
 * back to BR/EDR restores the defaults and CRC16.  Returns bytes
 * written, or -ECONNREFUSED for non-ERTM channels.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;
	u32 be_flush_to;

	BT_DBG("sk %p", sk);

	/* convert to milliseconds, round up */
	be_flush_to = (pi->conn->hcon->hdev->amp_be_flush_to + 999) / 1000;

	switch (pi->mode) {
	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		if (pi->amp_move_id) {
			/* Moving to AMP: scale timeouts to flush timeout */
			rfc.retrans_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
			rfc.monitor_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
		} else {
			/* Moving back to BR/EDR: default timeouts */
			rfc.retrans_timeout =
					cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
					cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		}
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		break;

	default:
		return -ECONNREFUSED;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {

		/* TODO assign fcs for br/edr based on socket config option */
		if (pi->amp_move_id)
			pi->local_conf.fcs = L2CAP_FCS_NONE;
		else
			pi->local_conf.fcs = L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						pi->local_conf.fcs);

		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3380
/* Parse the peer's L2CAP Configure Request (already buffered in
 * pi->conf_req / pi->conf_len) and build our Configure Response into
 * @data.
 *
 * @sk:   channel socket being configured
 * @data: output buffer, interpreted as a struct l2cap_conf_rsp
 *
 * Returns the number of bytes written into @data, or -ECONNREFUSED when
 * the requested mode cannot be accepted, when a lockstep (AMP)
 * configuration is missing a mandatory Extended Flow Spec, or when a
 * logical link cannot be admitted.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: walk every option in the request and record what
	 * the peer asked for. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			/* In lockstep config the flush timeout must come via
			 * the Extended Flow Spec instead */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* Legacy QoS option is rejected in lockstep config */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			/* Extended Flow Spec: only valid in lockstep (AMP)
			 * configuration, and only best-effort service is
			 * supported. */
			if (olen == sizeof(fs)) {
				pi->conf_state |= L2CAP_CONF_EFS_RECV;
				if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
					result = L2CAP_CONF_UNACCEPT;
					break;
				}
				memcpy(&fs, (void *) val, olen);
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
					break;
				}
				pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				pi->remote_fs.id = fs.id;
				pi->remote_fs.type = fs.type;
				pi->remote_fs.max_sdu =
						le16_to_cpu(fs.max_sdu);
				pi->remote_fs.sdu_arr_time =
						le32_to_cpu(fs.sdu_arr_time);
				pi->remote_fs.acc_latency =
						le32_to_cpu(fs.acc_latency);
				pi->remote_fs.flush_to =
						le32_to_cpu(fs.flush_to);
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			pi->extended_control = 1;
			pi->remote_tx_win = val;
			pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
			pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown, non-hint option types back so the
			 * peer knows which ones we rejected */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode may only be (re)negotiated on the first config exchange */
	if (pi->num_conf_rsp || pi->num_conf_req > 1)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			/* Mode not pinned by the socket: pick the best mode
			 * compatible with the peer's request and our
			 * connection feature mask */
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* Mode is pinned: refuse anything else */
		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		/* Give up after the second disagreement about the mode */
		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	/* Lockstep config requires the peer to send an Extended Flow Spec */
	if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
			!(pi->conf_state & L2CAP_CONF_EFS_RECV))
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU) {
			result = L2CAP_CONF_UNACCEPT;
			pi->omtu = L2CAP_DEFAULT_MIN_MTU;
		}
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			/* Extended window option, when present, overrides the
			 * RFC txwin_size field */
			if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
				pi->remote_tx_win = rfc.txwin_size;

			pi->remote_max_tx = rfc.max_transmit;

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			/* Timeouts in our response are informational; send
			 * the defaults */
			rfc.retrans_timeout =
				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			/* fs was filled above; lockstep guarantees EFS was
			 * received (checked before this block) */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		/* Lockstep: answer the first request with CONF_PENDING and,
		 * on an AMP controller, kick off logical link creation */
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
			pi->conf_state |= L2CAP_CONF_PEND_SENT;
			result = L2CAP_CONF_PENDING;

			if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
					pi->amp_id) {
				struct hci_chan *chan;
				/* Trigger logical link creation only on AMP */

				chan = l2cap_chan_admit(pi->amp_id, pi);
				if (!chan)
					return -ECONNREFUSED;

				chan->l2cap_sk = sk;
				if (chan->state == BT_CONNECTED)
					l2cap_create_cfm(chan, 0);
			}
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3601
/* Parse a Configure Request received while reconfiguring a channel for
 * an AMP channel move, and build the Configure Response into @data.
 *
 * @sk:   channel socket being reconfigured (request is buffered in
 *        pi->conf_req / pi->conf_len)
 * @data: output buffer, interpreted as a struct l2cap_conf_rsp
 *
 * Unlike initial configuration, a move-reconfiguration may not shrink
 * the MTU, change the extended transmit window, or change the mode.
 * Returns the number of bytes written into @data.
 */
static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	u16 mtu = pi->omtu;
	u16 tx_win = pi->remote_tx_win;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			/* Flush timeout option is not accepted when moving
			 * to an AMP controller (non-zero amp_move_id) */
			if (pi->amp_move_id)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			if (pi->amp_move_id)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			/* Mode changes (or basic mode) are refused during a
			 * move reconfiguration */
			if (pi->mode != rfc.mode ||
					rfc.mode == L2CAP_MODE_BASIC)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_FCS:
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			/* Only best-effort flow specs are supported */
			if (olen == sizeof(fs)) {
				memcpy(&fs, (void *) val, olen);
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
				else {
					pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				}
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown, non-hint option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	BT_DBG("result 0x%2.2x cur mode 0x%2.2x req  mode 0x%2.2x",
		result, pi->mode, rfc.mode);

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* Don't allow mtu to decrease. */
		if (mtu < pi->omtu)
			result = L2CAP_CONF_UNACCEPT;

		BT_DBG("mtu %d omtu %d", mtu, pi->omtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		/* Don't allow extended transmit window to change. */
		if (tx_win != pi->remote_tx_win) {
			result = L2CAP_CONF_UNACCEPT;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->remote_tx_win);
		}

		if (rfc.mode == L2CAP_MODE_ERTM) {
			pi->remote_conf.retrans_timeout =
				le16_to_cpu(rfc.retrans_timeout);
			pi->remote_conf.monitor_timeout =
				le16_to_cpu(rfc.monitor_timeout);

			BT_DBG("remote conf monitor timeout %d",
					pi->remote_conf.monitor_timeout);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
		}

	}

	if (result != L2CAP_CONF_SUCCESS)
		goto done;

	/* Effective FCS is on if either side requires it */
	pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs;

	if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG) {
		pi->flush_to = pi->remote_conf.flush_to;
		pi->retrans_timeout = pi->remote_conf.retrans_timeout;

		/* Moving back to BR/EDR uses the spec default monitor
		 * timeout; an AMP target uses the peer's value */
		if (pi->amp_move_id)
			pi->monitor_timeout = pi->remote_conf.monitor_timeout;
		else
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
		BT_DBG("mode %d monitor timeout %d",
			pi->mode, pi->monitor_timeout);

	}

done:
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3742
/* Parse the peer's Configure Response (@rsp, @len bytes) and build the
 * follow-up Configure Request into @data.
 *
 * @sk:     channel socket being configured
 * @rsp:    response payload (option list) from the peer
 * @len:    length of @rsp in bytes
 * @data:   output buffer, interpreted as a struct l2cap_conf_req
 * @result: in/out — caller-supplied result code; set to
 *          L2CAP_CONF_UNACCEPT if the proposed MTU is below the minimum
 *
 * Returns the number of bytes written into @data, or -ECONNREFUSED when
 * the peer insists on a mode this socket cannot switch to.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	/* Initialize rfc in case no rfc option is received */
	rfc.mode = pi->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Clamp to the minimum MTU and flag the rejection */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* If the socket pins the mode, refuse a different
			 * mode from the peer */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EXT_WINDOW:
			/* Cap the transmit window at the enhanced maximum */
			pi->tx_win = val;

			if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
				pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
					2, pi->tx_win);
			break;

		default:
			break;
		}
	}

	/* A basic-mode socket cannot be switched to another mode */
	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
		return -ECONNREFUSED;

	pi->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3830
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003831static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832{
3833 struct l2cap_conf_rsp *rsp = data;
3834 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003836 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003838 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003839 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003840 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841
3842 return ptr - data;
3843}
3844
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003845static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003846{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003847 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003848 int type, olen;
3849 unsigned long val;
3850 struct l2cap_conf_rfc rfc;
3851
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003852 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003853
Mat Martineau6b0bcce2011-11-29 16:17:02 -08003854 /* Initialize rfc in case no rfc option is received */
3855 rfc.mode = pi->mode;
Mat Martineauab043552011-12-05 15:54:44 -08003856 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3857 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3858 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
Mat Martineau6b0bcce2011-11-29 16:17:02 -08003859
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003860 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003861 return;
3862
3863 while (len >= L2CAP_CONF_OPT_SIZE) {
3864 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3865
3866 switch (type) {
3867 case L2CAP_CONF_RFC:
3868 if (olen == sizeof(rfc))
3869 memcpy(&rfc, (void *)val, olen);
3870 goto done;
3871 }
3872 }
3873
3874done:
3875 switch (rfc.mode) {
3876 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003877 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3878 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3879 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003880 break;
3881 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003882 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003883 }
3884}
3885
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003886static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3887{
3888 struct l2cap_pinfo *pi = l2cap_pi(sk);
3889 int type, olen;
3890 unsigned long val;
3891 struct l2cap_conf_ext_fs fs;
3892
3893 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3894
3895 while (len >= L2CAP_CONF_OPT_SIZE) {
3896 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3897 if ((type == L2CAP_CONF_EXT_FS) &&
3898 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3899 memcpy(&fs, (void *)val, olen);
3900 pi->local_fs.id = fs.id;
3901 pi->local_fs.type = fs.type;
3902 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3903 pi->local_fs.sdu_arr_time =
3904 le32_to_cpu(fs.sdu_arr_time);
3905 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3906 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3907 break;
3908 }
3909 }
3910
3911}
3912
3913static int l2cap_finish_amp_move(struct sock *sk)
3914{
3915 struct l2cap_pinfo *pi;
3916 int err;
3917
3918 BT_DBG("sk %p", sk);
3919
3920 pi = l2cap_pi(sk);
3921
3922 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
3923 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3924
3925 if (pi->ampcon)
3926 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
3927 else
3928 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
3929
3930 err = l2cap_setup_resegment(sk);
3931
3932 return err;
3933}
3934
/* Handle the Configure Response to a move-related reconfiguration.
 *
 * @sk:     channel socket being reconfigured
 * @rsp:    response option list from the peer
 * @len:    length of @rsp in bytes
 * @result: result code from the response
 *
 * On success the RFC option (if present) is validated against the
 * current mode; all ERTM timers are then stopped and the move state
 * machine is advanced: the accepting side answers the outstanding
 * poll, the initiating side sends an explicit poll and waits for the
 * F-flag.  Returns 0 or a negative error (-ECONNREFUSED).
 */
static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
					u16 result)
{
	int err = 0;
	struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);

	/* A response is only meaningful while a reconfiguration is
	 * actually in progress */
	if (pi->reconf_state == L2CAP_RECONF_NONE)
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		while (len >= L2CAP_CONF_OPT_SIZE) {
			int type, olen;
			unsigned long val;

			len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

			if (type == L2CAP_CONF_RFC) {
				if (olen == sizeof(rfc))
					memcpy(&rfc, (void *)val, olen);
				/* Refuse a mode that is neither our current
				 * mode nor ERTM */
				if (rfc.mode != pi->mode &&
					rfc.mode != L2CAP_MODE_ERTM) {
					err = -ECONNREFUSED;
					goto done;
				}
				break;
			}
		}
	}

done:
	/* Timers are restarted as needed once the move completes */
	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);

	if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;

		/* Respond to poll */
		err = l2cap_answer_move_poll(sk);

	} else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {

		/* If moving to BR/EDR, use default timeout defined by
		 * the spec */
		if (pi->amp_move_id == 0)
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

		if (pi->mode == L2CAP_MODE_ERTM) {
			l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
			pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
		}
	}

	return err;
}
3994
3995
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003996static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3997{
3998 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
3999
4000 if (rej->reason != 0x0000)
4001 return 0;
4002
4003 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4004 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004005 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01004006
4007 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004008 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004009
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004010 l2cap_conn_start(conn);
4011 }
4012
4013 return 0;
4014}
4015
/* Handle an incoming L2CAP Connection Request (BR/EDR or AMP): find a
 * listening socket for the requested PSM, allocate and initialize a
 * child socket, add it to the connection's channel list, and send a
 * connection response.
 *
 * @conn:     L2CAP connection the request arrived on
 * @cmd:      signalling command header (supplies the ident to echo)
 * @data:     request payload (struct l2cap_conn_req: psm + source CID)
 * @rsp_code: response opcode to send (e.g. L2CAP_CONN_RSP)
 * @amp_id:   AMP controller id; 0 means BR/EDR
 *
 * Returns the new child socket, or NULL on failure.  Runs in BH
 * context (bh_lock_sock / write_lock_bh).
 */
static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code,
					u8 amp_id)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	/* psm 0x0001 is SDP, which is exempt from the link-mode check */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* NOTE(review): 0x05 looks like the "authentication
		 * failure" disconnect reason — confirm against the HCI
		 * error code list */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		sk = NULL;
		goto response;
	}

	/* Keep the ACL up while the channel exists */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);
	/* Our locally-allocated CID goes back to the peer as its dcid */
	dcid = l2cap_pi(sk)->scid;
	l2cap_pi(sk)->amp_id = amp_id;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept before we proceed */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up. */
				if (amp_id) {
					sk->sk_state = BT_CONNECT2;
					result = L2CAP_CR_PEND;
				} else {
					sk->sk_state = BT_CONFIG;
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still outstanding */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask info exchange if not done yet */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
			result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return sk;
}
4158
4159static inline int l2cap_connect_req(struct l2cap_conn *conn,
4160 struct l2cap_cmd_hdr *cmd, u8 *data)
4161{
4162 l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 return 0;
4164}
4165
/* Handle an L2CAP Connect Response (also reused for Create Channel
 * Responses via l2cap_create_channel_rsp).
 *
 * The local channel is found either by the source CID echoed in the
 * response or, when scid is 0 (the peer refused before assigning a
 * CID), by the signalling identifier of our original request.
 *
 * NOTE(review): l2cap_get_chan_by_scid()/_by_ident() appear to return
 * with the socket bh-locked — the bh_unlock_sock() at the end pairs
 * with that; confirm against their definitions.
 *
 * Returns 0 on success, -EFAULT if no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	/* All response fields arrive little-endian on the wire */
	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* Refused before a CID was assigned: match by ident */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: record the peer's CID and move to
		 * the configuration phase.
		 */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Send our Configure Request only once */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Peer needs more time (e.g. authorization pending) */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Connection refused. */
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Defer teardown with a short timer retry */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004228static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004229{
4230 /* FCS is enabled only in ERTM or streaming mode, if one or both
4231 * sides request it.
4232 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004233 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4234 pi->fcs = L2CAP_FCS_NONE;
4235 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4236 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004237}
4238
/* Handle an L2CAP Configure Request on the signalling channel.
 *
 * Configuration options may be fragmented across several requests
 * (continuation bit 0x0001 in flags): each fragment is appended to the
 * per-channel conf_req buffer, and only when the final fragment
 * arrives are the accumulated options parsed and answered.  The same
 * handler also services the reconfiguration that follows an AMP
 * channel move, and the "lockstep" configuration sequence used for
 * AMP channels.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked — the bh_unlock_sock() at 'unlock' pairs with it;
 * confirm against its definition.
 *
 * Returns 0, or -ENOENT if no channel matches the destination CID.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rspbuf[64];
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
	struct sock *sk;
	int len;
	u8 amp_move_reconf = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
		"reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
		sk->sk_state, l2cap_pi(sk)->rx_state,
		l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
		l2cap_pi(sk)->amp_move_id);

	/* Detect a reconfig request due to channel move between
	 * BR/EDR and AMP
	 */
	if (sk->sk_state == BT_CONNECTED &&
		l2cap_pi(sk)->rx_state ==
		L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;

	if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
		amp_move_reconf = 1;

	/* Config is only legal in BT_CONFIG state, except for a
	 * post-move reconfiguration; anything else gets a Command
	 * Reject (reason 0x0002 = invalid CID).
	 */
	if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_REJECT, flags), rspbuf);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
		goto unlock;
	}

	/* Complete config.  Parsing builds the response into rspbuf and
	 * returns its length, or a negative value on unrecoverable
	 * disagreement.
	 */
	if (!amp_move_reconf)
		len = l2cap_parse_conf_req(sk, rspbuf);
	else
		len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);

	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_pi(sk)->conf_ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
			rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
			!l2cap_pi(sk)->amp_id) {
		/* Send success response right after pending if using
		 * lockstep config on BR/EDR
		 */
		rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
	}

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (amp_move_reconf)
		goto unlock;

	l2cap_pi(sk)->num_conf_rsp++;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: channel is fully open */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
			l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer but haven't asked for our own config
	 * yet — send our Configure Request now (only once).
	 */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
4366
4367static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4368{
4369 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4370 u16 scid, flags, result;
4371 struct sock *sk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004372 struct l2cap_pinfo *pi;
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03004373 int len = cmd->len - sizeof(*rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374
4375 scid = __le16_to_cpu(rsp->scid);
4376 flags = __le16_to_cpu(rsp->flags);
4377 result = __le16_to_cpu(rsp->result);
4378
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03004379 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
4380 scid, flags, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004382 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
4383 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384 return 0;
4385
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004386 pi = l2cap_pi(sk);
4387
4388 if (pi->reconf_state != L2CAP_RECONF_NONE) {
4389 l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
4390 goto done;
4391 }
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004392
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393 switch (result) {
4394 case L2CAP_CONF_SUCCESS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004395 if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
4396 !(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
4397 /* Lockstep procedure requires a pending response
4398 * before success.
4399 */
4400 l2cap_send_disconn_req(conn, sk, ECONNRESET);
4401 goto done;
4402 }
4403
4404 l2cap_conf_rfc_get(sk, rsp->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405 break;
4406
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004407 case L2CAP_CONF_PENDING:
4408 if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
4409 l2cap_send_disconn_req(conn, sk, ECONNRESET);
4410 goto done;
4411 }
4412
4413 l2cap_conf_rfc_get(sk, rsp->data, len);
4414
4415 pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;
4416
Peter Krystadf453bb32011-07-19 17:23:34 -07004417 l2cap_conf_ext_fs_get(sk, rsp->data, len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004418
4419 if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
4420 struct hci_chan *chan;
4421
4422 /* Already sent a 'pending' response, so set up
4423 * the logical link now
4424 */
Peter Krystadf453bb32011-07-19 17:23:34 -07004425 chan = l2cap_chan_admit(pi->amp_id, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004426 if (!chan) {
4427 l2cap_send_disconn_req(pi->conn, sk,
4428 ECONNRESET);
4429 goto done;
4430 }
4431
4432 chan->l2cap_sk = sk;
4433 if (chan->state == BT_CONNECTED)
4434 l2cap_create_cfm(chan, 0);
4435 }
4436
4437 goto done;
4438
Linus Torvalds1da177e2005-04-16 15:20:36 -07004439 case L2CAP_CONF_UNACCEPT:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004440 if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004441 char req[64];
4442
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004443 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004444 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004445 goto done;
4446 }
4447
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004448 /* throw out any old stored conf requests */
4449 result = L2CAP_CONF_SUCCESS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004450 len = l2cap_parse_conf_rsp(sk, rsp->data,
4451 len, req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004452 if (len < 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004453 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004454 goto done;
4455 }
4456
4457 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4458 L2CAP_CONF_REQ, len, req);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004459 pi->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004460 if (result != L2CAP_CONF_SUCCESS)
4461 goto done;
4462 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463 }
4464
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004465 default:
Marcel Holtmannb1235d72008-07-14 20:13:54 +02004466 sk->sk_err = ECONNRESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004467 l2cap_sock_set_timer(sk, HZ * 5);
4468 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469 goto done;
4470 }
4471
4472 if (flags & 0x01)
4473 goto done;
4474
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004475 pi->conf_state |= L2CAP_CONF_INPUT_DONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004477 if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
4478 set_default_fcs(pi);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004479
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004480 sk->sk_state = BT_CONNECTED;
4481
4482 if (pi->mode == L2CAP_MODE_ERTM ||
4483 pi->mode == L2CAP_MODE_STREAMING)
4484 l2cap_ertm_init(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004485
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486 l2cap_chan_ready(sk);
4487 }
4488
4489done:
4490 bh_unlock_sock(sk);
4491 return 0;
4492}
4493
/* Handle an L2CAP Disconnect Request on the signalling channel.
 *
 * Always acknowledges the peer with a Disconnect Response (echoing the
 * channel's CIDs), then tears the channel down: purges pending
 * transmit/SREJ queues, cancels ERTM timers, and deletes the channel —
 * unless the socket is locked by user context, in which case teardown
 * is deferred via a short timer (HZ / 5).
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked — every exit path below unlocks it.
 *
 * Returns 0 in all cases (an unknown CID is silently ignored).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(TX_QUEUE(sk));

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			skb_queue_purge(SREJ_QUEUE(sk));

			/* Stop ERTM housekeeping before the channel goes */
			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Retry teardown shortly via the socket timer */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	/* Free the socket if it is now orphaned (zapped) */
	l2cap_sock_kill(sk);
	return 0;
}
4545
/* Handle an L2CAP Disconnect Response: the peer has acknowledged our
 * earlier Disconnect Request, so finish tearing the channel down.
 *
 * If the socket is currently locked by user context, only mark it
 * BT_DISCONN and rearm a short timer (HZ / 5) so the teardown is
 * retried later instead of racing with the lock owner.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked — both exit paths below unlock it.
 *
 * Returns 0 in all cases.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is an orderly, expected disconnect */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	/* Free the socket if it is now orphaned (zapped) */
	l2cap_sock_kill(sk);
	return 0;
}
4576
4577static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4578{
4579 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580 u16 type;
4581
4582 type = __le16_to_cpu(req->type);
4583
4584 BT_DBG("type 0x%4.4x", type);
4585
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004586 if (type == L2CAP_IT_FEAT_MASK) {
4587 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004588 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004589 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4590 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4591 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004592 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004593 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004594 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004595 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004596 l2cap_send_cmd(conn, cmd->ident,
4597 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004598 } else if (type == L2CAP_IT_FIXED_CHAN) {
4599 u8 buf[12];
4600 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4601 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4602 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4603 memcpy(buf + 4, l2cap_fixed_chan, 8);
4604 l2cap_send_cmd(conn, cmd->ident,
4605 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004606 } else {
4607 struct l2cap_info_rsp rsp;
4608 rsp.type = cpu_to_le16(type);
4609 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4610 l2cap_send_cmd(conn, cmd->ident,
4611 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4612 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613
4614 return 0;
4615}
4616
/* Handle an L2CAP Information Response.
 *
 * Drives the connection-level information exchange done right after
 * link setup: first the extended feature mask, then (if the peer
 * advertises it) the fixed-channel map.  Once the exchange finishes,
 * or fails, pending channels are started via l2cap_conn_start().
 *
 * Returns 0 in all cases.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query — treat the exchange as done
		 * and unblock pending channels.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* First octet of the map carries the channels we care about */
		conn->fc_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4670
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004671static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4672 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
4673{
4674 struct l2cap_move_chan_req req;
4675 u8 ident;
4676
4677 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4678 (int) dest_amp_id);
4679
4680 ident = l2cap_get_ident(conn);
4681 if (pi)
4682 pi->ident = ident;
4683
4684 req.icid = cpu_to_le16(icid);
4685 req.dest_amp_id = dest_amp_id;
4686
4687 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
4688}
4689
4690static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4691 u16 icid, u16 result)
4692{
4693 struct l2cap_move_chan_rsp rsp;
4694
4695 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4696
4697 rsp.icid = cpu_to_le16(icid);
4698 rsp.result = cpu_to_le16(result);
4699
4700 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4701}
4702
4703static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4704 struct l2cap_pinfo *pi, u16 icid, u16 result)
4705{
4706 struct l2cap_move_chan_cfm cfm;
4707 u8 ident;
4708
4709 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4710
4711 ident = l2cap_get_ident(conn);
4712 if (pi)
4713 pi->ident = ident;
4714
4715 cfm.icid = cpu_to_le16(icid);
4716 cfm.result = cpu_to_le16(result);
4717
4718 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4719}
4720
4721static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4722 u16 icid)
4723{
4724 struct l2cap_move_chan_cfm_rsp rsp;
4725
4726 BT_DBG("icid %d", (int) icid);
4727
4728 rsp.icid = cpu_to_le16(icid);
4729 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4730}
4731
4732static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4733 struct l2cap_cmd_hdr *cmd, u8 *data)
4734{
4735 struct l2cap_create_chan_req *req =
4736 (struct l2cap_create_chan_req *) data;
4737 struct sock *sk;
4738 u16 psm, scid;
4739
4740 psm = le16_to_cpu(req->psm);
4741 scid = le16_to_cpu(req->scid);
4742
4743 BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
4744 (int) req->amp_id);
4745
4746 if (req->amp_id) {
4747 struct hci_dev *hdev;
4748
4749 /* Validate AMP controller id */
Peter Krystad4e1c9fa2011-11-10 12:28:45 -08004750 hdev = hci_dev_get(req->amp_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004751 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4752 struct l2cap_create_chan_rsp rsp;
4753
4754 rsp.dcid = 0;
4755 rsp.scid = cpu_to_le16(scid);
4756 rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
4757 rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
4758
4759 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4760 sizeof(rsp), &rsp);
4761
4762 if (hdev)
4763 hci_dev_put(hdev);
4764
4765 return 0;
4766 }
4767
4768 hci_dev_put(hdev);
4769 }
4770
4771 sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4772 req->amp_id);
4773
Mat Martineau55f2a622011-09-19 13:20:17 -07004774 if (sk)
4775 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004776
Mat Martineau55f2a622011-09-19 13:20:17 -07004777 if (sk && req->amp_id &&
4778 (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004779 amp_accept_physical(conn, req->amp_id, sk);
4780
4781 return 0;
4782}
4783
/* Handle an L2CAP Create Channel Response.
 *
 * A Create Channel Response carries the same fields as a Connect
 * Response, so it is processed by the shared l2cap_connect_rsp()
 * handler; its return value is passed through unchanged.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4791
/* Handle an L2CAP Move Channel Request (peer wants to move this
 * channel between BR/EDR and an AMP controller; we are the responder).
 *
 * Validates the request — channel exists and uses ERTM/streaming mode,
 * destination differs from the current controller, destination AMP is
 * up, no conflicting move in progress (collision resolved by address
 * comparison), and local policy permits AMP — then either starts the
 * move toward BR/EDR or kicks off physical-link acceptance toward the
 * AMP.  A Move Channel Response carrying the outcome is always sent.
 *
 * NOTE(review): this handler uses lock_sock()/release_sock() while
 * sibling handlers use bh_lock_sock(); confirm the calling context
 * permits a sleeping lock here.
 *
 * Returns 0 in all cases.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid = 0;
	/* Default verdict if any check below fails */
	u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		goto send_move_response;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Only dynamically allocated ERTM/streaming channels can move */
	if (pi->scid < L2CAP_CID_DYN_START ||
		(pi->mode != L2CAP_MODE_ERTM &&
		 pi->mode != L2CAP_MODE_STREAMING)) {
		goto send_move_response;
	}

	if (pi->amp_id == req->dest_amp_id) {
		result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* A move is already in progress: resolve the collision by
	 * address comparison — the side with the higher address wins
	 * and refuses the peer's request.
	 */
	if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
		pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
		bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
		goto send_move_response;
	}

	/* Local policy pins this channel to BR/EDR */
	if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
		result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Request accepted: become the move responder */
	pi->amp_move_cmd_ident = cmd->ident;
	pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
	l2cap_amp_move_setup(sk);
	pi->amp_move_id = req->dest_amp_id;
	icid = pi->dcid;

	if (req->dest_amp_id == 0) {
		/* Moving to BR/EDR */
		if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			result = L2CAP_MOVE_CHAN_PENDING;
		} else {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
			result = L2CAP_MOVE_CHAN_SUCCESS;
		}
	} else {
		/* Moving to AMP: wait for the physical link */
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_accept_physical(pi->conn, req->dest_amp_id, sk);
		result = L2CAP_MOVE_CHAN_PENDING;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	if (sk)
		release_sock(sk);

	return 0;
}
4881
/* Handle an L2CAP Move Channel Response (we are the move initiator).
 *
 * On success/pending the channel is located by CID (icid is our source
 * CID in that direction); on failure it is located by the signalling
 * ident of our request, since a refusal may not carry a usable icid.
 * Advances the amp_move_state machine — possibly admitting a logical
 * link on the target AMP controller — and replies with a Move Channel
 * Confirmation (confirmed or unconfirmed).
 *
 * NOTE(review): this handler uses lock_sock()/release_sock() while
 * sibling handlers use bh_lock_sock(); confirm the calling context
 * permits a sleeping lock here.
 *
 * Returns 0 in all cases.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	switch (result) {
	case L2CAP_MOVE_CHAN_SUCCESS:
	case L2CAP_MOVE_CHAN_PENDING:
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);
		if (result == L2CAP_MOVE_CHAN_PENDING)
			l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);

		if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent when logical link
			 * is complete.
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
		} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
			pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			} else {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			}
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
			/* Flow spec defaults used until negotiated */
			struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			struct hci_chan *chan;
			/* Moving to AMP */
			if (result == L2CAP_MOVE_CHAN_SUCCESS) {
				/* Remote is ready, send confirm immediately
				 * after logical link is ready
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			} else {
				/* Both logical link and move success
				 * are required to confirm
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
			}
			pi->remote_fs = default_fs;
			pi->local_fs = default_fs;
			chan = l2cap_chan_admit(pi->amp_move_id, pi);
			if (!chan) {
				/* Logical link not available */
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
				break;
			}
			if (chan->state == BT_CONNECTED) {
				/* Logical link is already ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				if (result == L2CAP_MOVE_CHAN_SUCCESS) {
					/* Can confirm now */
					l2cap_send_move_chan_cfm(conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				} else {
					/* Now only need move success
					 * required to confirm
					 */
					pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
				}
			} else
				chan->l2cap_sk = sk;
		} else {
			/* Any other amp move state means the move failed. */
			l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		break;
	default:
		/* Failed (including collision case) */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Could not locate channel, icid is best guess */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);

		if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
				/* Lost the collision: let the peer drive */
				pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
			else {
				/* Cleanup - cancel move */
				pi->amp_move_id = pi->amp_id;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
				l2cap_amp_move_revert(sk);
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			}
		}

		l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		break;
	}

	if (sk)
		release_sock(sk);

	return 0;
}
5032
/* Handle an incoming Move Channel Confirm command (AMP channel move).
 *
 * Looks up the channel by ICID (our destination CID), applies the
 * confirmed/unconfirmed result to the move state machine, and always
 * answers with a Move Channel Confirm Response -- even when no matching
 * channel is found.  Returns 0 unconditionally; problems are only logged.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
	struct sock *sk;
	u16 icid, result;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	/* The channel-list lock only covers the lookup; the socket itself
	 * is locked with lock_sock() below.
	 */
	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk) {
		/* Unknown channel - still ack the confirm below */
		BT_DBG("Bad channel (%d)", (int) icid);
		goto send_move_confirm_response;
	}

	lock_sock(sk);

	if (l2cap_pi(sk)->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
			/* Move succeeded; adopt the new controller id */
			l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
			if ((!l2cap_pi(sk)->amp_id) &&
					(l2cap_pi(sk)->ampchan)) {
				/* Have moved off of AMP, free the channel */
				if (!hci_chan_put(l2cap_pi(sk)->ampchan))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
								l2cap_pi(sk));
				l2cap_pi(sk)->ampchan = NULL;
				l2cap_pi(sk)->ampcon = NULL;
			}
			l2cap_amp_move_success(sk);
		} else {
			/* Move not confirmed - fall back to the prior id */
			l2cap_pi(sk)->amp_move_id = l2cap_pi(sk)->amp_id;
			l2cap_amp_move_revert(sk);
		}
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	} else if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
		/* Confirm arrived before the logical link completed */
		BT_DBG("Bad AMP_MOVE_STATE (%d)", l2cap_pi(sk)->amp_move_state);
	}

send_move_confirm_response:
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (sk)
		release_sock(sk);

	return 0;
}
5088
/* Handle an incoming Move Channel Confirm Response (final step of an
 * AMP channel move that we confirmed).
 *
 * Looks up the channel by ICID (our source CID); if we were waiting for
 * this response, the move becomes final: adopt the new controller id,
 * release the AMP logical link when moving back to BR/EDR, and resume
 * data flow via l2cap_amp_move_success().  Always returns 0.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp =
		(struct l2cap_move_chan_cfm_rsp *) data;
	struct sock *sk;
	u16 icid;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", (int) icid);

	/* Lock only covers the lookup; socket locked below */
	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		return 0;

	lock_sock(sk);

	/* Response arrived, stop the move timeout */
	l2cap_sock_clear_timer(sk);

	if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;

		if (!l2cap_pi(sk)->amp_id) {
			/* Have moved off of AMP, free the channel */
			l2cap_pi(sk)->ampcon = NULL;
			if (l2cap_pi(sk)->ampchan) {
				/* Deaggregate only when this was the last
				 * reference on the hci_chan
				 */
				if (!hci_chan_put(l2cap_pi(sk)->ampchan))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
								l2cap_pi(sk));
			}
			l2cap_pi(sk)->ampchan = NULL;
		}

		l2cap_amp_move_success(sk);

		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	}

	release_sock(sk);

	return 0;
}
5137
5138static void l2cap_amp_signal_worker(struct work_struct *work)
5139{
5140 int err = 0;
5141 struct l2cap_amp_signal_work *ampwork =
5142 container_of(work, struct l2cap_amp_signal_work, work);
5143
5144 switch (ampwork->cmd.code) {
5145 case L2CAP_MOVE_CHAN_REQ:
5146 err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
5147 ampwork->data);
5148 break;
5149
5150 case L2CAP_MOVE_CHAN_RSP:
5151 err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
5152 ampwork->data);
5153 break;
5154
5155 case L2CAP_MOVE_CHAN_CFM:
5156 err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
5157 ampwork->data);
5158 break;
5159
5160 case L2CAP_MOVE_CHAN_CFM_RSP:
5161 err = l2cap_move_channel_confirm_rsp(ampwork->conn,
5162 &ampwork->cmd, ampwork->data);
5163 break;
5164
5165 default:
5166 BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
5167 err = -EINVAL;
5168 break;
5169 }
5170
5171 if (err) {
5172 struct l2cap_cmd_rej rej;
5173 BT_DBG("error %d", err);
5174
5175 /* In this context, commands are only rejected with
5176 * "command not understood", code 0.
5177 */
5178 rej.reason = cpu_to_le16(0);
5179 l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
5180 L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5181 }
5182
5183 kfree_skb(ampwork->skb);
5184 kfree(ampwork);
5185}
5186
/* Callback invoked when AMP physical link setup has completed.
 *
 * @result:    L2CAP_CREATE_CHAN_SUCCESS / L2CAP_MOVE_CHAN_SUCCESS on
 *             success, otherwise a failure code (may be -EINVAL)
 * @local_id:  local AMP controller id
 * @remote_id: remote AMP controller id
 * @sk:        the L2CAP socket being created on or moved to the AMP
 *
 * Branches on the channel's state: a not-yet-connected channel is either
 * an incoming create (answer with Create Channel Response and start
 * configuration) or an outgoing create (send the request, or fall back
 * to a BR/EDR connect on failure).  A connected channel continues the
 * move as initiator or responder; any other combination aborts the move
 * and restarts data transmission.
 */
void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
	struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
		(int) local_id, (int) remote_id, sk);

	lock_sock(sk);

	/* Channel already going away - nothing to do */
	if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
		release_sock(sk);
		return;
	}

	pi = l2cap_pi(sk);

	if (sk->sk_state != BT_CONNECTED) {
		if (bt_sk(sk)->parent) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			/* Incoming channel on AMP */
			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Send successful response */
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			} else {
				/* Send negative response */
				rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			}

			l2cap_send_cmd(pi->conn, pi->ident,
					L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Kick off the configuration phase */
				sk->sk_state = BT_CONFIG;
				pi->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(pi->conn,
					l2cap_get_ident(pi->conn),
					L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		} else {
			/* Outgoing channel on AMP */
			if (result != L2CAP_CREATE_CHAN_SUCCESS) {
				/* Revert to BR/EDR connect */
				l2cap_send_conn_req(sk);
			} else {
				pi->amp_id = local_id;
				l2cap_send_create_chan_req(sk, remote_id);
			}
		}
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		/* We initiated the move: pause traffic and ask the peer */
		l2cap_amp_move_setup(sk);
		pi->amp_move_id = local_id;
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;

		l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		struct hci_chan *chan;
		struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
				0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
		pi->remote_fs = default_fs;
		pi->local_fs = default_fs;
		chan = l2cap_chan_admit(pi->amp_move_id, pi);
		if (chan) {
			if (chan->state == BT_CONNECTED) {
				/* Logical link is ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			} else {
				/* Wait for logical link to be ready */
				chan->l2cap_sk = sk;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			}
		} else {
			/* Logical link not available */
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}
	} else {
		/* Physical link failed, or we were in no move role */
		BT_DBG("result %d, role %d, local_busy %d", result,
			(int) pi->amp_move_role,
			(int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));

		if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			if (result == -EINVAL)
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
			else
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;

		/* Clear local busy if receive buffer space is available */
		if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
			l2cap_ertm_tx(sk, 0, 0,
					L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

		/* Restart data transmission */
		l2cap_ertm_send(sk);
	}

	release_sock(sk);
}
5314
/* Callback invoked when an AMP logical link setup finishes.
 *
 * @chan:   the hci_chan for the logical link (carries the waiting socket
 *          in chan->l2cap_sk)
 * @status: 0 on success, non-zero on failure
 *
 * On success, either completes channel configuration (new channel on
 * AMP) or advances the channel-move state machine, sending the pending
 * Move Channel Response/Confirm.  On failure, aborts the connection or
 * the move as appropriate.  Always returns 0.
 *
 * NOTE(review): chan is dereferenced (chan->conn, chan->l2cap_sk) before
 * the "(chan != NULL)" test below; presumably callers never pass NULL --
 * confirm against l2cap_logical_link_worker/hci callers.
 */
int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;

	BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);

	sk = chan->l2cap_sk;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	/* Ignore a completion for a channel that never got on an AMP
	 * and is not mid-configuration
	 */
	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
		release_sock(sk);
		return 0;
	}

	pi = l2cap_pi(sk);

	if ((!status) && (chan != NULL)) {
		/* Logical link is up; record it on the channel */
		pi->ampchan = chan;
		pi->ampcon = chan->conn;
		pi->ampcon->l2cap_data = pi->conn;

		if (sk->sk_state != BT_CONNECTED) {
			struct l2cap_conf_rsp rsp;

			/* Must use spinlock to prevent concurrent
			 * execution of l2cap_config_rsp()
			 */
			bh_lock_sock(sk);
			l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(sk, &rsp,
						L2CAP_CONF_SUCCESS, 0), &rsp);
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
				/* Both directions configured - channel ready */
				set_default_fcs(l2cap_pi(sk));

				sk->sk_state = BT_CONNECTED;

				if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
					l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
					l2cap_ertm_init(sk);

				l2cap_chan_ready(sk);
			}
			bh_unlock_sock(sk);
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent after a success
			 * response is received
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
			/* Hold off while locally busy, otherwise finish the
			 * move for whichever role we hold
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_SUCCESS);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			}
		} else {
			/* Move was not in expected state, free the
			 * logical link
			 */
			hci_chan_put(pi->ampchan);
			pi->ampcon = NULL;
			pi->ampchan = NULL;
		}
	} else {
		/* Logical link setup failed. */

		if (sk->sk_state != BT_CONNECTED)
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			/* Refuse the move we were responding to */
			l2cap_amp_move_revert(sk);
			l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_CONFIG);
		} else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if ((pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
				(pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
				/* Remote has only sent pending or
				 * success responses, clean up
				 */
				l2cap_amp_move_revert(sk);
				l2cap_pi(sk)->amp_move_role =
					L2CAP_AMP_MOVE_NONE;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			}

			/* Other amp move states imply that the move
			 * has already aborted
			 */
			l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
				L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}

		pi->ampcon = NULL;
		pi->ampchan = NULL;
	}

	release_sock(sk);
	return 0;
}
5440
5441static void l2cap_logical_link_worker(struct work_struct *work)
5442{
5443 struct l2cap_logical_link_work *log_link_work =
5444 container_of(work, struct l2cap_logical_link_work, work);
5445
5446 l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
5447 kfree(log_link_work);
5448}
5449
5450static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5451{
5452 struct l2cap_logical_link_work *amp_work;
5453
5454 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5455 if (!amp_work)
5456 return -ENOMEM;
5457
5458 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5459 amp_work->chan = chan;
5460 amp_work->status = status;
5461 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5462 kfree(amp_work);
5463 return -ENOMEM;
5464 }
5465
5466 return 0;
5467}
5468
/* HCI callback for a logical link modification result.
 * Currently only logs the event; flow-spec rollback on failure is not
 * yet implemented (see TODO).  Always returns 0.
 */
int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
{
	struct l2cap_conn *conn = chan->conn->l2cap_data;

	BT_DBG("chan %p conn %p status %d", chan, conn, status);

	/* TODO: if failed status restore previous fs */
	return 0;
}
5478
/* HCI callback: an AMP logical link was destroyed.
 *
 * Walks every channel on the connection and, for each one that was
 * riding on the destroyed hci_chan, drops the AMP references and
 * re-initiates a move (back toward BR/EDR) via l2cap_amp_move_init().
 * The @reason argument is currently unused.  Always returns 0.
 */
int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = chan->conn->l2cap_data;
	struct sock *sk;

	BT_DBG("chan %p conn %p", chan, conn);

	if (!conn)
		return 0;

	l = &conn->chan_list;

	/* Read lock protects the list walk; each socket is locked
	 * individually with bh_lock_sock()
	 */
	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);
		/* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
		if (l2cap_pi(sk)->ampchan == chan) {
			l2cap_pi(sk)->ampchan = NULL;
			l2cap_pi(sk)->ampcon = NULL;
			l2cap_amp_move_init(sk);
		}
		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;


}
5511
5512static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5513 u8 *data, struct sk_buff *skb)
5514{
5515 struct l2cap_amp_signal_work *amp_work;
5516
5517 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5518 if (!amp_work)
5519 return -ENOMEM;
5520
5521 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5522 amp_work->conn = conn;
5523 amp_work->cmd = *cmd;
5524 amp_work->data = data;
5525 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5526 if (!amp_work->skb) {
5527 kfree(amp_work);
5528 return -ENOMEM;
5529 }
5530
5531 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5532 kfree_skb(amp_work->skb);
5533 kfree(amp_work);
5534 return -ENOMEM;
5535 }
5536
5537 return 0;
5538}
5539
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005540static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005541 u16 to_multiplier)
5542{
5543 u16 max_latency;
5544
5545 if (min > max || min < 6 || max > 3200)
5546 return -EINVAL;
5547
5548 if (to_multiplier < 10 || to_multiplier > 3200)
5549 return -EINVAL;
5550
5551 if (max >= to_multiplier * 8)
5552 return -EINVAL;
5553
5554 max_latency = (to_multiplier * 8 / max) - 1;
5555 if (latency > 499 || latency > max_latency)
5556 return -EINVAL;
5557
5558 return 0;
5559}
5560
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the master of the link (-EINVAL otherwise);
 * a malformed length yields -EPROTO.  The parameters are validated with
 * l2cap_check_conn_param(); the response (accepted/rejected) is sent
 * first, and only on acceptance is the controller asked to apply the
 * new parameters via hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to update parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the new parameters only after accepting them */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5602
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005603static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005604 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
5605 struct sk_buff *skb)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005606{
5607 int err = 0;
5608
5609 switch (cmd->code) {
5610 case L2CAP_COMMAND_REJ:
5611 l2cap_command_rej(conn, cmd, data);
5612 break;
5613
5614 case L2CAP_CONN_REQ:
5615 err = l2cap_connect_req(conn, cmd, data);
5616 break;
5617
5618 case L2CAP_CONN_RSP:
5619 err = l2cap_connect_rsp(conn, cmd, data);
5620 break;
5621
5622 case L2CAP_CONF_REQ:
5623 err = l2cap_config_req(conn, cmd, cmd_len, data);
5624 break;
5625
5626 case L2CAP_CONF_RSP:
5627 err = l2cap_config_rsp(conn, cmd, data);
5628 break;
5629
5630 case L2CAP_DISCONN_REQ:
5631 err = l2cap_disconnect_req(conn, cmd, data);
5632 break;
5633
5634 case L2CAP_DISCONN_RSP:
5635 err = l2cap_disconnect_rsp(conn, cmd, data);
5636 break;
5637
5638 case L2CAP_ECHO_REQ:
5639 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5640 break;
5641
5642 case L2CAP_ECHO_RSP:
5643 break;
5644
5645 case L2CAP_INFO_REQ:
5646 err = l2cap_information_req(conn, cmd, data);
5647 break;
5648
5649 case L2CAP_INFO_RSP:
5650 err = l2cap_information_rsp(conn, cmd, data);
5651 break;
5652
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005653 case L2CAP_CREATE_CHAN_REQ:
5654 err = l2cap_create_channel_req(conn, cmd, data);
5655 break;
5656
5657 case L2CAP_CREATE_CHAN_RSP:
5658 err = l2cap_create_channel_rsp(conn, cmd, data);
5659 break;
5660
5661 case L2CAP_MOVE_CHAN_REQ:
5662 case L2CAP_MOVE_CHAN_RSP:
5663 case L2CAP_MOVE_CHAN_CFM:
5664 case L2CAP_MOVE_CHAN_CFM_RSP:
5665 err = l2cap_sig_amp(conn, cmd, data, skb);
5666 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005667 default:
5668 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5669 err = -EINVAL;
5670 break;
5671 }
5672
5673 return err;
5674}
5675
5676static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5677 struct l2cap_cmd_hdr *cmd, u8 *data)
5678{
5679 switch (cmd->code) {
5680 case L2CAP_COMMAND_REJ:
5681 return 0;
5682
5683 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005684 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005685
5686 case L2CAP_CONN_PARAM_UPDATE_RSP:
5687 return 0;
5688
5689 default:
5690 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5691 return -EINVAL;
5692 }
5693}
5694
/* Process every signaling command in an incoming signaling-channel skb.
 *
 * Each command is copied into a local header, dispatched to the LE or
 * BR/EDR handler depending on the link type, and answered with a
 * Command Reject on handler failure.  Parsing stops at the first
 * truncated or ident-less command.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Payload must fit in what remains, and ident 0 is invalid */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
							data, skb);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same skb */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5742
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005743static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005744{
5745 u16 our_fcs, rcv_fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005746 int hdr_size;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005747
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005748 if (pi->extended_control)
5749 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
5750 else
5751 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
5752
5753 if (pi->fcs == L2CAP_FCS_CRC16) {
5754 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005755 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5756 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5757
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005758 if (our_fcs != rcv_fcs) {
5759 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005760 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005761 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005762 }
5763 return 0;
5764}
5765
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005766static void l2cap_ertm_pass_to_tx(struct sock *sk,
5767 struct bt_l2cap_control *control)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005768{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005769 BT_DBG("sk %p, control %p", sk, control);
5770 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005771}
5772
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005773static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
5774 struct bt_l2cap_control *control)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005775{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005776 BT_DBG("sk %p, control %p", sk, control);
5777 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
5778}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005779
/* Retransmit every I-frame currently queued on the channel's
 * retransmission list.
 *
 * Skips entirely while the remote is busy or while an AMP move is in
 * progress (state is neither STABLE nor WAIT_PREPARE).  Each frame's
 * retry count is checked against max_tx; exceeding it tears down the
 * connection.  The control field of each retransmitted frame is updated
 * with the current buffer_seq and a pending F-bit before sending.
 */
static void l2cap_ertm_resend(struct sock *sk)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	/* Remote busy: hold retransmissions until it clears */
	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	/* Do not send on a channel that is mid-move between controllers */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return;

	while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&pi->retrans_list);

		skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
				(int) seq);
			continue;
		}

		bt_cb(skb)->retries += 1;
		control = bt_cb(skb)->control;

		/* max_tx of 0 means unlimited retries */
		if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
			BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			l2cap_seq_list_clear(&pi->retrans_list);
			break;
		}

		/* Refresh the ack (reqseq) and F-bit in the resent frame */
		control.reqseq = pi->buffer_seq;
		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
			control.final = 1;
			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		} else {
			control.final = 0;
		}

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			/* Allocation failed; give up on this resend pass */
			l2cap_seq_list_clear(&pi->retrans_list);
			break;
		}

		/* Update skb contents */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(&control),
					tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Control field changed, so the FCS must be recomputed */
		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(tx_skb);

		/* Hold the socket until the skb destructor runs */
		sock_hold(sk);
		tx_skb->sk = sk;
		tx_skb->destructor = l2cap_skb_destructor;
		atomic_inc(&pi->ertm_queued);

		l2cap_do_send(sk, tx_skb);

		BT_DBG("Resent txseq %d", (int)control.txseq);

		pi->last_acked_seq = pi->buffer_seq;
	}
}
5865
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005866static inline void l2cap_ertm_retransmit(struct sock *sk,
5867 struct bt_l2cap_control *control)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03005868{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005869 BT_DBG("sk %p, control %p", sk, control);
5870
5871 l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
5872 l2cap_ertm_resend(sk);
5873}
5874
/* Retransmit all unacked I-frames starting from control->reqseq.
 *
 * Rebuilds the retransmission list from the transmit queue (from the
 * frame matching reqseq up to, but not including, sk_send_head) and
 * runs the resend pass.  A P-bit in @control arms the F-bit for the
 * first retransmitted frame.  Nothing is sent while the remote is busy.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
			struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* A poll must be answered with a final bit on the next frame */
	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	/* Start from a clean list; it is rebuilt below */
	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		/* Find the first frame to retransmit... */
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
				skb == sk->sk_send_head)
				break;
		}

		/* ...and queue every sent-but-unacked frame after it */
		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
						bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
5911
5912static inline void append_skb_frag(struct sk_buff *skb,
5913 struct sk_buff *new_frag, struct sk_buff **last_frag)
5914{
5915 /* skb->len reflects data in skb as well as all fragments
5916 skb->data_len reflects only data in fragments
5917 */
5918 BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);
5919
5920 if (!skb_has_frag_list(skb))
5921 skb_shinfo(skb)->frag_list = new_frag;
5922
5923 new_frag->next = NULL;
5924
5925 (*last_frag)->next = new_frag;
5926 *last_frag = new_frag;
5927
5928 skb->len += new_frag->len;
5929 skb->data_len += new_frag->len;
5930 skb->truesize += new_frag->truesize;
5931}
5932
/* Reassemble an in-sequence ERTM/streaming I-frame into an SDU based on
 * its SAR (Segmentation And Reassembly) bits and deliver complete SDUs
 * to the socket receive queue.
 *
 * Ownership: on success the skb is consumed (queued to the socket or
 * linked into the partial SDU in pi->sdu).  On any error the partial
 * SDU and the skb are both freed before returning.
 *
 * Returns 0 on success or a negative errno (-EINVAL for malformed
 * SAR sequences, -EMSGSIZE for oversized SDUs, or the
 * sock_queue_rcv_skb() error).
 */
static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
			struct bt_l2cap_control *control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = -EINVAL;

	BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);

	BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
		skb, skb->len, skb->truesize);

	if (!control)
		return err;

	pi = l2cap_pi(sk);

	BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
		control->frame_type, control->sar, control->txseq,
		control->reqseq, control->final);

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A leftover partial SDU means a segmented transfer was
		 * never completed; discard it before queuing this frame.
		 */
		if (pi->sdu) {
			BT_DBG("Unexpected unsegmented PDU during reassembly");
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}

		BT_DBG("Unsegmented");
		err = sock_queue_rcv_skb(sk, skb);
		break;

	case L2CAP_SAR_START:
		if (pi->sdu) {
			BT_DBG("Unexpected start PDU during reassembly");
			kfree_skb(pi->sdu);
		}

		/* A start PDU carries the total SDU length as a 2-byte
		 * little-endian prefix before the payload.
		 */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be strictly shorter than the
		 * announced SDU length; otherwise err stays -EINVAL and
		 * the frame is dropped in the error path below.
		 */
		if (skb->len >= pi->sdu_len)
			break;

		pi->sdu = skb;
		pi->sdu_last_frag = skb;

		BT_DBG("Start");

		/* skb ownership transferred to pi->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start in progress: err stays
		 * -EINVAL, frame is dropped below.
		 */
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Reassembled data must still be short of the announced
		 * SDU length after a continuation fragment.
		 */
		if (pi->sdu->len >= pi->sdu_len)
			break;

		BT_DBG("Continue, reassembled %d", pi->sdu->len);

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Final length must match the announced SDU length
		 * exactly; otherwise the SDU is discarded below.
		 */
		if (pi->sdu->len != pi->sdu_len)
			break;

		BT_DBG("End, reassembled %d", pi->sdu->len);
		/* If the sender used tiny PDUs, the rcv queuing could fail.
		 * Applications that have issues here should use a larger
		 * sk_rcvbuf.
		 */
		err = sock_queue_rcv_skb(sk, pi->sdu);

		if (!err) {
			/* Reassembly complete */
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}
		break;

	default:
		BT_DBG("Bad SAR value");
		break;
	}

	/* Common error path: drop any partial SDU and any unconsumed skb. */
	if (err) {
		BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
			err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;
		if (skb)
			kfree_skb(skb);
	}

	/* Update local busy state */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);

	return err;
}
6057
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006058static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03006059{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006060 int err = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006061 /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
6062 * until a gap is encountered.
6063 */
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006064
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006065 struct l2cap_pinfo *pi;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006066
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006067 BT_DBG("sk %p", sk);
6068 pi = l2cap_pi(sk);
6069
6070 while (l2cap_rmem_available(sk)) {
6071 struct sk_buff *skb;
6072 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6073 (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
6074
6075 skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
6076
6077 if (!skb)
6078 break;
6079
6080 skb_unlink(skb, SREJ_QUEUE(sk));
6081 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6082 err = l2cap_ertm_rx_expected_iframe(sk,
6083 &bt_cb(skb)->control, skb);
6084 if (err)
6085 break;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006086 }
6087
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006088 if (skb_queue_empty(SREJ_QUEUE(sk))) {
6089 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6090 l2cap_ertm_send_ack(sk);
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03006091 }
6092
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006093 return err;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006094}
6095
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006096static void l2cap_ertm_handle_srej(struct sock *sk,
6097 struct bt_l2cap_control *control)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006098{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006099 struct l2cap_pinfo *pi;
6100 struct sk_buff *skb;
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03006101
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006102 BT_DBG("sk %p, control %p", sk, control);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006103
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006104 pi = l2cap_pi(sk);
Gustavo F. Padovan05fbd892010-05-01 16:15:39 -03006105
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006106 if (control->reqseq == pi->next_tx_seq) {
6107 BT_DBG("Invalid reqseq %d, disconnecting",
6108 (int) control->reqseq);
6109 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006110 return;
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006111 }
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006112
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006113 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006114
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006115 if (skb == NULL) {
6116 BT_DBG("Seq %d not available for retransmission",
6117 (int) control->reqseq);
6118 return;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006119 }
6120
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006121 if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
6122 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6123 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6124 return;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006125 }
6126
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006127 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006128
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006129 if (control->poll) {
6130 l2cap_ertm_pass_to_tx(sk, control);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006131
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006132 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
6133 l2cap_ertm_retransmit(sk, control);
6134 l2cap_ertm_send(sk);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006135
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006136 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6137 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6138 pi->srej_save_reqseq = control->reqseq;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006139 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006140 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006141 l2cap_ertm_pass_to_tx_fbit(sk, control);
6142
6143 if (control->final) {
6144 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
6145 (pi->srej_save_reqseq == control->reqseq)) {
6146 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
6147 } else {
6148 l2cap_ertm_retransmit(sk, control);
6149 }
6150 } else {
6151 l2cap_ertm_retransmit(sk, control);
6152 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6153 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6154 pi->srej_save_reqseq = control->reqseq;
6155 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006156 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006157 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006158}
6159
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006160static void l2cap_ertm_handle_rej(struct sock *sk,
6161 struct bt_l2cap_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006163 struct l2cap_pinfo *pi;
6164 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006166 BT_DBG("sk %p, control %p", sk, control);
6167
6168 pi = l2cap_pi(sk);
6169
6170 if (control->reqseq == pi->next_tx_seq) {
6171 BT_DBG("Invalid reqseq %d, disconnecting",
6172 (int) control->reqseq);
6173 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6174 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175 }
6176
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006177 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006179 if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
6180 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6181 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6182 return;
6183 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006184
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006185 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
6186
6187 l2cap_ertm_pass_to_tx(sk, control);
6188
6189 if (control->final) {
6190 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
6191 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
6192 else
6193 l2cap_ertm_retransmit_all(sk, control);
6194 } else {
6195 l2cap_ertm_retransmit_all(sk, control);
6196 l2cap_ertm_send(sk);
6197 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
6198 pi->conn_state |= L2CAP_CONN_REJ_ACT;
6199 }
6200}
6201
/* Classify a received I-frame's txseq relative to the receive window
 * and the current recovery state.  Returns one of the
 * L2CAP_ERTM_TXSEQ_* classifications (expected, duplicate, unexpected,
 * invalid, ...) that drives the RX state machines.  All comparisons use
 * modular sequence arithmetic via __delta_seq().
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	/* Extra classifications only apply while SREJ recovery is active. */
	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (mod window) is a retransmission
	 * of a frame we already have.
	 */
	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6291
/* ERTM receive state machine handler for the normal RECV state.
 * Dispatches on the receive event (I-frame or one of the S-frames) and
 * either delivers/buffers the frame, triggers retransmission, or
 * disconnects on invalid sequence numbers.
 *
 * skb ownership: when the frame is consumed (delivered or queued for
 * SREJ recovery) skb_in_use is set; otherwise the skb is freed before
 * returning.  Returns 0 or a negative errno from frame delivery.
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* I-frames also ack frames via their reqseq field. */
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				/* F-bit: retransmit unless this poll was
				 * already answered via REJ handling.
				 */
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Already received; only process the ack info. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			/* Don't retransmit while an AMP channel move is in
			 * progress (frames may need resegmentation first).
			 */
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
					pi->amp_move_state ==
						L2CAP_AMP_STATE_WAIT_PREPARE) {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			/* Remote just left busy: restart the retransmission
			 * timer if frames are still outstanding.
			 */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		/* Remote is busy: hold off retransmissions entirely. */
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	/* Free the skb unless ownership was transferred above. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6429
/* ERTM receive state machine handler for the SREJ_SENT state, entered
 * after a sequence gap was detected and SREJ frames were sent.  Buffers
 * frames on the SREJ queue, requests missing frames, and drains the
 * queue (via l2cap_ertm_rx_queued_iframes()) as gaps are filled.
 *
 * skb ownership follows the same convention as the RECV handler: the
 * skb is freed unless skb_in_use marks it as consumed.  Returns 0 or a
 * negative errno from frame delivery.
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* The next outstanding SREJ'd frame arrived;
			 * remove it from the pending list and try to
			 * drain the buffered frames in order.
			 */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			/* F-bit: retransmit unless this poll was already
			 * answered via REJ handling.
			 */
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			/* No P-bit: just acknowledge with a plain RR. */
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	/* Free the skb unless ownership was transferred above. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6575
/* ERTM receive state machine handler used while an AMP channel move is
 * in progress.  Only in-sequence I-frames and reqseq acknowledgements
 * are processed; anything that would cause a receive-state change
 * (SREJ recovery, retransmission) is deliberately ignored until the
 * move completes.  Returns 0 or a negative errno from frame delivery.
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				/* Clear a pending REJ_ACT but never
				 * retransmit while the move is underway.
				 */
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		/* Only consume the acknowledgement information. */
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	/* Free the skb unless ownership was transferred above. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6637
/* Respond to the remote's poll after an AMP channel move: process the
 * saved acknowledgement, rewind the transmit state to what the receiver
 * expects, finish the move (resegmentation etc.), answer with the
 * F-bit set, and replay the event that carried the poll through the
 * RECV state handler.  Returns 0 or a negative errno.
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	/* The saved poll event must be an S-frame event; an I-frame
	 * here is a protocol error.
	 */
	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
				pi->amp_move_event);

	return err;
}
6680
6681static void l2cap_amp_move_setup(struct sock *sk)
6682{
6683 struct l2cap_pinfo *pi;
6684 struct sk_buff *skb;
6685
6686 BT_DBG("sk %p", sk);
6687
6688 pi = l2cap_pi(sk);
6689
6690 l2cap_ertm_stop_ack_timer(pi);
6691 l2cap_ertm_stop_retrans_timer(pi);
6692 l2cap_ertm_stop_monitor_timer(pi);
6693
6694 pi->retry_count = 0;
6695 skb_queue_walk(TX_QUEUE(sk), skb) {
6696 if (bt_cb(skb)->retries)
6697 bt_cb(skb)->retries = 1;
6698 else
6699 break;
6700 }
6701
6702 pi->expected_tx_seq = pi->buffer_seq;
6703
6704 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6705 l2cap_seq_list_clear(&pi->retrans_list);
6706 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6707 skb_queue_purge(SREJ_QUEUE(sk));
6708
6709 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6710 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6711
6712 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6713 pi->rx_state);
6714
6715 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6716}
6717
6718static void l2cap_amp_move_revert(struct sock *sk)
6719{
6720 struct l2cap_pinfo *pi;
6721
6722 BT_DBG("sk %p", sk);
6723
6724 pi = l2cap_pi(sk);
6725
6726 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6727 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6728 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6729 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6730 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6731}
6732
6733static int l2cap_amp_move_reconf(struct sock *sk)
6734{
6735 struct l2cap_pinfo *pi;
6736 u8 buf[64];
6737 int err = 0;
6738
6739 BT_DBG("sk %p", sk);
6740
6741 pi = l2cap_pi(sk);
6742
6743 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6744 l2cap_build_amp_reconf_req(sk, buf), buf);
6745 return err;
6746}
6747
/* Complete a successful AMP channel move.  The initiator either starts
 * a reconfiguration exchange (if reconfiguration is enabled and the
 * channel is in ERTM mode) or polls the remote and waits for the F-bit;
 * the responder waits for the initiator's P-bit.  Non-ERTM channels go
 * straight back to the normal receive state.
 */
static void l2cap_amp_move_success(struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		int err = 0;
		/* Send reconfigure request */
		if (pi->mode == L2CAP_MODE_ERTM) {
			pi->reconf_state = L2CAP_RECONF_INT;
			if (enable_reconfig)
				err = l2cap_amp_move_reconf(sk);

			/* Fall back to an explicit poll if reconfigure
			 * is disabled or could not be sent.
			 */
			if (err || !enable_reconfig) {
				pi->reconf_state = L2CAP_RECONF_NONE;
				l2cap_ertm_tx(sk, NULL, NULL,
						L2CAP_ERTM_EVENT_EXPLICIT_POLL);
				pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
			}
		} else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	} else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		if (pi->mode == L2CAP_MODE_ERTM)
			pi->rx_state =
				L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
		else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	}
}
6780
6781static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6782{
6783 /* Make sure reqseq is for a packet that has been sent but not acked */
6784 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6785 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6786}
6787
/* Streaming-mode receive path: deliver in-sequence I-frames and silently
 * drop everything else (streaming mode has no retransmission, so a gap
 * just abandons any partial SDU).  Sequence state always advances past
 * the received txseq.  Returns 0 (errors from delivery are not
 * propagated to the caller).
 */
static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, state %d",
		sk, control, skb, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
			L2CAP_ERTM_TXSEQ_EXPECTED) {
		l2cap_ertm_pass_to_tx(sk, control);

		BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
			__next_seq(pi->buffer_seq, pi));

		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);

		l2cap_ertm_rx_expected_iframe(sk, control, skb);
	} else {
		/* Sequence gap: abandon any partial SDU and drop the frame. */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame just seen. */
	pi->last_acked_seq = control->txseq;
	pi->expected_tx_seq = __next_seq(control->txseq, pi);

	return err;
}
6828
/* Top-level ERTM receive dispatcher.  Validates the frame's reqseq and
 * routes the event to the handler for the current receive state
 * (normal, SREJ recovery, or one of the AMP channel move states).  An
 * invalid reqseq is a protocol violation and disconnects the channel.
 * Returns 0 or a negative errno from the state handler.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* Move complete once the remote answers our poll
			 * with the F-bit: rewind the transmit state to
			 * what the receiver expects, adopt the new
			 * controller's MTU, resegment pending frames, and
			 * process this frame normally.
			 */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
							event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			/* As the move responder, answer the remote's poll. */
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
6923
6924void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
6925{
6926 lock_sock(sk);
6927
6928 l2cap_pi(sk)->fixed_channel = 1;
6929
6930 l2cap_pi(sk)->imtu = opt->imtu;
6931 l2cap_pi(sk)->omtu = opt->omtu;
6932 l2cap_pi(sk)->remote_mps = opt->omtu;
6933 l2cap_pi(sk)->mps = opt->omtu;
6934 l2cap_pi(sk)->flush_to = opt->flush_to;
6935 l2cap_pi(sk)->mode = opt->mode;
6936 l2cap_pi(sk)->fcs = opt->fcs;
6937 l2cap_pi(sk)->max_tx = opt->max_tx;
6938 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
6939 l2cap_pi(sk)->tx_win = opt->txwin_size;
6940 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
6941 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
6942 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
6943
6944 if (opt->mode == L2CAP_MODE_ERTM ||
6945 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
6946 l2cap_ertm_init(sk);
6947
6948 release_sock(sk);
6949
6950 return;
6951}
6952
/* Maps the 2-bit supervisory function field of a received S-frame to
 * the corresponding ERTM receive event, in field-value order
 * (RR, REJ, RNR, SREJ).
 */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
6957
/*
 * Deliver one inbound data frame to a connected L2CAP socket.
 *
 * @sk:  destination socket (caller is responsible for locking it;
 *       see l2cap_recv_deferred_frame())
 * @skb: the frame payload with the basic L2CAP header already removed
 *
 * Basic mode frames are queued directly; ERTM/streaming frames have
 * their control field parsed and are fed to the appropriate receive
 * state machine. The skb is consumed on every path (queued, handed to
 * the rx machines, or freed at "drop"). Always returns 0.
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		/* sock_queue_rcv_skb() returns 0 on success and takes
		 * ownership of the skb; fall through to drop on failure */
		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Parse the control field: 4 bytes in extended mode,
		 * 2 bytes in enhanced mode */
		control = &bt_cb(skb)->control;
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
						control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
						control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Compute the SDU payload length for the MPS check:
		 * a start-of-SDU I-frame carries a 2-byte SDU length
		 * field, and CRC16 FCS adds a 2-byte trailer */
		if ((control->frame_type == 'i') &&
			(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			/* ERTM and streaming share this path; only ERTM
			 * runs the full receive state machine */
			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		/* rx machines now own the skb; do not free it here */
		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7083
/* Process a deferred inbound frame in a context where sleeping is
 * allowed: take the socket lock, run the normal data path, release.
 * (l2cap_data_channel() itself does not lock the socket.)
 */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7090
/*
 * Deliver a connectionless-channel payload to the socket bound to the
 * given PSM on this connection's source address.
 *
 * The skb is consumed on every path (queued to the socket or freed).
 * Always returns 0. The "done" label checks sk because the first
 * "goto drop" happens before any socket was found/locked.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	/* Only bound or connected sockets may receive */
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* Enforce the incoming MTU */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* sock_queue_rcv_skb() returns 0 on success and takes ownership */
	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
7120
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007121static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
7122{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007123 struct sock *sk;
Brian Gix7eaa64d2011-10-19 13:17:42 -07007124 struct sk_buff *skb_rsp;
7125 struct l2cap_hdr *lh;
7126 u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
7127 L2CAP_ATT_NOT_SUPPORTED};
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007128
Inga Stotlandf214b6e2011-10-11 08:56:15 -07007129 sk = l2cap_get_sock_by_fixed_scid(0, cid, conn->src, conn->dst);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007130 if (!sk)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007131 goto drop;
7132
7133 bh_lock_sock(sk);
7134
7135 BT_DBG("sk %p, len %d", sk, skb->len);
7136
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007137 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007138 goto drop;
7139
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007140 if (l2cap_pi(sk)->imtu < skb->len)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007141 goto drop;
7142
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007143 if (!sock_queue_rcv_skb(sk, skb))
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007144 goto done;
7145
7146drop:
Brian Gix7eaa64d2011-10-19 13:17:42 -07007147 if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT &&
7148 skb->data[0] != L2CAP_ATT_INDICATE)
7149 goto free_skb;
7150
7151 /* If this is an incoming PDU that requires a response, respond with
7152 * a generic error so remote device doesn't hang */
7153
7154 skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
7155 if (!skb_rsp)
7156 goto free_skb;
7157
7158 lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
7159 lh->len = cpu_to_le16(sizeof(err_rsp));
7160 lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
7161 err_rsp[1] = skb->data[0];
7162 memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
7163 hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
7164
7165free_skb:
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007166 kfree_skb(skb);
7167
7168done:
7169 if (sk)
7170 bh_unlock_sock(sk);
7171 return 0;
7172}
7173
/*
 * Dispatch one complete, reassembled L2CAP frame to the handler for
 * its channel, keyed on the CID in the basic L2CAP header.
 * Each handler (or this function, on error) consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct sock *sk;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless data carries a 2-byte PSM before the payload */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* A Security Manager error tears down the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		/* Dynamically allocated CID: find the owning socket.
		 * NOTE(review): the bh_unlock_sock() below implies
		 * l2cap_get_chan_by_scid() returns with the socket
		 * bh-locked — confirm against its definition. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
		if (sk) {
			if (sock_owned_by_user(sk)) {
				/* Socket busy in user context: defer via
				 * the backlog; free on backlog overflow */
				BT_DBG("backlog sk %p", sk);
				if (sk_add_backlog(sk, skb))
					kfree_skb(skb);
			} else
				l2cap_data_channel(sk, skb);

			bh_unlock_sock(sk);
		} else if (cid == L2CAP_CID_A2MP) {
			BT_DBG("A2MP");
			amp_conn_ind(conn, skb);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			kfree_skb(skb);
		}

		break;
	}
}
7235
7236/* ---- L2CAP interface with lower layer (HCI) ---- */
7237
7238static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7239{
7240 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007241 register struct sock *sk;
7242 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007243
7244 if (type != ACL_LINK)
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007245 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007246
7247 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7248
7249 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007250 read_lock(&l2cap_sk_list.lock);
7251 sk_for_each(sk, node, &l2cap_sk_list.head) {
7252 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007253 continue;
7254
7255 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007256 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007257 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007258 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007259 exact++;
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007260 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7261 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007262 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007263 lm2 |= HCI_LM_MASTER;
7264 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007265 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007266 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007267
7268 return exact ? lm1 : lm2;
7269}
7270
7271static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7272{
Marcel Holtmann01394182006-07-03 10:02:46 +02007273 struct l2cap_conn *conn;
7274
Linus Torvalds1da177e2005-04-16 15:20:36 -07007275 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7276
Ville Tervoacd7d372011-02-10 22:38:49 -03007277 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007278 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007279
7280 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007281 conn = l2cap_conn_add(hcon, status);
7282 if (conn)
7283 l2cap_conn_ready(conn);
Marcel Holtmann01394182006-07-03 10:02:46 +02007284 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007285 l2cap_conn_del(hcon, bt_err(status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007286
7287 return 0;
7288}
7289
Marcel Holtmann2950f212009-02-12 14:02:50 +01007290static int l2cap_disconn_ind(struct hci_conn *hcon)
7291{
7292 struct l2cap_conn *conn = hcon->l2cap_data;
7293
7294 BT_DBG("hcon %p", hcon);
7295
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007296 if (hcon->type != ACL_LINK || !conn)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007297 return 0x13;
7298
7299 return conn->disc_reason;
7300}
7301
7302static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007303{
7304 BT_DBG("hcon %p reason %d", hcon, reason);
7305
Ville Tervoacd7d372011-02-10 22:38:49 -03007306 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007307 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007308
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007309 l2cap_conn_del(hcon, bt_err(reason));
Marcel Holtmann01394182006-07-03 10:02:46 +02007310
Linus Torvalds1da177e2005-04-16 15:20:36 -07007311 return 0;
7312}
7313
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007314static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007315{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007316 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007317 return;
7318
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007319 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007320 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7321 l2cap_sock_clear_timer(sk);
7322 l2cap_sock_set_timer(sk, HZ * 5);
7323 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
7324 __l2cap_sock_close(sk, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007325 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007326 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7327 l2cap_sock_clear_timer(sk);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007328 }
7329}
7330
/*
 * HCI callback: an authentication/encryption procedure has completed
 * (status == 0 on success) with the link's new encryption state.
 *
 * Walks every channel on the connection under the channel-list read
 * lock, bh-locking each socket in turn, and advances per-channel state:
 * LE data channels complete SMP pairing, BR/EDR channels either proceed
 * with connection establishment (BT_CONNECT/BT_CONNECT2) or get their
 * encryption-change handling (connected channels). Always returns 0.
 *
 * Note the loop's continue-paths: each must bh_unlock_sock() itself
 * before continuing, since the unlock at the loop bottom is skipped.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);

		/* LE data channel: finish SMP security establishment */
		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt)
				l2cap_pi(sk)->sec_level = hcon->sec_level;

			del_timer(&hcon->smp_timer);
			l2cap_chan_ready(sk);
			smp_link_encrypt_cmplt(conn, status, encrypt);

			bh_unlock_sock(sk);
			continue;
		}

		/* Channel already has a connect request in flight */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channel: just react to the encryption change */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel waiting for security: now send
			 * the connect request (or create the AMP physical
			 * link first if that is the preferred policy) */
			if (!status) {
				l2cap_pi(sk)->conf_state |=
					L2CAP_CONF_CONNECT_PEND;
				if (l2cap_pi(sk)->amp_pref ==
						BT_AMP_POLICY_PREFER_AMP) {
					amp_create_physical(l2cap_pi(sk)->conn,
							sk);
				} else
					l2cap_send_conn_req(sk);
			} else {
				/* Security failed: short timer to tear down */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel waiting for security: answer the
			 * peer's connect request with success or a
			 * security block */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				if (l2cap_pi(sk)->amp_id) {
					/* AMP channel: accept is deferred to
					 * the AMP manager */
					amp_accept_physical(conn,
						l2cap_pi(sk)->amp_id, sk);
					bh_unlock_sock(sk);
					continue;
				}

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
7424
/*
 * HCI callback: one ACL data packet arrived for this link.
 *
 * Reassembles L2CAP frames that were fragmented across several ACL
 * packets. ACL_START packets begin a new frame (which must open with a
 * basic L2CAP header giving the total length); continuation packets are
 * appended to conn->rx_skb until conn->rx_len reaches zero, at which
 * point the complete frame is handed to l2cap_recv_frame().
 *
 * The inbound skb is always consumed: either its data is copied into
 * the reassembly buffer and it is freed at "drop", or (complete frame
 * in a single packet) it is passed on to l2cap_recv_frame() directly.
 * Always returns 0. Protocol violations mark the connection unreliable
 * via l2cap_conn_unreliable().
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Only BR/EDR controllers get an implicit conn created here */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start packet while reassembly is in progress means
		 * the previous frame was truncated: discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		/* ACL_CONT set on a start packet is a controller bug */
		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
7527
/* seq_file show callback for the "l2cap" debugfs entry: print one line
 * per L2CAP socket (addresses, state, PSM, CIDs, MTUs, security level
 * and mode) while holding the global socket-list lock.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level,
					pi->mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
7551
/* debugfs open callback: bind the seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7556
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry; created in l2cap_init(), removed in
 * l2cap_exit() */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007565
/* Callbacks registered with the HCI core: link events, security
 * completion, inbound ACL data, and AMP channel lifecycle hooks */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata,
	.create_cfm	= l2cap_create_cfm,
	.modify_cfm	= l2cap_modify_cfm,
	.destroy_cfm	= l2cap_destroy_cfm,
};
7579
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007580int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007581{
7582 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007583
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007584 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007585 if (err < 0)
7586 return err;
7587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007588 _l2cap_wq = create_singlethread_workqueue("l2cap");
7589 if (!_l2cap_wq) {
7590 err = -ENOMEM;
7591 goto error;
7592 }
7593
Linus Torvalds1da177e2005-04-16 15:20:36 -07007594 err = hci_register_proto(&l2cap_hci_proto);
7595 if (err < 0) {
7596 BT_ERR("L2CAP protocol registration failed");
7597 bt_sock_unregister(BTPROTO_L2CAP);
7598 goto error;
7599 }
7600
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007601 if (bt_debugfs) {
7602 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7603 bt_debugfs, NULL, &l2cap_debugfs_fops);
7604 if (!l2cap_debugfs)
7605 BT_ERR("Failed to create L2CAP debug file");
7606 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007607
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007608 if (amp_init() < 0) {
7609 BT_ERR("AMP Manager initialization failed");
7610 goto error;
7611 }
7612
Linus Torvalds1da177e2005-04-16 15:20:36 -07007613 return 0;
7614
7615error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007616 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007617 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007618 return err;
7619}
7620
/*
 * Tear down the L2CAP core in reverse order of l2cap_init(): AMP
 * manager, debugfs entry, workqueue (flushed first so queued work
 * completes), HCI protocol hook, then the socket layer.
 */
void l2cap_exit(void)
{
	amp_exit();

	debugfs_remove(l2cap_debugfs);

	/* Let all pending deferred work finish before destruction */
	flush_workqueue(_l2cap_wq);
	destroy_workqueue(_l2cap_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
7635
/* Module parameters, runtime-tunable via sysfs (mode 0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_reconfig, bool, 0644);
MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");