blob: 84bb9efd1e0fc4d74b3d82cf87de36e83258d216 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090017 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090022 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 SOFTWARE IS DISCLAIMED.
25*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliaea370122011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020061int disable_ertm;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062int enable_reconfig;
Marcel Holtmannf0709e02007-10-20 13:38:51 +020063
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -070064static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };
Linus Torvalds1da177e2005-04-16 15:20:36 -070066
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067struct workqueue_struct *_l2cap_wq;
68
69struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71};
72
73static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
74 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
75static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
76 struct l2cap_pinfo *pi, u16 icid, u16 result);
77static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
78 u16 icid, u16 result);
79
80static void l2cap_amp_move_setup(struct sock *sk);
81static void l2cap_amp_move_success(struct sock *sk);
82static void l2cap_amp_move_revert(struct sock *sk);
83
84static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
87 u8 code, u8 ident, u16 dlen, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088static int l2cap_answer_move_poll(struct sock *sk);
89static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
90static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
91static void l2cap_chan_ready(struct sock *sk);
92static void l2cap_conn_del(struct hci_conn *hcon, int err);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -030093
Marcel Holtmann01394182006-07-03 10:02:46 +020094/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Gustavo F. Padovan4a6aa522011-05-17 14:34:52 -030096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200101 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700102 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200103}
104
/* Find channel with given DCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
	u16 cid)
{
	struct sock *s;
	/* bh_lock_sock() is taken while still holding the list read lock
	 * so the channel cannot be unlinked between lookup and lock.
	 */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_dcid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120{
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
124 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200127}
128
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	/* Lock the socket before dropping the list lock so the channel
	 * cannot disappear between lookup and bh_lock_sock().
	 */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200143{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200148 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 return s;
150}
151
/* Find channel with the given signalling identifier.
 * Returns a bh-locked socket, or NULL if no channel matches. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
162
/* Linear search of an skb queue for the frame carrying ERTM
 * transmit sequence number @seq; returns the skb or NULL.
 * The skb is not removed from the queue.
 */
static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
	u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 u16 allocSize = 1;
179 int err = 0;
180 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300181
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 /* Actual allocated size must be a power of 2 */
183 while (allocSize && allocSize <= size)
184 allocSize <<= 1;
185 if (!allocSize)
186 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
189 if (!seq_list->list)
190 return -ENOMEM;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192 seq_list->size = allocSize;
193 seq_list->mask = allocSize - 1;
194 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
195 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
196 for (i = 0; i < allocSize; i++)
197 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300198
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300199 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300200}
201
/* Release the array allocated by l2cap_seq_list_init().
 * Safe on a never-initialized (NULL) list pointer since kfree(NULL)
 * is a no-op; the struct itself is owned by the caller.
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
206
/* A slot holds L2CAP_SEQ_LIST_CLEAR when @seq is not queued; any
 * other value (a successor seq or L2CAP_SEQ_LIST_TAIL) means it is.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
	u16 seq)
{
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
212
/* Remove @seq from the sequence list and return it, or return
 * L2CAP_SEQ_LIST_CLEAR when the list is empty or @seq is absent.
 *
 * The list is a singly linked list threaded through the array:
 * list[x & mask] holds the successor of x, L2CAP_SEQ_LIST_TAIL marks
 * the last element, and L2CAP_SEQ_LIST_CLEAR marks "not queued".
 * Removing a non-head element therefore needs a walk from the head
 * to find its predecessor.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the only element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Splice @seq out: predecessor inherits its successor */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
252
/* Remove and return the first queued sequence number; returns
 * L2CAP_SEQ_LIST_CLEAR if the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
257
258static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
259{
260 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
261 u16 i;
262 for (i = 0; i < seq_list->size; i++)
263 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
264
265 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
266 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
267 }
268}
269
270static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
271{
272 u16 mask = seq_list->mask;
273
274 BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
275
276 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
277 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
278 seq_list->head = seq;
279 else
280 seq_list->list[seq_list->tail & mask] = seq;
281
282 seq_list->tail = seq;
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
284 }
285}
286
287static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
288{
289 u16 packed;
290
291 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
292 L2CAP_CTRL_REQSEQ;
293 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
294 L2CAP_CTRL_FINAL;
295
296 if (control->frame_type == 's') {
297 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
298 L2CAP_CTRL_POLL;
299 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
300 L2CAP_CTRL_SUPERVISE;
301 packed |= L2CAP_CTRL_FRAME_TYPE;
302 } else {
303 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
304 L2CAP_CTRL_SAR;
305 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
306 L2CAP_CTRL_TXSEQ;
307 }
308
309 return packed;
310}
311
/* Decode a 16-bit enhanced (ERTM) control field into @control.
 * The unused half of the union of fields is zeroed so callers can
 * rely on poll/super (I-frame) or sar/txseq (S-frame) being 0.
 */
static void __get_enhanced_control(u16 enhanced,
	struct bt_l2cap_control *control)
{
	control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
		L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enhanced & L2CAP_CTRL_FINAL) >>
		L2CAP_CTRL_FINAL_SHIFT;

	if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
		/* Supervisory frame */
		control->frame_type = 's';
		control->poll = (enhanced & L2CAP_CTRL_POLL) >>
			L2CAP_CTRL_POLL_SHIFT;
		control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
			L2CAP_CTRL_SUPERVISE_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* Information frame */
		control->frame_type = 'i';
		control->sar = (enhanced & L2CAP_CTRL_SAR) >>
			L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
			L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
340
/* Serialize a bt_l2cap_control into the 32-bit extended control
 * field (same layout idea as the enhanced field, wider seq numbers).
 */
static u32 __pack_extended_control(struct bt_l2cap_control *control)
{
	u32 packed;

	packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
		L2CAP_EXT_CTRL_REQSEQ;
	packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
		L2CAP_EXT_CTRL_FINAL;

	if (control->frame_type == 's') {
		/* Supervisory frame: poll + supervise + S-bit */
		packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
			L2CAP_EXT_CTRL_POLL;
		packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
			L2CAP_EXT_CTRL_SUPERVISE;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		/* Information frame: SAR + txseq */
		packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
			L2CAP_EXT_CTRL_SAR;
		packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
			L2CAP_EXT_CTRL_TXSEQ;
	}

	return packed;
}
365
/* Decode a 32-bit extended control field into @control, zeroing the
 * fields that do not apply to the decoded frame type.
 */
static void __get_extended_control(u32 extended,
	struct bt_l2cap_control *control)
{
	control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
		L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
		L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* Supervisory frame */
		control->frame_type = 's';
		control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
			L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
			L2CAP_EXT_CTRL_SUPERVISE_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* Information frame */
		control->frame_type = 'i';
		control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
			L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
			L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
394
/* Cancel a pending ERTM acknowledgement timer, if any. */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
400
401static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
402{
403 BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
404 if (!delayed_work_pending(&pi->ack_work)) {
405 queue_delayed_work(_l2cap_wq, &pi->ack_work,
406 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
407 }
408}
409
/* Cancel a pending ERTM retransmission timer, if any. */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
415
416static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
417{
418 BT_DBG("pi %p", pi);
419 if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
420 __cancel_delayed_work(&pi->retrans_work);
421 queue_delayed_work(_l2cap_wq, &pi->retrans_work,
422 msecs_to_jiffies(pi->retrans_timeout));
423 }
424}
425
/* Cancel a pending ERTM monitor timer, if any. */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
431
432static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
433{
434 BT_DBG("pi %p", pi);
435 l2cap_ertm_stop_retrans_timer(pi);
436 __cancel_delayed_work(&pi->monitor_work);
437 if (pi->monitor_timeout) {
438 queue_delayed_work(_l2cap_wq, &pi->monitor_work,
439 msecs_to_jiffies(pi->monitor_timeout));
440 }
441}
442
443static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200444{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300445 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200446
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300447 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200449 return cid;
450 }
451
452 return 0;
453}
454
/* Push @sk onto the head of the connection's channel list, taking a
 * socket reference that l2cap_chan_unlink() releases.
 * Caller must hold the list write lock (no locking is done here).
 */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
466
/* Remove @sk from the connection's channel list under the list write
 * lock, then drop the reference taken by __l2cap_chan_link().
 * Note: next/prev are read before the lock is taken; presumably the
 * caller already excludes concurrent list mutation of this entry --
 * TODO(review) confirm against callers.
 */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
483
/* Attach channel @sk to @conn: assign source/destination CIDs and
 * default MTUs according to the socket type and link type, then link
 * the channel into the connection's channel list.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason -- presumably HCI "remote
	 * user terminated connection"; TODO(review) confirm.
	 */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (!l2cap_pi(sk)->fixed_channel &&
		(sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: fixed LE data CID, and MTUs
			 * raised to at least the LE default */
			if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
			if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;

			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else if (sk->sk_type == SOCK_RAW) {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}
	/* Otherwise, do not set scid/dcid/omtu. These will be set up
	 * by l2cap_fixed_channel_config()
	 */

	__l2cap_chan_link(l, sk);
}
528
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Tears down channel state: detaches from the L2CAP connection (and
 * drops the hci_conn reference unless this is a fixed channel),
 * releases any AMP connection/channel, marks the socket closed and
 * zapped, notifies the parent (accept queue) or the owner, and, for
 * ERTM channels, purges queues and cancels all ERTM timers.
 * @err, if non-zero, is reported to the socket as sk_err.
 */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		if (!l2cap_pi(sk)->fixed_channel)
			hci_conn_put(conn->hcon);
	}

	/* Release AMP state if this channel had been moved/created on
	 * an AMP controller.
	 */
	if (l2cap_pi(sk)->ampcon) {
		l2cap_pi(sk)->ampcon->l2cap_data = NULL;
		l2cap_pi(sk)->ampcon = NULL;
		if (l2cap_pi(sk)->ampchan) {
			hci_chan_put(l2cap_pi(sk)->ampchan);
			/* Still referenced elsewhere: detach this
			 * channel's logical link from the hci_chan.
			 */
			if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
				l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
		}
		l2cap_pi(sk)->ampchan = NULL;
		l2cap_pi(sk)->amp_id = 0;
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending connection on a listening socket: detach and
		 * wake the listener.
		 */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		if (l2cap_pi(sk)->sdu)
			kfree_skb(l2cap_pi(sk)->sdu);

		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}
}
586
/* Map the socket type and requested security level to an HCI
 * authentication requirement.
 *
 * Side effect: for PSM 0x0001 (the SDP PSM) a BT_SECURITY_LOW
 * channel is upgraded in place to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_RAW) {
		/* Raw (signalling-only) sockets use dedicated bonding */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP never requires bonding */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
617
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200618/* Service level security */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700619static inline int l2cap_check_security(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200620{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100622 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200623
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700624 auth_type = l2cap_get_auth_type(sk);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100625
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700626 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
627 auth_type);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200628}
629
/* Allocate the next signalling-command identifier for @conn,
 * cycling through 1..128 under conn->lock.
 */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
651
/* Compute the CRC-16 FCS over an outgoing frame and write it into
 * the last L2CAP_FCS_SIZE bytes of the final fragment.
 *
 * Handles both linear skbs and skbs with a fragment list: the CRC
 * covers everything except the trailing FCS field itself, which is
 * assumed to already be reserved at the end of the last fragment.
 */
static void apply_fcs(struct sk_buff *skb)
{
	size_t len;
	u16 partial_crc;
	struct sk_buff *iter;
	struct sk_buff *final_frag = skb;

	/* With a frag list the head holds no FCS; otherwise exclude
	 * the FCS bytes at the end of the linear buffer.
	 */
	if (skb_has_frag_list(skb))
		len = skb_headlen(skb);
	else
		len = skb->len - L2CAP_FCS_SIZE;

	partial_crc = crc16(0, (u8 *) skb->data, len);

	skb_walk_frags(skb, iter) {
		len = iter->len;
		if (!iter->next)
			len -= L2CAP_FCS_SIZE;	/* last frag carries the FCS */

		partial_crc = crc16(partial_crc, iter->data, len);
		final_frag = iter;
	}

	put_unaligned_le16(partial_crc,
		final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
}
678
/* Build and transmit an L2CAP signalling command on @conn's ACL link.
 * Silently drops the command if the skb cannot be built.
 */
void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use non-flushable packets where the controller supports it */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = 1;

	hci_send_acl(conn->hcon, NULL, skb, flags);
}
698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700699static inline int __l2cap_no_conn_pending(struct sock *sk)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300700{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700701 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300702}
703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700704static void l2cap_send_conn_req(struct sock *sk)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300705{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700706 struct l2cap_conn_req req;
707 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
708 req.psm = l2cap_pi(sk)->psm;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300709
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700710 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
Gustavo F. Padovan2ab25cd2009-10-03 02:34:40 -0300711
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700712 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
713 L2CAP_CONN_REQ, sizeof(req), &req);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300714}
715
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700716static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300717{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700718 struct l2cap_create_chan_req req;
719 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
720 req.psm = l2cap_pi(sk)->psm;
721 req.amp_id = amp_id;
722
723 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
724 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
725
726 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
727 L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300728}
729
/* Drive the connect sequence for channel @sk.
 *
 * If the peer's feature mask has already been requested, wait for it
 * to arrive, then (once security passes and no connect is pending)
 * either start AMP physical-link creation or send the Connect
 * Request.  Otherwise, first issue an Information Request for the
 * feature mask and arm the info timer.
 */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: the connect resumes
		 * when the Information Response arrives.
		 */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			/* Prefer an AMP controller when policy asks for
			 * it and the peer supports A2MP.
			 */
			if (l2cap_pi(sk)->amp_pref ==
					BT_AMP_POLICY_PREFER_AMP &&
					conn->fc_mask & L2CAP_FC_A2MP)
				amp_create_physical(conn, sk);
			else
				l2cap_send_conn_req(sk);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
762
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300763static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
764{
765 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300766 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300767 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
768
769 switch (mode) {
770 case L2CAP_MODE_ERTM:
771 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
772 case L2CAP_MODE_STREAMING:
773 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
774 default:
775 return 0x00;
776 }
777}
778
/* Tear down a channel by sending an L2CAP Disconnect Request.
 *
 * Purges any queued outbound data first and, for ERTM channels, also
 * drops the SREJ queue and cancels the ack/retransmission/monitor
 * delayed work so no timer fires on a dying channel.  The socket is
 * moved to BT_DISCONN and @err is stored in sk_err for the caller's
 * error reporting.  A NULL @conn is tolerated and ignored.
 */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));

		/* Stop ERTM housekeeping before the channel goes away. */
		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
804
Linus Torvalds1da177e2005-04-16 15:20:36 -0700805/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200806static void l2cap_conn_start(struct l2cap_conn *conn)
807{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700808 struct l2cap_chan_list *l = &conn->chan_list;
809 struct sock_del_list del, *tmp1, *tmp2;
810 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200811
812 BT_DBG("conn %p", conn);
813
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700816 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300817
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700818 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200819 bh_lock_sock(sk);
820
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700821 if (sk->sk_type != SOCK_SEQPACKET &&
822 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200823 bh_unlock_sock(sk);
824 continue;
825 }
826
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700827 if (sk->sk_state == BT_CONNECT) {
828 if (!l2cap_check_security(sk) ||
829 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300830 bh_unlock_sock(sk);
831 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200832 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300833
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700834 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
835 conn->feat_mask)
836 && l2cap_pi(sk)->conf_state &
837 L2CAP_CONF_STATE2_DEVICE) {
838 tmp1 = kzalloc(sizeof(struct sock_del_list),
839 GFP_ATOMIC);
840 tmp1->sk = sk;
841 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300842 bh_unlock_sock(sk);
843 continue;
844 }
845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300847
Peter Krystadc446d212011-09-20 15:35:50 -0700848 if (l2cap_pi(sk)->amp_pref ==
849 BT_AMP_POLICY_PREFER_AMP &&
850 conn->fc_mask & L2CAP_FC_A2MP)
851 amp_create_physical(conn, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700852 else
853 l2cap_send_conn_req(sk);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300854
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200856 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300857 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700858 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
859 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200860
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700861 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100862 if (bt_sk(sk)->defer_setup) {
863 struct sock *parent = bt_sk(sk)->parent;
864 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
865 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Nick Pellyf86e4b02010-04-08 16:23:32 -0700866 if (parent)
867 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100868
869 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700870 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100871 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
872 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
873 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200874 } else {
875 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
876 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
877 }
878
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700879 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
880 l2cap_pi(sk)->amp_id) {
881 amp_accept_physical(conn,
882 l2cap_pi(sk)->amp_id, sk);
883 bh_unlock_sock(sk);
884 continue;
885 }
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300886
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700887 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
888 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
889
890 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300891 rsp.result != L2CAP_CR_SUCCESS) {
892 bh_unlock_sock(sk);
893 continue;
894 }
895
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700896 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300897 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700898 l2cap_build_conf_req(sk, buf), buf);
899 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200900 }
901
902 bh_unlock_sock(sk);
903 }
904
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700905 read_unlock(&l->lock);
906
907 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
908 bh_lock_sock(tmp1->sk);
909 __l2cap_sock_close(tmp1->sk, ECONNRESET);
910 bh_unlock_sock(tmp1->sk);
911 list_del(&tmp1->list);
912 kfree(tmp1);
913 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200914}
915
/* Find a socket bound to fixed cid @cid matching both bdaddrs.
 * Prefers an exact source-address match; otherwise falls back to the
 * closest match (a socket bound to BDADDR_ANY).  Returns NULL when
 * nothing matches.  NOTE(review): the returned socket is NOT locked
 * and no reference is taken — the l2cap_sk_list read lock is released
 * before returning; callers must cope with that.
 */
static struct sock *l2cap_get_sock_by_fixed_scid(int state,
				__le16 cid, bdaddr_t *src, bdaddr_t *dst)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means "any state". */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid && !bacmp(&bt_sk(sk)->dst, dst)) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke on an exact match. */
	return node ? sk : sk1;
}
946
/* Find a socket with source cid @cid and source bdaddr @src.
 * Prefers an exact source-address match, falling back to a socket
 * bound to BDADDR_ANY; returns NULL if none matches.  NOTE(review):
 * despite the historical "locked" wording of similar helpers, the
 * result is returned unlocked and without a reference.
 */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 matches any socket state. */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node != NULL iff the loop exited via break (exact match). */
	return node ? sk : sk1;
}
976
/* Handle a freshly established incoming LE link: if a socket is
 * listening on the LE data fixed channel for our local address, spawn
 * a child socket, attach it to @conn, queue it on the parent's accept
 * queue and mark it connected.  Silently returns when no listener
 * exists or the backlog is full.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Keep the ACL alive while the child channel exists. */
	hci_conn_hold(conn->hcon);

	/* Child inherits its settings from the listening parent. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	/* Wake the listener so accept() can pick up the child. */
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

	/* Success path falls through: only the parent unlock remains. */
clean:
	bh_unlock_sock(parent);
}
1024
/* Drive all channels on @conn once the underlying link is ready.
 *
 * For an incoming LE link, first offer the connection to any listener
 * on the LE data channel.  Then, per channel: LE channels are elevated
 * to the stricter of their own and the link's pending security level
 * via SMP before being marked ready; connectionless (non-SEQPACKET/
 * non-STREAM) channels become connected immediately; BT_CONNECT
 * channels continue via l2cap_do_start().  With no channels at all on
 * an LE link, still request high security on the link.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	if (l->head) {
		for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
			bh_lock_sock(sk);

			if (conn->hcon->type == LE_LINK) {
				/* Honour the link's pending security
				 * level if it is stricter than ours.
				 */
				u8 sec_level = l2cap_pi(sk)->sec_level;
				u8 pending_sec = conn->hcon->pending_sec_level;

				if (pending_sec > sec_level)
					sec_level = pending_sec;

				if (smp_conn_security(conn, sec_level)) {
					l2cap_chan_ready(sk);
					/* Drop the hold taken while the
					 * connection was being set up.
					 */
					hci_conn_put(conn->hcon);
				}

			} else if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Connectionless: ready as soon as the
				 * link is up.
				 */
				l2cap_sock_clear_timer(sk);
				sk->sk_state = BT_CONNECTED;
				sk->sk_state_change(sk);
			} else if (sk->sk_state == BT_CONNECT)
				l2cap_do_start(sk);

			bh_unlock_sock(sk);
		}
	} else if (conn->hcon->type == LE_LINK) {
		smp_conn_security(conn, BT_SECURITY_HIGH);
	}

	read_unlock(&l->lock);
}
1069
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001070/* Notify sockets that we cannot guaranty reliability anymore */
1071static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1072{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001073 struct l2cap_chan_list *l = &conn->chan_list;
1074 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001075
1076 BT_DBG("conn %p", conn);
1077
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001078 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001079
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001080 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1081 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001082 sk->sk_err = err;
1083 }
1084
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001085 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001086}
1087
/* Info-request timer expiry: give up waiting for the remote feature
 * mask, mark the exchange done with whatever we have, and let the
 * pending channels proceed.  @arg is the l2cap_conn passed to
 * setup_timer() in l2cap_conn_add().
 */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1097
/* Create (or return the existing) L2CAP connection object for an HCI
 * link.  Returns the existing conn unchanged if one is present, NULL
 * if @status is non-zero masked by an existing conn or allocation
 * fails.  The MTU is taken from the controller's LE or ACL buffer
 * size depending on link type.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Reuse an existing conn; never build one for a failed link. */
	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* LE links use the SMP timeout timer (on the hcon) instead of
	 * the feature-mask info timer.  NOTE(review): the timer data is
	 * the l2cap_conn in both cases — confirm smp_timeout() expects
	 * that rather than the hcon.
	 */
	if (hcon->type == LE_LINK)
		setup_timer(&hcon->smp_timer, smp_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason: 0x13 (remote user terminated). */
	conn->disc_reason = 0x13;

	return conn;
}
1138
/* Tear down channels associated with a dying HCI link.
 *
 * @hcon may be either the BR/EDR link owning @conn or an AMP link that
 * some channels were moved to; a channel is killed when either its
 * base connection or its AMP connection matches @hcon.  The conn
 * object itself (rx buffer, info timer, hcon back-pointer) is freed
 * only when the owning link is the one going down.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled inbound frame. */
	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			/* l2cap_chan_del unlinks sk, so fetch the
			 * successor before deleting.
			 */
			next = l2cap_pi(sk)->next_c;
			bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	if (conn->hcon == hcon) {
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		hcon->l2cap_data = NULL;

		kfree(conn);
	}
}
1178
1179static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
1180{
1181 struct l2cap_chan_list *l = &conn->chan_list;
1182 write_lock_bh(&l->lock);
1183 __l2cap_chan_add(conn, sk);
1184 write_unlock_bh(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185}
1186
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188
/* Find socket with psm and source bdaddr.
 * Returns the exact source-address match when one exists, otherwise
 * the closest match (a listener bound to BDADDR_ANY), else NULL.
 * NOTE(review): the result is returned unlocked and without a
 * reference being taken.
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means any socket state qualifies. */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is only non-NULL when the loop broke on an exact match. */
	return node ? sk : sk1;
}
1218
/* Establish an outgoing L2CAP channel to bt_sk(sk)->dst.
 *
 * Resolves the local controller for the route, then either piggybacks
 * on an existing ACL (fixed channels, which require the link and its
 * l2cap_data to already exist) or initiates a new LE/ACL connection
 * depending on the destination CID.  The channel is linked into the
 * connection's channel list; if the link is already up the channel is
 * completed (or configuration started) immediately, otherwise it is
 * left in BT_CONNECT with the send timer armed.
 *
 * Returns 0 on success or a negative errno (-EHOSTUNREACH when no
 * route exists, -ENOTCONN for a fixed channel without an ACL,
 * -ENOMEM, or the hci_connect error).
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	/* Everything below runs with the device lock held. */
	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		/* LE data channel implies an LE link; otherwise ACL. */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_connect(hdev, LE_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	if ((l2cap_pi(sk)->fixed_channel) ||
			(l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
				hcon->state == BT_CONNECTED)) {
		/* No channel-level handshake needed: connected now. */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
		sk->sk_state_change(sk);

		if (hcon->state == BT_CONNECTED) {
			if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Connectionless channel over an
				 * already-up link.
				 */
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1305
/* Block until all outstanding ERTM frames have been acknowledged and
 * drained from the transmit queue, or until a signal/socket error
 * interrupts the wait.
 *
 * Called with the socket lock held; the lock is released around each
 * schedule_timeout() and re-taken afterwards.  Returns 0 on success,
 * a sock_intr_errno() value if interrupted, or the socket error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	/* Wait while frames remain unacked or queued and the conn lives. */
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the ack path
		 * can make progress.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1337
/* Workqueue callback that resumes ERTM transmission once the queue has
 * drained below the low-water mark (queued from l2cap_skb_destructor).
 * Drops the socket reference that the destructor transferred to the
 * work item via queue_work().
 */
static void l2cap_ertm_tx_worker(struct work_struct *work)
{
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, tx_work);
	/* l2cap_pinfo embeds the sock as its first member. */
	struct sock *sk = (struct sock *)pi;
	BT_DBG("%p", pi);

	lock_sock(sk);
	l2cap_ertm_send(sk);
	release_sock(sk);
	/* Balance the sock_hold() made before queueing this work. */
	sock_put(sk);
}
1350
/* skb destructor for ERTM frames handed to the HCI layer.
 *
 * Decrements the per-channel queued count; when it falls below
 * L2CAP_MIN_ERTM_QUEUED, schedules the tx worker to refill the
 * pipeline.  The socket reference held by this skb is either
 * transferred to the work item (queue_work() returned non-zero, so
 * the worker's sock_put() balances it) or released here.
 */
static void l2cap_skb_destructor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queued;
	int keep_sk = 0;

	queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
	if (queued < L2CAP_MIN_ERTM_QUEUED)
		keep_sk = queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);

	/* queue_work() returning 0 means the work was already pending
	 * and already owns a reference, so drop ours.
	 */
	if (!keep_sk)
		sock_put(sk);
}
1364
/* Hand one outbound frame to the HCI layer, routing it over the AMP
 * link when the channel has been moved there (and the move state
 * permits sending), otherwise over the BR/EDR ACL.  An AMP-routed
 * frame with no logical channel is silently dropped.
 */
void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
			pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
		BT_DBG("Sending on AMP connection %p %p",
			pi->ampcon, pi->ampchan);
		if (pi->ampchan)
			hci_send_acl(pi->ampcon, pi->ampchan, skb,
					ACL_COMPLETE);
		else
			/* Logical channel gone: drop rather than send. */
			kfree_skb(skb);
	} else {
		u16 flags;

		bt_cb(skb)->force_active = pi->force_active;
		BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);

		/* Honour the non-flushable setting when the controller
		 * supports it.
		 */
		if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
				!l2cap_pi(sk)->flushable)
			flags = ACL_START_NO_FLUSH;
		else
			flags = ACL_START;

		hci_send_acl(pi->conn->hcon, NULL, skb, flags);
	}
}
1395
/* Transmit as many queued ERTM I-frames as the current window allows.
 *
 * Stops when the send queue is empty, the remote tx window is full,
 * the local queued-frame cap (L2CAP_MAX_ERTM_QUEUED) is reached, or
 * the tx state machine leaves XMIT.  Each frame gets its control
 * field (enhanced or extended) stamped with the current reqseq/txseq,
 * an FCS if configured, and is sent as a clone so the original stays
 * on the queue for retransmission.  Returns the number of frames
 * sent, -ENOTCONN if the socket is not connected, or 0 when sending
 * is not currently permitted (remote busy, AMP move in progress).
 */
int l2cap_ertm_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p", sk);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signalled receiver-busy: hold off. */
	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return 0;

	/* Don't transmit mid-AMP-move. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
		atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
		(pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {

		skb = sk->sk_send_head;

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback a pending F-bit exactly once. */
		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
			control->final = 1;
			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		}
		control->reqseq = pi->buffer_seq;
		pi->last_acked_seq = pi->buffer_seq;
		control->txseq = pi->next_tx_seq;

		/* Write the control field into the frame header in the
		 * negotiated (extended or enhanced) format.
		 */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		   */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		/* The clone carries a sock ref, released (or handed to
		 * the tx worker) by l2cap_skb_destructor.
		 */
		sock_hold(sk);
		tx_skb->sk = sk;
		tx_skb->destructor = l2cap_skb_destructor;
		atomic_inc(&pi->ertm_queued);

		l2cap_do_send(sk, tx_skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		l2cap_ertm_start_retrans_timer(pi);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->unacked_frames += 1;
		pi->frames_sent += 1;
		sent += 1;

		/* Advance send head; original skb stays queued for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
		(int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
		atomic_read(&pi->ertm_queued));

	return sent;
}
1476
/* Streaming-mode transmit: splice @skbs onto the channel's tx queue
 * and send everything immediately.  Unlike ERTM there is no window,
 * no retransmission and no acknowledgement — frames are dequeued and
 * handed straight to l2cap_do_send() with reqseq fixed at 0.  Returns
 * 0 on success, -ENOTCONN if the socket is not connected, or 0 without
 * sending while an AMP move is in progress.
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Hold off while the channel is being moved to/from AMP. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* Streaming mode: no acknowledgements, reqseq unused. */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		/* Stamp the control field in the negotiated format. */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1534
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001535static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001536{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001537 while (len > 0) {
1538 if (iv->iov_len) {
1539 int copy = min_t(unsigned int, len, iv->iov_len);
1540 memcpy(kdata, iv->iov_base, copy);
1541 len -= copy;
1542 kdata += copy;
1543 iv->iov_base += copy;
1544 iv->iov_len -= copy;
1545 }
1546 iv++;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001547 }
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001548
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001549 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001550}
1551
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001552static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
1553 int len, int count, struct sk_buff *skb,
1554 int reseg)
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03001555{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001556 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001557 struct sk_buff **frag;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558 struct sk_buff *final;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001559 int err, sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001561 BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
1562 msg, (int)len, (int)count, skb);
1563
1564 if (!conn)
1565 return -ENOTCONN;
1566
1567 /* When resegmenting, data is copied from kernel space */
1568 if (reseg) {
1569 err = memcpy_fromkvec(skb_put(skb, count),
1570 (struct kvec *) msg->msg_iov, count);
1571 } else {
1572 err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
1573 count);
1574 }
1575
1576 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001577 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578
1579 sent += count;
1580 len -= count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001581 final = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583 /* Continuation fragments (no L2CAP header) */
1584 frag = &skb_shinfo(skb)->frag_list;
1585 while (len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001586 int skblen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 count = min_t(unsigned int, conn->mtu, len);
1588
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001589 /* Add room for the FCS if it fits */
1590 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
1591 len + L2CAP_FCS_SIZE <= conn->mtu)
1592 skblen = count + L2CAP_FCS_SIZE;
1593 else
1594 skblen = count;
1595
1596 /* Don't use bt_skb_send_alloc() while resegmenting, since
1597 * it is not ok to block.
1598 */
1599 if (reseg) {
1600 *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
1601 if (*frag)
1602 skb_set_owner_w(*frag, sk);
1603 } else {
1604 *frag = bt_skb_send_alloc(sk, skblen,
1605 msg->msg_flags & MSG_DONTWAIT, &err);
1606 }
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 if (!*frag)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001609 return -EFAULT;
1610
1611 /* When resegmenting, data is copied from kernel space */
1612 if (reseg) {
1613 err = memcpy_fromkvec(skb_put(*frag, count),
1614 (struct kvec *) msg->msg_iov,
1615 count);
1616 } else {
1617 err = memcpy_fromiovec(skb_put(*frag, count),
1618 msg->msg_iov, count);
1619 }
1620
1621 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001622 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623
1624 sent += count;
1625 len -= count;
1626
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001627 final = *frag;
1628
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 frag = &(*frag)->next;
1630 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001632 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
1633 if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
1634 if (reseg) {
1635 *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
1636 GFP_ATOMIC);
1637 if (*frag)
1638 skb_set_owner_w(*frag, sk);
1639 } else {
1640 *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
1641 msg->msg_flags & MSG_DONTWAIT,
1642 &err);
1643 }
1644
1645 if (!*frag)
1646 return -EFAULT;
1647
1648 final = *frag;
1649 }
1650
1651 skb_put(final, L2CAP_FCS_SIZE);
1652 }
1653
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001655}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001657struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001658{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001659 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001660 struct sk_buff *skb;
1661 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1662 struct l2cap_hdr *lh;
1663
1664 BT_DBG("sk %p len %d", sk, (int)len);
1665
1666 count = min_t(unsigned int, (conn->mtu - hlen), len);
1667 skb = bt_skb_send_alloc(sk, count + hlen,
1668 msg->msg_flags & MSG_DONTWAIT, &err);
1669 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001670 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001671
1672 /* Create L2CAP header */
1673 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001674 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001675 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001676 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001677
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001679 if (unlikely(err < 0)) {
1680 kfree_skb(skb);
1681 return ERR_PTR(err);
1682 }
1683 return skb;
1684}
1685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001686struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001687{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001688 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001689 struct sk_buff *skb;
1690 int err, count, hlen = L2CAP_HDR_SIZE;
1691 struct l2cap_hdr *lh;
1692
1693 BT_DBG("sk %p len %d", sk, (int)len);
1694
1695 count = min_t(unsigned int, (conn->mtu - hlen), len);
1696 skb = bt_skb_send_alloc(sk, count + hlen,
1697 msg->msg_flags & MSG_DONTWAIT, &err);
1698 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001699 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001700
1701 /* Create L2CAP header */
1702 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001703 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001704 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1705
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001706 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001707 if (unlikely(err < 0)) {
1708 kfree_skb(skb);
1709 return ERR_PTR(err);
1710 }
1711 return skb;
1712}
1713
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001714struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1715 struct msghdr *msg, size_t len,
1716 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001717{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001718 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001719 int err, count, hlen;
1720 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001721 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001722 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001723
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001724 if (l2cap_pi(sk)->extended_control)
1725 hlen = L2CAP_EXTENDED_HDR_SIZE;
1726 else
1727 hlen = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03001728
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001729 if (sdulen)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001730 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001731
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001732 if (fcs == L2CAP_FCS_CRC16)
1733 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001734
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001735 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1736 sk, msg, (int)len, (int)sdulen, hlen);
1737
1738 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1739
1740 /* Allocate extra headroom for Qualcomm PAL. This is only
1741 * necessary in two places (here and when creating sframes)
1742 * because only unfragmented iframes and sframes are sent
1743 * using AMP controllers.
1744 */
1745 if (l2cap_pi(sk)->ampcon &&
1746 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1747 reserve = BT_SKB_RESERVE_80211;
1748
1749 /* Don't use bt_skb_send_alloc() while resegmenting, since
1750 * it is not ok to block.
1751 */
1752 if (reseg) {
1753 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1754 if (skb)
1755 skb_set_owner_w(skb, sk);
1756 } else {
1757 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
1758 msg->msg_flags & MSG_DONTWAIT, &err);
1759 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001760 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001761 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001762
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001763 if (reserve)
1764 skb_reserve(skb, reserve);
1765
1766 bt_cb(skb)->control.fcs = fcs;
1767
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001768 /* Create L2CAP header */
1769 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001770 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1771 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001772
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001773 /* Control header is populated later */
1774 if (l2cap_pi(sk)->extended_control)
1775 put_unaligned_le32(0, skb_put(skb, 4));
1776 else
1777 put_unaligned_le16(0, skb_put(skb, 2));
1778
1779 if (sdulen)
1780 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1781
1782 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001783 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001784 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001785 kfree_skb(skb);
1786 return ERR_PTR(err);
1787 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001788
1789 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001790 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791}
1792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001793static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001794{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001795 struct l2cap_pinfo *pi;
1796 struct sk_buff *acked_skb;
1797 u16 ackseq;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001798
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001799 BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001800
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001801 pi = l2cap_pi(sk);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001802
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001803 if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
1804 return;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001805
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001806 BT_DBG("expected_ack_seq %d, unacked_frames %d",
1807 (int) pi->expected_ack_seq, (int) pi->unacked_frames);
1808
1809 for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
1810 ackseq = __next_seq(ackseq, pi)) {
1811
1812 acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
1813 if (acked_skb) {
1814 skb_unlink(acked_skb, TX_QUEUE(sk));
1815 kfree_skb(acked_skb);
1816 pi->unacked_frames--;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001817 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001818 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001819
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001820 pi->expected_ack_seq = reqseq;
1821
1822 if (pi->unacked_frames == 0)
1823 l2cap_ertm_stop_retrans_timer(pi);
1824
1825 BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001826}
1827
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001828static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001829{
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001830 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001831 int len;
1832 int reserve = 0;
1833 struct l2cap_hdr *lh;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001834
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001835 if (l2cap_pi(sk)->extended_control)
1836 len = L2CAP_EXTENDED_HDR_SIZE;
1837 else
1838 len = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001839
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001840 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1841 len += L2CAP_FCS_SIZE;
1842
1843 /* Allocate extra headroom for Qualcomm PAL */
1844 if (l2cap_pi(sk)->ampcon &&
1845 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1846 reserve = BT_SKB_RESERVE_80211;
1847
1848 skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
1849
1850 if (!skb)
1851 return ERR_PTR(-ENOMEM);
1852
1853 if (reserve)
1854 skb_reserve(skb, reserve);
1855
1856 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1857 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1858 lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
1859
1860 if (l2cap_pi(sk)->extended_control)
1861 put_unaligned_le32(control, skb_put(skb, 4));
1862 else
1863 put_unaligned_le16(control, skb_put(skb, 2));
1864
1865 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1866 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1867 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001868 }
1869
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001870 return skb;
1871}
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001872
/* Build and transmit a single S-frame (RR/RNR/REJ/SREJ) described by
 * @control.  Frame types other than 's' are ignored, as is any attempt
 * to send during most phases of an AMP channel move.
 *
 * Side effects on channel state:
 *  - sets control->final (and clears L2CAP_CONN_SEND_FBIT) when a final
 *    bit is owed and this frame is not itself a poll;
 *  - tracks whether an RNR is outstanding via L2CAP_CONN_SENT_RNR;
 *  - for non-SREJ frames, records reqseq in last_acked_seq and stops
 *    the ack timer, since this frame carries the acknowledgement.
 */
static void l2cap_ertm_send_sframe(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("sk %p, control %p", sk, control);

	if (control->frame_type != 's')
		return;

	pi = l2cap_pi(sk);

	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
		pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		BT_DBG("AMP error - attempted S-Frame send during AMP move");
		return;
	}

	/* A pending F-bit rides on this frame unless it is a poll */
	if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
		control->final = 1;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Keep the record of whether our last flow frame was RNR */
	if (control->super == L2CAP_SFRAME_RR)
		pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
	else if (control->super == L2CAP_SFRAME_RNR)
		pi->conn_state |= L2CAP_CONN_SENT_RNR;

	if (control->super != L2CAP_SFRAME_SREJ) {
		/* This frame acknowledges everything up to reqseq */
		pi->last_acked_seq = control->reqseq;
		l2cap_ertm_stop_ack_timer(pi);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
		(int) control->final, (int) control->poll,
		(int) control->super);

	if (pi->extended_control)
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(sk, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(sk, skb);
}
1922
/* Acknowledge received I-frames up to buffer_seq.
 *
 * If the receiver is locally busy (and still in the plain RECV state),
 * an RNR is sent immediately.  Otherwise pending I-frames are sent
 * first, since they piggyback the acknowledgement; if frames still need
 * acking and at least 3/4 of the tx window is outstanding, an explicit
 * RR goes out now, otherwise the ack timer is started so future acks
 * can be batched.
 */
static void l2cap_ertm_send_ack(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control control;
	u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
	int threshold;

	BT_DBG("sk %p", sk);
	BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
		(int)pi->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';

	if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
		pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
		/* Busy: tell the peer to stop sending (RNR acks too) */
		l2cap_ertm_stop_ack_timer(pi);
		control.super = L2CAP_SFRAME_RNR;
		control.reqseq = pi->buffer_seq;
		l2cap_ertm_send_sframe(sk, &control);
	} else {
		if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
			l2cap_ertm_send(sk);
			/* If any i-frames were sent, they included an ack */
			if (pi->buffer_seq == pi->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = pi->tx_win;
		threshold += threshold << 1;	/* threshold = 3 * tx_win */
		threshold >>= 2;		/* threshold = 3/4 * tx_win */

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			l2cap_ertm_stop_ack_timer(pi);
			control.super = L2CAP_SFRAME_RR;
			control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &control);
			frames_to_ack = 0;
		}

		/* Otherwise defer the ack, hoping to piggyback it later */
		if (frames_to_ack)
			l2cap_ertm_start_ack_timer(pi);
	}
}
1973
1974static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
1975{
1976 struct l2cap_pinfo *pi;
1977 struct bt_l2cap_control control;
1978
1979 BT_DBG("sk %p, poll %d", sk, (int) poll);
1980
1981 pi = l2cap_pi(sk);
1982
1983 memset(&control, 0, sizeof(control));
1984 control.frame_type = 's';
1985 control.poll = poll;
1986
1987 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
1988 control.super = L2CAP_SFRAME_RNR;
1989 else
1990 control.super = L2CAP_SFRAME_RR;
1991
1992 control.reqseq = pi->buffer_seq;
1993 l2cap_ertm_send_sframe(sk, &control);
1994}
1995
/* Respond to a poll (P=1) from the peer with a frame carrying F=1.
 *
 * Sets L2CAP_CONN_SEND_FBIT so the F-bit rides on whatever goes out
 * first: an RNR if locally busy, otherwise any pending I-frames.  If
 * nothing else carried the F-bit, an explicit RR is sent at the end.
 * Also clears REMOTE_BUSY and restarts the retransmission timer when
 * unacked frames are outstanding.
 */
static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.final = 1;
	control.reqseq = pi->buffer_seq;
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Busy: answer the poll with RNR(F=1) */
		control.super = L2CAP_SFRAME_RNR;
		l2cap_ertm_send_sframe(sk, &control);
	}

	if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
		(pi->unacked_frames > 0))
		l2cap_ertm_start_retrans_timer(pi);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Send pending iframes */
	l2cap_ertm_send(sk);

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SFRAME_RR;
		l2cap_ertm_send_sframe(sk, &control);
	}
}
2033
2034static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
2035{
2036 struct bt_l2cap_control control;
2037 struct l2cap_pinfo *pi;
2038 u16 seq;
2039
2040 BT_DBG("sk %p, txseq %d", sk, (int)txseq);
2041
2042 pi = l2cap_pi(sk);
2043 memset(&control, 0, sizeof(control));
2044 control.frame_type = 's';
2045 control.super = L2CAP_SFRAME_SREJ;
2046
2047 for (seq = pi->expected_tx_seq; seq != txseq;
2048 seq = __next_seq(seq, pi)) {
2049 if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
2050 control.reqseq = seq;
2051 l2cap_ertm_send_sframe(sk, &control);
2052 l2cap_seq_list_append(&pi->srej_list, seq);
2053 }
2054 }
2055
2056 pi->expected_tx_seq = __next_seq(txseq, pi);
2057}
2058
2059static void l2cap_ertm_send_srej_tail(struct sock *sk)
2060{
2061 struct bt_l2cap_control control;
2062 struct l2cap_pinfo *pi;
2063
2064 BT_DBG("sk %p", sk);
2065
2066 pi = l2cap_pi(sk);
2067
2068 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2069 return;
2070
2071 memset(&control, 0, sizeof(control));
2072 control.frame_type = 's';
2073 control.super = L2CAP_SFRAME_SREJ;
2074 control.reqseq = pi->srej_list.tail;
2075 l2cap_ertm_send_sframe(sk, &control);
2076}
2077
2078static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
2079{
2080 struct bt_l2cap_control control;
2081 struct l2cap_pinfo *pi;
2082 u16 initial_head;
2083 u16 seq;
2084
2085 BT_DBG("sk %p, txseq %d", sk, (int) txseq);
2086
2087 pi = l2cap_pi(sk);
2088 memset(&control, 0, sizeof(control));
2089 control.frame_type = 's';
2090 control.super = L2CAP_SFRAME_SREJ;
2091
2092 /* Capture initial list head to allow only one pass through the list. */
2093 initial_head = pi->srej_list.head;
2094
2095 do {
2096 seq = l2cap_seq_list_pop(&pi->srej_list);
2097 if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
2098 break;
2099
2100 control.reqseq = seq;
2101 l2cap_ertm_send_sframe(sk, &control);
2102 l2cap_seq_list_append(&pi->srej_list, seq);
2103 } while (pi->srej_list.head != initial_head);
2104}
2105
2106static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2107{
2108 struct l2cap_pinfo *pi = l2cap_pi(sk);
2109 BT_DBG("sk %p", sk);
2110
2111 pi->expected_tx_seq = pi->buffer_seq;
2112 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2113 skb_queue_purge(SREJ_QUEUE(sk));
2114 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2115}
2116
/* ERTM transmit state machine: XMIT (normal transmission) state handler.
 *
 * Handles data requests, local-busy transitions (including the AMP
 * channel-move interactions on busy-clear), incoming acknowledgements,
 * explicit polls, and retransmission-timer expiry.  Poll-type events
 * move the channel to the WAIT_F state.
 *
 * Returns 0 (err is reserved for future use).
 */
static int l2cap_ertm_tx_state_xmit(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		/* Queue new frames and transmit as the window allows */
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		l2cap_ertm_send(sk);
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Sends an RNR since we are now busy */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		/* An AMP move deferred until local busy cleared resumes now */
		if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
			if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
						pi->amp_move_cmd_ident,
						pi->dcid,
						L2CAP_MOVE_CHAN_SUCCESS);
			}
			break;
		}

		/* If an RNR went out while busy, poll the peer with
		 * RR(P=1) to learn its state, and wait for the F-bit.
		 */
		if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
			(pi->conn_state & L2CAP_CONN_SENT_RNR)) {
			struct bt_l2cap_control local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		l2cap_ertm_stop_ack_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
		/* No ack arrived in time: poll the peer and wait for F=1 */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2215
/* ERTM transmit state machine: WAIT_F state handler.
 *
 * Entered after sending a poll (P=1); transmission is suspended until
 * the peer answers with the final bit (F=1), so new data is queued but
 * not sent.  If the monitor timer expires first, the poll is retried up
 * to max_tx times (0 means unlimited) before the channel is torn down.
 *
 * Returns 0 (err is reserved for future use).
 */
static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Sends an RNR since we are now busy */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
			/* An RNR went out while busy; re-poll the peer
			 * with RR(P=1) to learn its current state.
			 */
			struct bt_l2cap_control local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);

		/* Fall through */

	case L2CAP_ERTM_EVENT_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: resume normal transmission */
			l2cap_ertm_stop_monitor_timer(pi);
			if (pi->unacked_frames > 0)
				l2cap_ertm_start_retrans_timer(pi);
			pi->retry_count = 0;
			pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
		}
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
		/* max_tx == 0 means retry forever */
		if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
			l2cap_ertm_send_rr_or_rnr(sk, 1);
			l2cap_ertm_start_monitor_timer(pi);
			pi->retry_count += 1;
		} else
			l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
		break;
	default:
		break;
	}

	return err;
}
2298
2299int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2300 struct sk_buff_head *skbs, u8 event)
2301{
2302 struct l2cap_pinfo *pi;
2303 int err = 0;
2304
2305 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2306 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2307
2308 pi = l2cap_pi(sk);
2309
2310 switch (pi->tx_state) {
2311 case L2CAP_ERTM_TX_STATE_XMIT:
2312 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2313 break;
2314 case L2CAP_ERTM_TX_STATE_WAIT_F:
2315 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2316 break;
2317 default:
2318 /* Ignore event */
2319 break;
2320 }
2321
2322 return err;
2323}
2324
/* Split one SDU from @msg into ERTM I-frame PDUs appended to @seg_queue.
 *
 * PDU payload size is bounded by the HCI MTU (and by the BR/EDR maximum
 * radio packet when no AMP controller is in use), minus worst-case
 * L2CAP overhead, and by the remote's configured MPS.  The first PDU of
 * a segmented SDU carries the SDU length field; SAR markers
 * (UNSEGMENTED/START/CONTINUE/END) are recorded in each skb's control
 * block.  @reseg is passed through to request non-blocking,
 * kernel-space copies.
 *
 * Returns 0 on success; on PDU creation failure, purges @seg_queue and
 * returns the error from l2cap_create_iframe_pdu().
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* START PDU loses payload space to the SDU length field */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * later PDUs get that space back for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2393
2394static inline int is_initial_frame(u8 sar)
2395{
2396 return (sar == L2CAP_SAR_UNSEGMENTED ||
2397 sar == L2CAP_SAR_START);
2398}
2399
2400static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2401 size_t veclen)
2402{
2403 struct sk_buff *frag_iter;
2404
2405 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2406
2407 if (iv->iov_len + skb->len > veclen)
2408 return -ENOMEM;
2409
2410 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2411 iv->iov_len += skb->len;
2412
2413 skb_walk_frags(skb, frag_iter) {
2414 if (iv->iov_len + skb->len > veclen)
2415 return -ENOMEM;
2416
2417 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2418 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2419 frag_iter->len);
2420 iv->iov_len += frag_iter->len;
2421 }
2422
2423 return 0;
2424}
2425
/* Re-segment every PDU waiting on @queue after a channel move changed
 * the PDU size constraints (different HCI MTU and/or remote MPS).
 *
 * Each queued SDU is reassembled into a flat temporary buffer (PDU
 * headers, SDU length fields and FCS trailers stripped), then re-cut
 * into PDUs with l2cap_segment_sdu() and spliced back onto @queue.
 * The SAR bits of the first re-segmented PDU are patched so a
 * partially-sent SDU stays consistent with what the peer has already
 * received.
 *
 * Returns 0 on success or a negative errno; on error @queue is purged.
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Largest possible reassembled SDU, plus room for an FCS */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		/* Start frames carry a 2-byte SDU length field */
		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* Drop the trailing FCS that was appended at segmentation */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Pull in the continuation/end frames of the same SDU */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			/* A new SDU starts here; current SDU is complete */
			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU.  The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		/* NOTE(review): msg_iovlen normally counts iovecs; here it
		 * carries the byte length, matching what l2cap_segment_sdu
		 * is passed as its len argument — confirm against callee.
		 */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
					msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
				err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
				skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
					skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
							L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2598
/* Workqueue handler that re-segments a channel's TX queue after an AMP
 * move.  The work item itself is freed immediately; the socket
 * reference taken by l2cap_setup_resegment() is dropped (sock_put)
 * on every exit path.
 */
static void l2cap_resegment_worker(struct work_struct *work)
{
	int err = 0;
	struct l2cap_resegment_work *seg_work =
		container_of(work, struct l2cap_resegment_work, work);
	struct sock *sk = seg_work->sk;

	/* Work item no longer needed; only sk is used from here on */
	kfree(seg_work);

	BT_DBG("sk %p", sk);
	lock_sock(sk);

	/* Bail out if the move state changed while the work was queued */
	if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		release_sock(sk);
		sock_put(sk);
		return;
	}

	err = l2cap_resegment_queue(sk, TX_QUEUE(sk));

	l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;

	/* Re-point sk_send_head at the (possibly rebuilt) queue head */
	if (skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));

	/* On resegmentation failure the channel cannot continue */
	if (err)
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else
		l2cap_ertm_send(sk);

	release_sock(sk);
	sock_put(sk);
}
2634
2635static int l2cap_setup_resegment(struct sock *sk)
2636{
2637 struct l2cap_resegment_work *seg_work;
2638
2639 BT_DBG("sk %p", sk);
2640
2641 if (skb_queue_empty(TX_QUEUE(sk)))
2642 return 0;
2643
2644 seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
2645 if (!seg_work)
2646 return -ENOMEM;
2647
2648 INIT_WORK(&seg_work->work, l2cap_resegment_worker);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002649 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002650 seg_work->sk = sk;
2651
2652 if (!queue_work(_l2cap_wq, &seg_work->work)) {
2653 kfree(seg_work);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002654 sock_put(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002655 return -ENOMEM;
2656 }
2657
2658 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
2659
2660 return 0;
2661}
2662
/* Receive buffer pressure check: nonzero while receive allocations are
 * below one third of sk_rcvbuf.
 */
static inline int l2cap_rmem_available(struct sock *sk)
{
	BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
		atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
	return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
}
2669
/* Receive buffer pressure check: nonzero once receive allocations
 * exceed two thirds of sk_rcvbuf.
 */
static inline int l2cap_rmem_full(struct sock *sk)
{
	BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
		atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
	return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
}
2676
2677void l2cap_amp_move_init(struct sock *sk)
2678{
2679 BT_DBG("sk %p", sk);
2680
2681 if (!l2cap_pi(sk)->conn)
2682 return;
2683
2684 if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
2685 return;
2686
2687 if (l2cap_pi(sk)->amp_id == 0) {
2688 if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
2689 return;
2690 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2691 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
2692 amp_create_physical(l2cap_pi(sk)->conn, sk);
2693 } else {
2694 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2695 l2cap_pi(sk)->amp_move_state =
2696 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
2697 l2cap_pi(sk)->amp_move_id = 0;
2698 l2cap_amp_move_setup(sk);
2699 l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
2700 l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
2701 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2702 }
2703}
2704
2705static void l2cap_chan_ready(struct sock *sk)
2706{
2707 struct sock *parent = bt_sk(sk)->parent;
2708
2709 BT_DBG("sk %p, parent %p", sk, parent);
2710
2711 l2cap_pi(sk)->conf_state = 0;
2712 l2cap_sock_clear_timer(sk);
2713
2714 if (!parent) {
2715 /* Outgoing channel.
2716 * Wake up socket sleeping on connect.
2717 */
2718 sk->sk_state = BT_CONNECTED;
2719 sk->sk_state_change(sk);
2720 } else {
2721 /* Incoming channel.
2722 * Wake up socket sleeping on accept.
2723 */
2724 parent->sk_data_ready(parent, 0);
2725 }
2726}
2727
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728/* Copy frame to all raw sockets on that connection */
2729static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2730{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002731 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002733 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734
2735 BT_DBG("conn %p", conn);
2736
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002737 read_lock(&l->lock);
2738 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2739 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 continue;
2741
2742 /* Don't send frame to the socket it came from */
2743 if (skb->sk == sk)
2744 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002745 nskb = skb_clone(skb, GFP_ATOMIC);
2746 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 continue;
2748
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002749 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 kfree_skb(nskb);
2751 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002752 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753}
2754
2755/* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU (L2CAP header + command header +
 * dlen bytes of payload).  The first skb is capped at the controller's
 * ACL MTU; any remaining payload is carried in headerless continuation
 * skbs chained on frag_list.  The signalling CID is chosen by link
 * type (LE vs BR/EDR).  Returns the skb chain, or NULL on allocation
 * failure (partially built chain is freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb also frees any frag_list skbs already chained */
	kfree_skb(skb);
	return NULL;
}
2819
2820static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2821{
2822 struct l2cap_conf_opt *opt = *ptr;
2823 int len;
2824
2825 len = L2CAP_CONF_OPT_SIZE + opt->len;
2826 *ptr += len;
2827
2828 *type = opt->type;
2829 *olen = opt->len;
2830
2831 switch (opt->len) {
2832 case 1:
2833 *val = *((u8 *) opt->val);
2834 break;
2835
2836 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002837 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 break;
2839
2840 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002841 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 break;
2843
2844 default:
2845 *val = (unsigned long) opt->val;
2846 break;
2847 }
2848
2849 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2850 return len;
2851}
2852
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2854{
2855 struct l2cap_conf_opt *opt = *ptr;
2856
2857 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2858
2859 opt->type = type;
2860 opt->len = len;
2861
2862 switch (len) {
2863 case 1:
2864 *((u8 *) opt->val) = val;
2865 break;
2866
2867 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002868 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 break;
2870
2871 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002872 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 break;
2874
2875 default:
2876 memcpy(opt->val, (void *) val, len);
2877 break;
2878 }
2879
2880 *ptr += L2CAP_CONF_OPT_SIZE + len;
2881}
2882
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002883static void l2cap_ertm_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002884{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002885 struct delayed_work *delayed =
2886 container_of(work, struct delayed_work, work);
2887 struct l2cap_pinfo *pi =
2888 container_of(delayed, struct l2cap_pinfo, ack_work);
2889 struct sock *sk = (struct sock *)pi;
2890 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002892 BT_DBG("sk %p", sk);
2893
2894 if (!sk)
2895 return;
2896
2897 lock_sock(sk);
2898
2899 if (!l2cap_pi(sk)->conn) {
2900 release_sock(sk);
2901 return;
2902 }
2903
2904 frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
2905 l2cap_pi(sk)->last_acked_seq,
2906 l2cap_pi(sk));
2907
2908 if (frames_to_ack)
2909 l2cap_ertm_send_rr_or_rnr(sk, 0);
2910
2911 release_sock(sk);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002912}
2913
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002914static void l2cap_ertm_retrans_timeout(struct work_struct *work)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002915{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002916 struct delayed_work *delayed =
2917 container_of(work, struct delayed_work, work);
2918 struct l2cap_pinfo *pi =
2919 container_of(delayed, struct l2cap_pinfo, retrans_work);
2920 struct sock *sk = (struct sock *)pi;
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03002921
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002922 BT_DBG("sk %p", sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002923
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002924 if (!sk)
2925 return;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002926
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002927 lock_sock(sk);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03002928
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002929 if (!l2cap_pi(sk)->conn) {
2930 release_sock(sk);
2931 return;
2932 }
Gustavo F. Padovan39d5a3e2011-04-04 15:40:12 -03002933
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002934 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
2935 release_sock(sk);
2936}
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03002937
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002938static void l2cap_ertm_monitor_timeout(struct work_struct *work)
2939{
2940 struct delayed_work *delayed =
2941 container_of(work, struct delayed_work, work);
2942 struct l2cap_pinfo *pi =
2943 container_of(delayed, struct l2cap_pinfo, monitor_work);
2944 struct sock *sk = (struct sock *)pi;
2945
2946 BT_DBG("sk %p", sk);
2947
2948 if (!sk)
2949 return;
2950
2951 lock_sock(sk);
2952
2953 if (!l2cap_pi(sk)->conn) {
2954 release_sock(sk);
2955 return;
2956 }
2957
2958 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
2959
2960 release_sock(sk);
2961}
2962
2963static inline void l2cap_ertm_init(struct sock *sk)
2964{
2965 l2cap_pi(sk)->next_tx_seq = 0;
2966 l2cap_pi(sk)->expected_tx_seq = 0;
2967 l2cap_pi(sk)->expected_ack_seq = 0;
2968 l2cap_pi(sk)->unacked_frames = 0;
2969 l2cap_pi(sk)->buffer_seq = 0;
2970 l2cap_pi(sk)->frames_sent = 0;
2971 l2cap_pi(sk)->last_acked_seq = 0;
2972 l2cap_pi(sk)->sdu = NULL;
2973 l2cap_pi(sk)->sdu_last_frag = NULL;
2974 l2cap_pi(sk)->sdu_len = 0;
2975 atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
2976
2977 l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2978 l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2979
2980 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
2981 l2cap_pi(sk)->rx_state);
2982
2983 l2cap_pi(sk)->amp_id = 0;
2984 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2985 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
2986 l2cap_pi(sk)->amp_move_reqseq = 0;
2987 l2cap_pi(sk)->amp_move_event = 0;
2988
2989 INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
2990 INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
2991 l2cap_ertm_retrans_timeout);
2992 INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
2993 l2cap_ertm_monitor_timeout);
2994 INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
2995 skb_queue_head_init(SREJ_QUEUE(sk));
2996 skb_queue_head_init(TX_QUEUE(sk));
2997
2998 l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
2999 l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
3000 l2cap_pi(sk)->remote_tx_win);
3001}
3002
3003void l2cap_ertm_destruct(struct sock *sk)
3004{
3005 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
3006 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
3007}
3008
/* Stop all three ERTM timers (ack, retransmission, monitor) on
 * channel shutdown.
 */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
3015
/* Called after received data has been consumed by the socket layer.
 * For ERTM channels, flush any queued in-sequence I-frames (while in
 * the SREJ_SENT state), and clear local-busy once receive buffer
 * pressure drops below the availability threshold.
 */
void l2cap_ertm_recv_done(struct sock *sk)
{
	lock_sock(sk);

	if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) {
		release_sock(sk);
		return;
	}

	/* Consume any queued incoming frames and update local busy status */
	if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
			l2cap_ertm_rx_queued_iframes(sk))
		/* Queued-frame processing failed; drop the channel */
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

	release_sock(sk);
}
3035
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003036static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3037{
3038 switch (mode) {
3039 case L2CAP_MODE_STREAMING:
3040 case L2CAP_MODE_ERTM:
3041 if (l2cap_mode_supported(mode, remote_feat_mask))
3042 return mode;
3043 /* fall through */
3044 default:
3045 return L2CAP_MODE_BASIC;
3046 }
3047}
3048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003049static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003051 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3052 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3053 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3054 pi->extended_control = 1;
3055 } else {
3056 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3057 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3058
3059 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3060 pi->extended_control = 0;
3061 }
3062}
3063
/* Combine a new flow spec into the current aggregate.  max_sdu of
 * 0xFFFF or sdu_arr_time of 0xFFFFFFFF denote an unknown rate; an
 * unknown input makes the aggregate unknown.  Otherwise the two data
 * rates (bytes/sec) are summed and converted back into an SDU
 * inter-arrival time.
 */
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *new,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		/* current flow spec has known rate */
		if ((new->max_sdu == 0xFFFF) ||
				(new->sdu_arr_time == 0xFFFFFFFF)) {
			/* new fs has unknown rate, so aggregate is unknown */
			agg->max_sdu = 0xFFFF;
			agg->sdu_arr_time = 0xFFFFFFFF;
		} else {
			/* new fs has known rate, so aggregate is known */
			u64 cur_rate;
			u64 new_rate;
			cur_rate = cur->max_sdu * 1000000ULL;
			if (cur->sdu_arr_time)
				cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
			new_rate = new->max_sdu * 1000000ULL;
			if (new->sdu_arr_time)
				new_rate = div_u64(new_rate, new->sdu_arr_time);
			cur_rate = cur_rate + new_rate;
			/* NOTE(review): agg->max_sdu stays at cur->max_sdu
			 * (copied above), so the combined rate is expressed
			 * as a shorter arrival time for cur's SDU size —
			 * confirm this is the intended encoding.
			 */
			agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
					cur_rate);
		}
	}
}
3092
3093static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3094{
3095 struct hci_ext_fs tx_fs;
3096 struct hci_ext_fs rx_fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003098 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003100 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3101 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3102 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3103 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3104 return 0;
3105
3106 l2cap_aggregate_fs(&chan->tx_fs,
3107 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3108 l2cap_aggregate_fs(&chan->rx_fs,
3109 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3110 hci_chan_modify(chan, &tx_fs, &rx_fs);
3111 return 1;
3112}
3113
/* Remove an old flow spec from the current aggregate (inverse of
 * l2cap_aggregate_fs): subtract the old data rate from the current
 * one and convert back into an SDU inter-arrival time.  Aggregates
 * with unknown rate (0xFFFF / 0xFFFFFFFF sentinels) are left as-is.
 */
static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *old,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		u64 cur_rate;
		u64 old_rate;
		cur_rate = cur->max_sdu * 1000000ULL;
		if (cur->sdu_arr_time)
			cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
		old_rate = old->max_sdu * 1000000ULL;
		if (old->sdu_arr_time)
			old_rate = div_u64(old_rate, old->sdu_arr_time);
		/* NOTE(review): no guard against old_rate >= cur_rate;
		 * a zero or underflowed rate feeds the division below —
		 * confirm callers guarantee old was part of the aggregate.
		 */
		cur_rate = cur_rate - old_rate;
		agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
				cur_rate);
	}
}
3133
3134static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3135{
3136 struct hci_ext_fs tx_fs;
3137 struct hci_ext_fs rx_fs;
3138
3139 BT_DBG("chan %p", chan);
3140
3141 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3142 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3143 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3144 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3145 return 0;
3146
3147 l2cap_deaggregate_fs(&chan->tx_fs,
3148 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3149 l2cap_deaggregate_fs(&chan->rx_fs,
3150 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3151 hci_chan_modify(chan, &tx_fs, &rx_fs);
3152 return 1;
3153}
3154
/* Admit an L2CAP channel onto the AMP controller identified by
 * amp_id.  Looks up the AMP's hci_dev and its ACL link to the peer;
 * if a logical channel already exists for that link it is reused (the
 * flow specs are aggregated and a reference is taken), otherwise a
 * new hci_chan is accepted (incoming) or created (outgoing).  Returns
 * the hci_chan or NULL; the hci_dev reference is always dropped.
 */
static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct l2cap_pinfo *pi)
{
	struct hci_dev *hdev;
	struct hci_conn *hcon;
	struct hci_chan *chan;

	hdev = hci_dev_get(A2MP_HCI_ID(amp_id));
	if (!hdev)
		return NULL;

	BT_DBG("hdev %s", hdev->name);

	hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
	if (!hcon) {
		chan = NULL;
		goto done;
	}

	/* Reuse an existing logical channel on this link if present */
	chan = hci_chan_list_lookup_id(hdev, hcon->handle);
	if (chan) {
		l2cap_aggregate(chan, pi);
		hci_chan_hold(chan);
		goto done;
	}

	if (bt_sk(pi)->parent) {
		/* Incoming connection */
		chan = hci_chan_accept(hcon,
			(struct hci_ext_fs *) &pi->local_fs,
			(struct hci_ext_fs *) &pi->remote_fs);
	} else {
		/* Outgoing connection */
		chan = hci_chan_create(hcon,
			(struct hci_ext_fs *) &pi->local_fs,
			(struct hci_ext_fs *) &pi->remote_fs);
	}
done:
	hci_dev_put(hdev);
	return chan;
}
3195
/* Build the options of an outgoing Configure Request into @data.
 * On the first request (no config exchanged yet) the channel mode may
 * be downgraded via l2cap_select_mode() to one the remote supports.
 * Options emitted depend on the mode: MTU (when non-default), RFC,
 * extended window size, an extended flow spec for AMP channels, and
 * FCS when the connection supports the FCS option.  Returns the
 * number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	/* rfc.mode snapshots the mode requested before any downgrade */
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode fixed by the application; do not renegotiate */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Explicit RFC option only needed if the peer knows
		 * about non-basic modes at all.
		 */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		l2cap_setup_txwin(pi);
		/* RFC option carries at most the enhanced window size;
		 * larger windows go in the extended window option below.
		 */
		if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
			rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
		else
			rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->tx_win);
		}

		if (pi->amp_id) {
			/* default best effort extended flow spec */
			struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3313
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003314
/* Build the options of a Configure Request used to reconfigure an
 * ERTM channel after an AMP move.  The retransmission and monitor
 * timeouts are derived from the controller's best-effort flush
 * timeout when moving onto an AMP (amp_move_id set), or reset to
 * defaults when moving back to BR/EDR.  FCS is disabled on AMP and
 * CRC16 on BR/EDR.  Only ERTM channels can be reconfigured; returns
 * bytes written or -ECONNREFUSED for other modes.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;
	u32 be_flush_to;

	BT_DBG("sk %p", sk);

	/* convert to milliseconds, round up */
	be_flush_to = (pi->conn->hcon->hdev->amp_be_flush_to + 999) / 1000;

	switch (pi->mode) {
	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		if (pi->amp_move_id) {
			/* Moving to AMP: scale timeouts from the
			 * controller's flush timeout.
			 */
			rfc.retrans_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
			rfc.monitor_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
		} else {
			/* Moving back to BR/EDR: default timeouts */
			rfc.retrans_timeout =
					cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
					cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		}
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		break;

	default:
		return -ECONNREFUSED;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {

		/* TODO assign fcs for br/edr based on socket config option */
		if (pi->amp_move_id)
			pi->local_conf.fcs = L2CAP_FCS_NONE;
		else
			pi->local_conf.fcs = L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						pi->local_conf.fcs);

		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3376
/* Parse the peer's Configure Request (stored in pi->conf_req/conf_len) and
 * build our Configure Response into @data.
 *
 * Handles both classic configuration and the AMP "lockstep" procedure:
 * under L2CAP_CONF_LOCKSTEP the flush-timeout and QoS options are rejected
 * in favor of the extended flow spec (EXT_FS), and an EXT_FS option is
 * mandatory (else -ECONNREFUSED).
 *
 * Returns the response length in bytes, or -ECONNREFUSED when the request
 * cannot be answered at all (unresolvable mode mismatch, missing EXT_FS in
 * lockstep mode, or logical-link admission failure).
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: collect every option the peer proposed. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; non-hint unknown options
		 * must be reported back as L2CAP_CONF_UNKNOWN. */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			/* In lockstep mode the flush timeout must come via
			 * the extended flow spec instead. */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			/* Length-checked before copying into the stack
			 * buffer. */
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			if (olen == sizeof(fs)) {
				pi->conf_state |= L2CAP_CONF_EFS_RECV;
				/* EXT_FS is only valid in lockstep mode. */
				if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
					result = L2CAP_CONF_UNACCEPT;
					break;
				}
				memcpy(&fs, (void *) val, olen);
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
					break;
				}
				pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				pi->remote_fs.id = fs.id;
				pi->remote_fs.type = fs.type;
				pi->remote_fs.max_sdu =
						le16_to_cpu(fs.max_sdu);
				pi->remote_fs.sdu_arr_time =
						le32_to_cpu(fs.sdu_arr_time);
				pi->remote_fs.acc_latency =
						le32_to_cpu(fs.acc_latency);
				pi->remote_fs.flush_to =
						le32_to_cpu(fs.flush_to);
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			pi->extended_control = 1;
			pi->remote_tx_win = val;
			pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
			pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
			break;

		default:
			if (hint)
				break;

			/* Echo the unrecognized option type back to the
			 * peer in the response payload. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode negotiation happens only on the first request/response
	 * exchange. */
	if (pi->num_conf_rsp || pi->num_conf_req > 1)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			/* We may fall back: pick the best mode that both
			 * the peer's proposal and our features allow. */
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* "State 2" device: our configured mode is mandatory. */
		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		/* Second mismatch in a row: give up on negotiation. */
		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	/* Lockstep configuration requires an extended flow spec. */
	if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
			!(pi->conf_state & L2CAP_CONF_EFS_RECV))
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU) {
			result = L2CAP_CONF_UNACCEPT;
			pi->omtu = L2CAP_DEFAULT_MIN_MTU;
		}
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			/* Basic mode never uses an FCS. */
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			/* An explicit EXT_WINDOW option overrides the
			 * tx window carried inside the RFC option. */
			if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
				pi->remote_tx_win = rfc.txwin_size;

			pi->remote_max_tx = rfc.max_transmit;

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			/* We dictate the timeouts in our response. */
			rfc.retrans_timeout =
				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			/* fs was filled in the EXT_FS case above; the
			 * EFS_RECV check before this block guarantees it
			 * was received in lockstep mode. */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		/* Lockstep: answer with CONF_PENDING first, and kick off
		 * logical link creation on the AMP controller. */
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
			pi->conf_state |= L2CAP_CONF_PEND_SENT;
			result = L2CAP_CONF_PENDING;

			if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
					pi->amp_id) {
				struct hci_chan *chan;
				/* Trigger logical link creation only on AMP */

				chan = l2cap_chan_admit(pi->amp_id, pi);
				if (!chan)
					return -ECONNREFUSED;

				chan->l2cap_sk = sk;
				if (chan->state == BT_CONNECTED)
					l2cap_create_cfm(chan, 0);
			}
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3597
/* Parse the peer's Configure Request received while reconfiguring the
 * channel for an AMP move, and build our Configure Response into @data.
 *
 * During a move, most parameters are locked down: the MTU may not shrink,
 * the extended tx window may not change, and flush-timeout/QoS options are
 * only acceptable when moving back to BR/EDR (amp_move_id == 0).
 *
 * Returns the response length in bytes.
 */
static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	u16 mtu = pi->omtu;
	u16 tx_win = pi->remote_tx_win;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* Collect the proposed options, flagging anything that may not
	 * change during a channel move. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			/* Only valid when the channel moves back to
			 * BR/EDR. */
			if (pi->amp_move_id)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			if (pi->amp_move_id)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			/* The retransmission mode itself may not change,
			 * and basic mode has nothing to reconfigure. */
			if (pi->mode != rfc.mode ||
					rfc.mode == L2CAP_MODE_BASIC)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_FCS:
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			if (olen == sizeof(fs)) {
				memcpy(&fs, (void *) val, olen);
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
				else {
					pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				}
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Report unknown non-hint options back to the
			 * peer. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
		result, pi->mode, rfc.mode);

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* Don't allow mtu to decrease. */
		if (mtu < pi->omtu)
			result = L2CAP_CONF_UNACCEPT;

		BT_DBG("mtu %d omtu %d", mtu, pi->omtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		/* Don't allow extended transmit window to change. */
		if (tx_win != pi->remote_tx_win) {
			result = L2CAP_CONF_UNACCEPT;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->remote_tx_win);
		}

		if (rfc.mode == L2CAP_MODE_ERTM) {
			/* Accept the peer's proposed ERTM timeouts and
			 * echo the RFC option back. */
			pi->remote_conf.retrans_timeout =
					le16_to_cpu(rfc.retrans_timeout);
			pi->remote_conf.monitor_timeout =
					le16_to_cpu(rfc.monitor_timeout);

			BT_DBG("remote conf monitor timeout %d",
					pi->remote_conf.monitor_timeout);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
		}

	}

	if (result != L2CAP_CONF_SUCCESS)
		goto done;

	/* Either side requesting an FCS forces it on. */
	pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs ;

	/* While waiting for the F-flag, latch the newly agreed timeouts.
	 * Moving back to BR/EDR uses the spec-default monitor timeout. */
	if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG) {
		pi->flush_to = pi->remote_conf.flush_to;
		pi->retrans_timeout = pi->remote_conf.retrans_timeout;

		if (pi->amp_move_id)
			pi->monitor_timeout = pi->remote_conf.monitor_timeout;
		else
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
		BT_DBG("mode %d monitor timeout %d",
			pi->mode, pi->monitor_timeout);

	}

done:
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3738
3739static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
3740{
3741 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003742 struct l2cap_conf_req *req = data;
3743 void *ptr = req->data;
3744 int type, olen;
3745 unsigned long val;
3746 struct l2cap_conf_rfc rfc;
3747
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003748 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003749
3750 while (len >= L2CAP_CONF_OPT_SIZE) {
3751 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3752
3753 switch (type) {
3754 case L2CAP_CONF_MTU:
3755 if (val < L2CAP_DEFAULT_MIN_MTU) {
3756 *result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003757 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003758 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003759 pi->imtu = val;
3760 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003761 break;
3762
3763 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003764 pi->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003765 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003766 2, pi->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003767 break;
3768
3769 case L2CAP_CONF_RFC:
3770 if (olen == sizeof(rfc))
3771 memcpy(&rfc, (void *)val, olen);
3772
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003773 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
3774 rfc.mode != pi->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003775 return -ECONNREFUSED;
3776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003777 pi->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003778
3779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3780 sizeof(rfc), (unsigned long) &rfc);
3781 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003782
3783 case L2CAP_CONF_EXT_WINDOW:
3784 pi->tx_win = val;
3785
3786 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3787 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3788
3789 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
3790 2, pi->tx_win);
3791 break;
3792
3793 default:
3794 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003795 }
3796 }
3797
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003798 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003799 return -ECONNREFUSED;
3800
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003801 pi->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003802
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003803 if (*result == L2CAP_CONF_SUCCESS) {
3804 switch (rfc.mode) {
3805 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003806 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3807 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3808 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003809 break;
3810 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003811 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003812 }
3813 }
3814
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003815 req->dcid = cpu_to_le16(pi->dcid);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003816 req->flags = cpu_to_le16(0x0000);
3817
3818 return ptr - data;
3819}
3820
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003821static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822{
3823 struct l2cap_conf_rsp *rsp = data;
3824 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003826 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003828 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003829 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003830 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831
3832 return ptr - data;
3833}
3834
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003835static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003836{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003837 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003838 int type, olen;
3839 unsigned long val;
3840 struct l2cap_conf_rfc rfc;
3841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003842 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003844 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003845 return;
3846
3847 while (len >= L2CAP_CONF_OPT_SIZE) {
3848 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3849
3850 switch (type) {
3851 case L2CAP_CONF_RFC:
3852 if (olen == sizeof(rfc))
3853 memcpy(&rfc, (void *)val, olen);
3854 goto done;
3855 }
3856 }
3857
3858done:
3859 switch (rfc.mode) {
3860 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003861 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3862 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3863 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003864 break;
3865 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003866 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003867 }
3868}
3869
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003870static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3871{
3872 struct l2cap_pinfo *pi = l2cap_pi(sk);
3873 int type, olen;
3874 unsigned long val;
3875 struct l2cap_conf_ext_fs fs;
3876
3877 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3878
3879 while (len >= L2CAP_CONF_OPT_SIZE) {
3880 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3881 if ((type == L2CAP_CONF_EXT_FS) &&
3882 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3883 memcpy(&fs, (void *)val, olen);
3884 pi->local_fs.id = fs.id;
3885 pi->local_fs.type = fs.type;
3886 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3887 pi->local_fs.sdu_arr_time =
3888 le32_to_cpu(fs.sdu_arr_time);
3889 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3890 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3891 break;
3892 }
3893 }
3894
3895}
3896
3897static int l2cap_finish_amp_move(struct sock *sk)
3898{
3899 struct l2cap_pinfo *pi;
3900 int err;
3901
3902 BT_DBG("sk %p", sk);
3903
3904 pi = l2cap_pi(sk);
3905
3906 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
3907 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3908
3909 if (pi->ampcon)
3910 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
3911 else
3912 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
3913
3914 err = l2cap_setup_resegment(sk);
3915
3916 return err;
3917}
3918
/* Handle the peer's Configure Response during a post-move channel
 * reconfiguration. Validates the RFC option (if present), stops all ERTM
 * timers, then either answers the peer's poll (acceptor side) or issues
 * an explicit poll and waits for the F-flag (initiator side).
 *
 * Returns 0 on success or a negative error (-ECONNREFUSED when no
 * reconfiguration is in progress or the peer proposed a bad mode).
 */
static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
					u16 result)
{
	int err = 0;
	struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);

	/* An unsolicited response is refused outright. */
	if (pi->reconf_state == L2CAP_RECONF_NONE)
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		/* Only the RFC option matters here; it must keep the
		 * current mode or be ERTM. */
		while (len >= L2CAP_CONF_OPT_SIZE) {
			int type, olen;
			unsigned long val;

			len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

			if (type == L2CAP_CONF_RFC) {
				if (olen == sizeof(rfc))
					memcpy(&rfc, (void *)val, olen);
				if (rfc.mode != pi->mode &&
						rfc.mode != L2CAP_MODE_ERTM) {
					err = -ECONNREFUSED;
					goto done;
				}
				break;
			}
		}
	}

done:
	/* Timers are restarted as needed once traffic resumes. */
	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);

	if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;

		/* Respond to poll */
		err = l2cap_answer_move_poll(sk);

	} else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {
		/* NOTE(review): reconf_state is not reset to
		 * L2CAP_RECONF_NONE on this path — presumably cleared
		 * when the F-flag arrives; confirm against the rx state
		 * machine. */

		/* If moving to BR/EDR, use default timeout defined by
		 * the spec */
		if (pi->amp_move_id == 0)
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

		if (pi->mode == L2CAP_MODE_ERTM) {
			l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
			pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
		}
	}

	return err;
}
3978
3979
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003980static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3981{
3982 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
3983
3984 if (rej->reason != 0x0000)
3985 return 0;
3986
3987 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3988 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003989 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003990
3991 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003992 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003993
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003994 l2cap_conn_start(conn);
3995 }
3996
3997 return 0;
3998}
3999
/* Handle an incoming Connect Request (classic or AMP "create channel")
 * and, if a listener exists on the PSM, create the server-side channel.
 *
 * @rsp_code: response opcode to send (L2CAP_CONN_RSP for BR/EDR).
 * @amp_id:   controller the channel is created on; 0 means BR/EDR.
 *
 * A response with the appropriate result/status is always sent. Returns
 * the new socket, or NULL when the request was refused.
 *
 * Locking: takes the parent (listening) socket lock, and the channel
 * list lock while the new channel is inserted and its state decided.
 */
static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code,
					u8 amp_id)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP, PSM 0x0001) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		sk = NULL;
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Inherit settings from the listening socket and record the
	 * peer's channel id as our destination cid. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	/* __l2cap_chan_add allocates our scid, reported back as dcid. */
	__l2cap_chan_add(conn, sk);
	dcid = l2cap_pi(sk)->scid;
	l2cap_pi(sk)->amp_id = amp_id;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	/* Decide the response: success only when feature discovery is
	 * done, security is satisfied, setup is not deferred, and the
	 * channel is on BR/EDR; everything else is pending. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up. */
				if (amp_id) {
					sk->sk_state = BT_CONNECT2;
					result = L2CAP_CR_PEND;
				} else {
					sk->sk_state = BT_CONFIG;
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off feature-mask discovery if it has not been done yet. */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away. */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
			result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return sk;
}
4141}
4142
4143static inline int l2cap_connect_req(struct l2cap_conn *conn,
4144 struct l2cap_cmd_hdr *cmd, u8 *data)
4145{
4146 l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147 return 0;
4148}
4149
/* Handle a Connect Response from the peer. Looks up the channel by our
 * scid (or by the request ident when scid is 0, i.e. a refusal before a
 * cid was assigned) and advances it: success starts configuration,
 * pending just flags the channel, anything else tears it down.
 *
 * Returns 0, or -EFAULT when no matching channel exists.
 *
 * NOTE(review): the lookup helpers appear to return the socket locked —
 * the bh_unlock_sock() at the end pairs with that; confirm against their
 * definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection established: record the peer's cid and send
		 * our first Configure Request (once only). */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Retry the teardown shortly via the sock timer. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4211
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004212static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004213{
4214 /* FCS is enabled only in ERTM or streaming mode, if one or both
4215 * sides request it.
4216 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004217 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4218 pi->fcs = L2CAP_FCS_NONE;
4219 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4220 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004221}
4222
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented) configuration options into the
 * per-channel conf_req buffer, and once the final fragment arrives
 * parses them and sends a Configure Response. Two special flows are
 * layered on top of the classic exchange:
 *
 *  - AMP move reconfiguration: a config request received while the
 *    channel is BT_CONNECTED and waiting for a move-triggered
 *    reconfigure is parsed by the dedicated AMP path and does not
 *    advance the normal config state machine.
 *  - Lockstep configuration (AMP): on BR/EDR, a PENDING response is
 *    immediately followed by a SUCCESS response.
 *
 * Returns 0 on success, -ENOENT when no channel matches dcid.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; every exit path goes through bh_unlock_sock() -- confirm.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rspbuf[64];
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
	struct sock *sk;
	int len;
	u8 amp_move_reconf = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
		"reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
		sk->sk_state, l2cap_pi(sk)->rx_state,
		l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
		l2cap_pi(sk)->amp_move_id);

	/* Detect a reconfig request due to channel move between
	 * BR/EDR and AMP
	 */
	if (sk->sk_state == BT_CONNECTED &&
		l2cap_pi(sk)->rx_state ==
		L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;

	if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
		amp_move_reconf = 1;

	/* Outside a move, config requests are only valid in BT_CONFIG;
	 * reject anything else (0x0002 = invalid CID in request).
	 */
	if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_REJECT, flags), rspbuf);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	/* Continuation flag set: more option fragments will follow. */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
		goto unlock;
	}

	/* Complete config. */
	if (!amp_move_reconf)
		len = l2cap_parse_conf_req(sk, rspbuf);
	else
		len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);

	if (len < 0) {
		/* Unparsable options: give up on the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_pi(sk)->conf_ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
			rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
			!l2cap_pi(sk)->amp_id) {
		/* Send success response right after pending if using
		 * lockstep config on BR/EDR
		 */
		rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
	}

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* A move-triggered reconfigure does not touch the counters or
	 * the connection-ready logic below.
	 */
	if (amp_move_reconf)
		goto unlock;

	l2cap_pi(sk)->num_conf_rsp++;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is now usable. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
			l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer's request but have not yet sent ours. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
4350
/* Handle an incoming L2CAP Configure Response.
 *
 * Drives the outbound half of the configuration state machine:
 *
 *  - SUCCESS: accept the negotiated RFC options (lockstep channels
 *    must have seen a PENDING response first).
 *  - PENDING: only legal in lockstep (AMP) config; may trigger
 *    creation of the AMP logical link.
 *  - UNACCEPT: renegotiate with an adjusted request, up to
 *    L2CAP_CONF_MAX_CONF_RSP attempts.
 *  - anything else: disconnect the channel.
 *
 * Responses arriving during an AMP move reconfiguration are routed to
 * l2cap_amp_move_reconf_rsp() instead. Always returns 0.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; every exit goes through bh_unlock_sock() -- confirm.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	int len = cmd->len - sizeof(*rsp);	/* length of option data */

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	pi = l2cap_pi(sk);

	/* Mid-move reconfiguration uses its own handler. */
	if (pi->reconf_state != L2CAP_RECONF_NONE) {
		l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
		goto done;
	}

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
			/* Lockstep procedure requires a pending response
			 * before success.
			 */
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_PENDING:
		/* PENDING is only valid for lockstep (AMP) config. */
		if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);

		pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;

		l2cap_conf_ext_fs_get(sk, rsp->data, len);

		if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
			struct hci_chan *chan;

			/* Already sent a 'pending' response, so set up
			 * the logical link now
			 */
			chan = l2cap_chan_admit(pi->amp_id, pi);
			if (!chan) {
				l2cap_send_disconn_req(pi->conn, sk,
							ECONNRESET);
				goto done;
			}

			chan->l2cap_sk = sk;
			if (chan->state == BT_CONNECTED)
				l2cap_create_cfm(chan, 0);
		}

		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			pi->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Renegotiation attempts exhausted: fall through and
		 * treat it like any other failure.
		 */

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response fragments coming. */
	if (flags & 0x01)
		goto done;

	pi->conf_state |= L2CAP_CONF_INPUT_DONE;

	/* Both directions configured: the channel is now usable. */
	if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(pi);

		sk->sk_state = BT_CONNECTED;

		if (pi->mode == L2CAP_MODE_ERTM ||
			pi->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
4477
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges the request with a Disconnect Response, purges pending
 * transmit state (and ERTM work items) unless a disconnect was already
 * in flight, then deletes the channel. If the socket is currently
 * owned by user space, deletion is deferred via a short timer instead
 * of being done in this (softirq) context. Always returns 0; an
 * unknown CID is silently ignored per the spec.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; all exits go through bh_unlock_sock() -- confirm.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our source CID. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo the CIDs back from our point of view. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(TX_QUEUE(sk));

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			/* Drop retransmission state and stop ERTM timers. */
			skb_queue_purge(SREJ_QUEUE(sk));

			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	/* Free the socket if it is now a dead orphan. */
	l2cap_sock_kill(sk);
	return 0;
}
4529
/* Handle an incoming L2CAP Disconnect Response.
 *
 * The peer has acknowledged our disconnect request, so the channel can
 * be deleted with no error reported to the socket (err 0). As in the
 * request path, deletion is deferred via a short timer when the socket
 * is owned by user space. Always returns 0.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * locked; all exits go through bh_unlock_sock() -- confirm.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	/* Free the socket if it is now a dead orphan. */
	l2cap_sock_kill(sk);
	return 0;
}
4560
4561static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4562{
4563 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 u16 type;
4565
4566 type = __le16_to_cpu(req->type);
4567
4568 BT_DBG("type 0x%4.4x", type);
4569
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004570 if (type == L2CAP_IT_FEAT_MASK) {
4571 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004572 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004573 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4574 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4575 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004576 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004577 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004578 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004579 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004580 l2cap_send_cmd(conn, cmd->ident,
4581 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004582 } else if (type == L2CAP_IT_FIXED_CHAN) {
4583 u8 buf[12];
4584 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4585 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4586 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4587 memcpy(buf + 4, l2cap_fixed_chan, 8);
4588 l2cap_send_cmd(conn, cmd->ident,
4589 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004590 } else {
4591 struct l2cap_info_rsp rsp;
4592 rsp.type = cpu_to_le16(type);
4593 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4594 l2cap_send_cmd(conn, cmd->ident,
4595 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4596 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597
4598 return 0;
4599}
4600
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the connection-level information exchange started at
 * connect time: a feature-mask response may chain into a fixed-channel
 * request; the fixed-channel response (or any failure) finishes the
 * exchange and lets pending channels proceed via l2cap_conn_start().
 * Stale or duplicate responses are dropped. Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* Response arrived in time; stop the info request timeout. */
	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Treat a failed query as "exchange done" so channel
		 * setup is not blocked forever.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query the map. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* First octet of the map covers the CIDs we care about. */
		conn->fc_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4654
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004655static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4656 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
4657{
4658 struct l2cap_move_chan_req req;
4659 u8 ident;
4660
4661 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4662 (int) dest_amp_id);
4663
4664 ident = l2cap_get_ident(conn);
4665 if (pi)
4666 pi->ident = ident;
4667
4668 req.icid = cpu_to_le16(icid);
4669 req.dest_amp_id = dest_amp_id;
4670
4671 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
4672}
4673
4674static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4675 u16 icid, u16 result)
4676{
4677 struct l2cap_move_chan_rsp rsp;
4678
4679 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4680
4681 rsp.icid = cpu_to_le16(icid);
4682 rsp.result = cpu_to_le16(result);
4683
4684 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4685}
4686
4687static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4688 struct l2cap_pinfo *pi, u16 icid, u16 result)
4689{
4690 struct l2cap_move_chan_cfm cfm;
4691 u8 ident;
4692
4693 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4694
4695 ident = l2cap_get_ident(conn);
4696 if (pi)
4697 pi->ident = ident;
4698
4699 cfm.icid = cpu_to_le16(icid);
4700 cfm.result = cpu_to_le16(result);
4701
4702 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4703}
4704
4705static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4706 u16 icid)
4707{
4708 struct l2cap_move_chan_cfm_rsp rsp;
4709
4710 BT_DBG("icid %d", (int) icid);
4711
4712 rsp.icid = cpu_to_le16(icid);
4713 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4714}
4715
4716static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4717 struct l2cap_cmd_hdr *cmd, u8 *data)
4718{
4719 struct l2cap_create_chan_req *req =
4720 (struct l2cap_create_chan_req *) data;
4721 struct sock *sk;
4722 u16 psm, scid;
4723
4724 psm = le16_to_cpu(req->psm);
4725 scid = le16_to_cpu(req->scid);
4726
4727 BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
4728 (int) req->amp_id);
4729
4730 if (req->amp_id) {
4731 struct hci_dev *hdev;
4732
4733 /* Validate AMP controller id */
4734 hdev = hci_dev_get(A2MP_HCI_ID(req->amp_id));
4735 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4736 struct l2cap_create_chan_rsp rsp;
4737
4738 rsp.dcid = 0;
4739 rsp.scid = cpu_to_le16(scid);
4740 rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
4741 rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
4742
4743 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4744 sizeof(rsp), &rsp);
4745
4746 if (hdev)
4747 hci_dev_put(hdev);
4748
4749 return 0;
4750 }
4751
4752 hci_dev_put(hdev);
4753 }
4754
4755 sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4756 req->amp_id);
4757
Mat Martineau55f2a622011-09-19 13:20:17 -07004758 if (sk)
4759 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004760
Mat Martineau55f2a622011-09-19 13:20:17 -07004761 if (sk && req->amp_id &&
4762 (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004763 amp_accept_physical(conn, req->amp_id, sk);
4764
4765 return 0;
4766}
4767
4768static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4769 struct l2cap_cmd_hdr *cmd, u8 *data)
4770{
4771 BT_DBG("conn %p", conn);
4772
4773 return l2cap_connect_rsp(conn, cmd, data);
4774}
4775
/* Handle an incoming L2CAP Move Channel Request (AMP).
 *
 * Validates that the channel exists, is in a movable mode (ERTM or
 * streaming on a dynamic CID), that the destination controller is
 * valid and different from the current one, and that no conflicting
 * move is in progress (collisions are arbitrated by BD_ADDR
 * comparison). On acceptance the local move state machine is set up as
 * responder; moves to BR/EDR can complete immediately while moves to
 * an AMP controller answer PENDING and start the physical link.
 * A Move Channel Response is always sent; icid 0 with REFUSED is used
 * when the channel is unknown. Always returns 0.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid = 0;
	u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);

	/* The initiator's icid is our destination CID. */
	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		goto send_move_response;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Only dynamic channels in ERTM/streaming mode may move. */
	if (pi->scid < L2CAP_CID_DYN_START ||
		(pi->mode != L2CAP_MODE_ERTM &&
		 pi->mode != L2CAP_MODE_STREAMING)) {
		goto send_move_response;
	}

	if (pi->amp_id == req->dest_amp_id) {
		result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist and be up. */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(A2MP_HCI_ID(req->dest_amp_id));
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Collision: both sides initiated a move. The device with the
	 * higher BD_ADDR yields by refusing with COLLISION.
	 */
	if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
		pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
		bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
		goto send_move_response;
	}

	/* Local policy pins this channel to BR/EDR. */
	if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
		result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Accept: become the move responder and quiesce the channel. */
	pi->amp_move_cmd_ident = cmd->ident;
	pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
	l2cap_amp_move_setup(sk);
	pi->amp_move_id = req->dest_amp_id;
	icid = pi->dcid;

	if (req->dest_amp_id == 0) {
		/* Moving to BR/EDR */
		if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			result = L2CAP_MOVE_CHAN_PENDING;
		} else {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
			result = L2CAP_MOVE_CHAN_SUCCESS;
		}
	} else {
		/* Moving to AMP: physical link must come up first. */
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_accept_physical(pi->conn, req->dest_amp_id, sk);
		result = L2CAP_MOVE_CHAN_PENDING;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	if (sk)
		release_sock(sk);

	return 0;
}
4865
/* Handle an incoming L2CAP Move Channel Response (AMP).
 *
 * We are the move initiator here. SUCCESS/PENDING responses are
 * matched to the channel by icid (our scid) and drive the initiator
 * state machine: depending on whether the logical link is already up,
 * the move is confirmed now, deferred until the link completes, or the
 * link bring-up is started via l2cap_chan_admit(). Failure results are
 * matched by command ident; a COLLISION refusal demotes us to
 * responder, any other failure reverts the move. In every failure path
 * a Move Channel Confirmation (confirmed or unconfirmed) is sent so
 * the peer's state machine can terminate. Always returns 0.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	switch (result) {
	case L2CAP_MOVE_CHAN_SUCCESS:
	case L2CAP_MOVE_CHAN_PENDING:
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Unknown channel: tell the peer the move is off. */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		/* A PENDING response restarts the (extended) timeout. */
		l2cap_sock_clear_timer(sk);
		if (result == L2CAP_MOVE_CHAN_PENDING)
			l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);

		if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent when logical link
			 * is complete.
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
		} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
				pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			} else {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			}
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
			/* Default flow spec: best effort, no guarantees. */
			struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			struct hci_chan *chan;
			/* Moving to AMP */
			if (result == L2CAP_MOVE_CHAN_SUCCESS) {
				/* Remote is ready, send confirm immediately
				 * after logical link is ready
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			} else {
				/* Both logical link and move success
				 * are required to confirm
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
			}
			pi->remote_fs = default_fs;
			pi->local_fs = default_fs;
			chan = l2cap_chan_admit(pi->amp_move_id, pi);
			if (!chan) {
				/* Logical link not available */
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
				break;
			}
			if (chan->state == BT_CONNECTED) {
				/* Logical link is already ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				if (result == L2CAP_MOVE_CHAN_SUCCESS) {
					/* Can confirm now */
					l2cap_send_move_chan_cfm(conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				} else {
					/* Now only need move success
					 * required to confirm
					 */
					pi->amp_move_state =
						L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
				}
			} else
				chan->l2cap_sk = sk;
		} else {
			/* Any other amp move state means the move failed. */
			l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		break;
	default:
		/* Failed (including collision case) */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Could not locate channel, icid is best guess */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);

		if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
				/* Peer won the collision; let its move run. */
				pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
			else {
				/* Cleanup - cancel move */
				pi->amp_move_id = pi->amp_id;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
				l2cap_amp_move_revert(sk);
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			}
		}

		l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		break;
	}

	if (sk)
		release_sock(sk);

	return 0;
}
5016
/* Handle an incoming L2CAP Move Channel Confirmation (AMP).
 *
 * We are the move responder here. A CONFIRMED result commits the move
 * (adopting amp_move_id as the channel's controller and releasing the
 * old AMP channel when moving back to BR/EDR); any other result
 * reverts it. A confirmation response is always sent, even for an
 * unknown icid. Always returns 0.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
	struct sock *sk;
	u16 icid, result;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk) {
		BT_DBG("Bad channel (%d)", (int) icid);
		goto send_move_confirm_response;
	}

	lock_sock(sk);

	if (l2cap_pi(sk)->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
			l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
			if ((!l2cap_pi(sk)->amp_id) &&
					(l2cap_pi(sk)->ampchan)) {
				/* Have moved off of AMP, free the channel */
				/* NOTE(review): ampchan is dereferenced
				 * (refcnt read) after hci_chan_put() -- looks
				 * like a potential use-after-put if this was
				 * the last reference; confirm hci_chan_put()
				 * semantics before relying on this ordering.
				 */
				hci_chan_put(l2cap_pi(sk)->ampchan);
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
				l2cap_pi(sk)->ampchan = NULL;
				l2cap_pi(sk)->ampcon = NULL;
			}
			l2cap_amp_move_success(sk);
		} else {
			/* Move rejected: fall back to current controller. */
			l2cap_pi(sk)->amp_move_id = l2cap_pi(sk)->amp_id;
			l2cap_amp_move_revert(sk);
		}
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	} else if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
		BT_DBG("Bad AMP_MOVE_STATE (%d)", l2cap_pi(sk)->amp_move_state);
	}

send_move_confirm_response:
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (sk)
		release_sock(sk);

	return 0;
}
5073
/* Handle an incoming L2CAP Move Channel Confirm Response (AMP).
 *
 * This is the final message of a channel move initiated locally. If we
 * were waiting for it (WAIT_MOVE_CONFIRM_RSP), commit the move: adopt
 * the new controller id, release the AMP logical link when the channel
 * has moved back to BR/EDR (amp_id == 0), and resume normal operation
 * via l2cap_amp_move_success(). Unknown CIDs are silently ignored.
 * Always returns 0.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp =
		(struct l2cap_move_chan_cfm_rsp *) data;
	struct sock *sk;
	u16 icid;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", (int) icid);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		return 0;

	lock_sock(sk);

	/* The move timer was armed when the confirm was sent. */
	l2cap_sock_clear_timer(sk);

	if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;

		if (!l2cap_pi(sk)->amp_id) {
			/* Have moved off of AMP, free the channel */
			l2cap_pi(sk)->ampcon = NULL;
			if (l2cap_pi(sk)->ampchan) {
				/* NOTE(review): refcnt is read after
				 * hci_chan_put(); assumes the put cannot
				 * release the last reference here — confirm
				 * hci_chan_put() semantics.
				 */
				hci_chan_put(l2cap_pi(sk)->ampchan);
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
			}
			l2cap_pi(sk)->ampchan = NULL;
		}

		l2cap_amp_move_success(sk);

		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	}

	release_sock(sk);

	return 0;
}
5123
5124static void l2cap_amp_signal_worker(struct work_struct *work)
5125{
5126 int err = 0;
5127 struct l2cap_amp_signal_work *ampwork =
5128 container_of(work, struct l2cap_amp_signal_work, work);
5129
5130 switch (ampwork->cmd.code) {
5131 case L2CAP_MOVE_CHAN_REQ:
5132 err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
5133 ampwork->data);
5134 break;
5135
5136 case L2CAP_MOVE_CHAN_RSP:
5137 err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
5138 ampwork->data);
5139 break;
5140
5141 case L2CAP_MOVE_CHAN_CFM:
5142 err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
5143 ampwork->data);
5144 break;
5145
5146 case L2CAP_MOVE_CHAN_CFM_RSP:
5147 err = l2cap_move_channel_confirm_rsp(ampwork->conn,
5148 &ampwork->cmd, ampwork->data);
5149 break;
5150
5151 default:
5152 BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
5153 err = -EINVAL;
5154 break;
5155 }
5156
5157 if (err) {
5158 struct l2cap_cmd_rej rej;
5159 BT_DBG("error %d", err);
5160
5161 /* In this context, commands are only rejected with
5162 * "command not understood", code 0.
5163 */
5164 rej.reason = cpu_to_le16(0);
5165 l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
5166 L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5167 }
5168
5169 kfree_skb(ampwork->skb);
5170 kfree(ampwork);
5171}
5172
/* Called when the AMP physical link create/accept procedure finishes.
 *
 * @result:    L2CAP_CREATE_CHAN_SUCCESS / L2CAP_MOVE_CHAN_SUCCESS on
 *             success, other values (including -EINVAL) on failure.
 * @local_id:  local AMP controller id.
 * @remote_id: remote AMP controller id.
 * @sk:        the L2CAP channel socket involved.
 *
 * For channels not yet connected this completes an incoming or
 * outgoing create-channel-on-AMP; for connected channels it advances
 * (or aborts) an in-progress channel move depending on our move role.
 */
void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
	struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
		(int) local_id, (int) remote_id, sk);

	lock_sock(sk);

	/* Channel is going away; nothing to do. */
	if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
		release_sock(sk);
		return;
	}

	pi = l2cap_pi(sk);

	if (sk->sk_state != BT_CONNECTED) {
		/* Channel creation (not a move). A parent socket means
		 * this is the acceptor side of an incoming channel.
		 */
		if (bt_sk(sk)->parent) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			/* Incoming channel on AMP */
			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Send successful response */
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			} else {
				/* Send negative response */
				rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			}

			l2cap_send_cmd(pi->conn, pi->ident,
					L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Move straight on to configuration. */
				sk->sk_state = BT_CONFIG;
				pi->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(pi->conn,
					l2cap_get_ident(pi->conn),
					L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		} else {
			/* Outgoing channel on AMP */
			if (result != L2CAP_CREATE_CHAN_SUCCESS) {
				/* Revert to BR/EDR connect */
				l2cap_send_conn_req(sk);
			} else {
				pi->amp_id = local_id;
				l2cap_send_create_chan_req(sk, remote_id);
			}
		}
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		/* We started the move: request the peer to follow. */
		l2cap_amp_move_setup(sk);
		pi->amp_move_id = local_id;
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;

		l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		/* Peer-initiated move: admit a logical channel on the
		 * new controller using wide-open default flow specs.
		 */
		struct hci_chan *chan;
		struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
				0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
		pi->remote_fs = default_fs;
		pi->local_fs = default_fs;
		chan = l2cap_chan_admit(local_id, pi);
		if (chan) {
			if (chan->state == BT_CONNECTED) {
				/* Logical link is ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			} else {
				/* Wait for logical link to be ready */
				chan->l2cap_sk = sk;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			}
		} else {
			/* Logical link not available */
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}
	} else {
		/* Physical link failed, or no move is pending: abort
		 * the move and get data flowing again.
		 */
		BT_DBG("result %d, role %d, local_busy %d", result,
			(int) pi->amp_move_role,
			(int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));

		if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			if (result == -EINVAL)
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
			else
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;

		/* Clear local busy if receive buffer space is back. */
		if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
			l2cap_ertm_tx(sk, 0, 0,
					L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

		/* Restart data transmission */
		l2cap_ertm_send(sk);
	}

	release_sock(sk);
}
5300
/* Called when an AMP logical link create attempt completes.
 *
 * @chan:   the HCI logical channel (carries the target socket in
 *          chan->l2cap_sk).
 * @status: 0 on success, non-zero HCI status on failure.
 *
 * On success, either finishes channel setup (new channel on AMP) or
 * advances the pending channel-move state machine according to our
 * role. On failure, aborts the setup or move and reverts the channel.
 * Always returns 0.
 *
 * NOTE(review): chan is dereferenced (chan->conn, chan->l2cap_sk)
 * before the (chan != NULL) test below — callers presumably never pass
 * NULL; confirm.
 */
int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;

	BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);

	sk = chan->l2cap_sk;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	/* Ignore completions for channels that never got an AMP id and
	 * are not connected (e.g. already torn down).
	 */
	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
		release_sock(sk);
		return 0;
	}

	pi = l2cap_pi(sk);

	if ((!status) && (chan != NULL)) {
		/* Logical link is up; attach it to the channel. */
		pi->ampchan = chan;
		pi->ampcon = chan->conn;
		pi->ampcon->l2cap_data = pi->conn;

		if (sk->sk_state != BT_CONNECTED) {
			/* New channel created on AMP: finish the pending
			 * configuration exchange.
			 */
			struct l2cap_conf_rsp rsp;

			/* Must use spinlock to prevent concurrent
			 * execution of l2cap_config_rsp()
			 */
			bh_lock_sock(sk);
			l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(sk, &rsp,
						L2CAP_CONF_SUCCESS, 0), &rsp);
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
				set_default_fcs(l2cap_pi(sk));

				sk->sk_state = BT_CONNECTED;

				if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
					l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
					l2cap_ertm_init(sk);

				l2cap_chan_ready(sk);
			}
			bh_unlock_sock(sk);
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent after a success
			 * response is received
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
			/* Move can proceed; which message we send next
			 * depends on who initiated it.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_SUCCESS);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			}
		} else {
			/* Move was not in expected state, free the
			 * logical link
			 */
			hci_chan_put(pi->ampchan);
			pi->ampcon = NULL;
			pi->ampchan = NULL;
		}
	} else {
		/* Logical link setup failed. */

		if (sk->sk_state != BT_CONNECTED)
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			l2cap_amp_move_revert(sk);
			l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONFIG);
		} else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if ((pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
				(pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
				/* Remote has only sent pending or
				 * success responses, clean up
				 */
				l2cap_amp_move_revert(sk);
				l2cap_pi(sk)->amp_move_role =
					L2CAP_AMP_MOVE_NONE;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			}

			/* Other amp move states imply that the move
			 * has already aborted
			 */
			l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}

		pi->ampcon = NULL;
		pi->ampchan = NULL;
	}

	release_sock(sk);
	return 0;
}
5426
5427static void l2cap_logical_link_worker(struct work_struct *work)
5428{
5429 struct l2cap_logical_link_work *log_link_work =
5430 container_of(work, struct l2cap_logical_link_work, work);
5431
5432 l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
5433 kfree(log_link_work);
5434}
5435
5436static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5437{
5438 struct l2cap_logical_link_work *amp_work;
5439
5440 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5441 if (!amp_work)
5442 return -ENOMEM;
5443
5444 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5445 amp_work->chan = chan;
5446 amp_work->status = status;
5447 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5448 kfree(amp_work);
5449 return -ENOMEM;
5450 }
5451
5452 return 0;
5453}
5454
/* Callback for completion of an AMP logical-link modify (flow spec
 * change). Currently only logs; failure handling is still a TODO.
 * Always returns 0.
 */
int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
{
	struct l2cap_conn *conn = chan->conn->l2cap_data;

	BT_DBG("chan %p conn %p status %d", chan, conn, status);

	/* TODO: if failed status restore previous fs */
	return 0;
}
5464
5465int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
5466{
5467 struct l2cap_chan_list *l;
5468 struct l2cap_conn *conn = chan->conn->l2cap_data;
5469 struct sock *sk;
5470
5471 BT_DBG("chan %p conn %p", chan, conn);
5472
5473 if (!conn)
5474 return 0;
5475
5476 l = &conn->chan_list;
5477
5478 read_lock(&l->lock);
5479
5480 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
5481 bh_lock_sock(sk);
5482 /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
5483 if (l2cap_pi(sk)->ampchan == chan) {
5484 l2cap_pi(sk)->ampchan = NULL;
5485 l2cap_amp_move_init(sk);
5486 }
5487 bh_unlock_sock(sk);
5488 }
5489
5490 read_unlock(&l->lock);
5491
5492 return 0;
5493
5494
5495}
5496
5497static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5498 u8 *data, struct sk_buff *skb)
5499{
5500 struct l2cap_amp_signal_work *amp_work;
5501
5502 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5503 if (!amp_work)
5504 return -ENOMEM;
5505
5506 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5507 amp_work->conn = conn;
5508 amp_work->cmd = *cmd;
5509 amp_work->data = data;
5510 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5511 if (!amp_work->skb) {
5512 kfree(amp_work);
5513 return -ENOMEM;
5514 }
5515
5516 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5517 kfree_skb(amp_work->skb);
5518 kfree(amp_work);
5519 return -ENOMEM;
5520 }
5521
5522 return 0;
5523}
5524
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005525static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005526 u16 to_multiplier)
5527{
5528 u16 max_latency;
5529
5530 if (min > max || min < 6 || max > 3200)
5531 return -EINVAL;
5532
5533 if (to_multiplier < 10 || to_multiplier > 3200)
5534 return -EINVAL;
5535
5536 if (max >= to_multiplier * 8)
5537 return -EINVAL;
5538
5539 max_latency = (to_multiplier * 8 / max) - 1;
5540 if (latency > 499 || latency > max_latency)
5541 return -EINVAL;
5542
5543 return 0;
5544}
5545
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the link master. Validates the requested
 * parameters, always sends an accept/reject response, and on accept
 * asks the controller to apply the new parameters. Returns 0 on
 * success, -EINVAL if we are not master, -EPROTO on a malformed
 * request.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may grant a parameter update. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* Respond first, then apply the update on acceptance. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5587
/* Dispatch one BR/EDR L2CAP signaling command to its handler.
 *
 * AMP move commands are deferred to a workqueue via l2cap_sig_amp()
 * (which needs @skb to keep the command data alive); everything else
 * is handled inline. Returns 0 or a negative handler error; a non-zero
 * return causes the caller to send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
			struct sk_buff *skb)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
	case L2CAP_MOVE_CHAN_RSP:
	case L2CAP_MOVE_CHAN_CFM:
	case L2CAP_MOVE_CHAN_CFM_RSP:
		/* Deferred to the AMP signal worker. */
		err = l2cap_sig_amp(conn, cmd, data, skb);
		break;
	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5660
5661static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5662 struct l2cap_cmd_hdr *cmd, u8 *data)
5663{
5664 switch (cmd->code) {
5665 case L2CAP_COMMAND_REJ:
5666 return 0;
5667
5668 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005669 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005670
5671 case L2CAP_CONN_PARAM_UPDATE_RSP:
5672 return 0;
5673
5674 default:
5675 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5676 return -EINVAL;
5677 }
5678}
5679
/* Parse and dispatch all signaling commands in one received skb.
 *
 * Walks the concatenated command headers in skb->data, routing each to
 * the LE or BR/EDR dispatcher based on link type, and sends a Command
 * Reject for any command whose handler fails. Truncated or ident-less
 * commands stop the parse. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command longer than the remaining payload, or with a
		 * zero ident, is malformed; abandon the rest of the skb.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
							data, skb);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5727
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005728static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005729{
5730 u16 our_fcs, rcv_fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005731 int hdr_size;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005732
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005733 if (pi->extended_control)
5734 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
5735 else
5736 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
5737
5738 if (pi->fcs == L2CAP_FCS_CRC16) {
5739 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005740 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5741 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5742
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005743 if (our_fcs != rcv_fcs) {
5744 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005745 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005746 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005747 }
5748 return 0;
5749}
5750
/* Feed a received frame's reqseq (ack) and F-bit into the ERTM
 * transmit state machine.
 */
static void l2cap_ertm_pass_to_tx(struct sock *sk,
				struct bt_l2cap_control *control)
{
	BT_DBG("sk %p, control %p", sk, control);
	l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
}
5757
/* Feed only a received frame's F-bit into the ERTM transmit state
 * machine (used when the reqseq must not be treated as an ack).
 */
static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
				struct bt_l2cap_control *control)
{
	BT_DBG("sk %p, control %p", sk, control);
	l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005764
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005765static void l2cap_ertm_resend(struct sock *sk)
5766{
5767 struct bt_l2cap_control control;
5768 struct l2cap_pinfo *pi;
5769 struct sk_buff *skb;
5770 struct sk_buff *tx_skb;
5771 u16 seq;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005772
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005773 BT_DBG("sk %p", sk);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005774
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005775 pi = l2cap_pi(sk);
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005777 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
5778 return;
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03005779
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005780 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
5781 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
5782 return;
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005783
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005784 while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
5785 seq = l2cap_seq_list_pop(&pi->retrans_list);
5786
5787 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
5788 if (!skb) {
5789 BT_DBG("Error: Can't retransmit seq %d, frame missing",
5790 (int) seq);
5791 continue;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005792 }
5793
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005794 bt_cb(skb)->retries += 1;
5795 control = bt_cb(skb)->control;
5796
5797 if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
5798 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
5799 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5800 l2cap_seq_list_clear(&pi->retrans_list);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005801 break;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005802 }
5803
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005804 control.reqseq = pi->buffer_seq;
5805 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
5806 control.final = 1;
5807 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
5808 } else {
5809 control.final = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005810 }
5811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005812 if (skb_cloned(skb)) {
5813 /* Cloned sk_buffs are read-only, so we need a
5814 * writeable copy
5815 */
5816 tx_skb = skb_copy(skb, GFP_ATOMIC);
5817 } else {
5818 tx_skb = skb_clone(skb, GFP_ATOMIC);
5819 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005820
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005821 /* Update skb contents */
5822 if (pi->extended_control) {
5823 put_unaligned_le32(__pack_extended_control(&control),
5824 tx_skb->data + L2CAP_HDR_SIZE);
5825 } else {
5826 put_unaligned_le16(__pack_enhanced_control(&control),
5827 tx_skb->data + L2CAP_HDR_SIZE);
5828 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005829
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005830 if (pi->fcs == L2CAP_FCS_CRC16)
5831 apply_fcs(tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005832
Mat Martineau2f0cd842011-10-20 14:34:26 -07005833 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005834 tx_skb->sk = sk;
5835 tx_skb->destructor = l2cap_skb_destructor;
5836 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005837
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005838 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005839
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005840 BT_DBG("Resent txseq %d", (int)control.txseq);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005842 pi->last_acked_seq = pi->buffer_seq;
Mat Martineauc0675b82011-07-07 09:39:02 -07005843 }
5844}
5845
/* Queue a single sequence number (control->reqseq) for retransmission
 * and run the resend machinery immediately.
 */
static inline void l2cap_ertm_retransmit(struct sock *sk,
					struct bt_l2cap_control *control)
{
	BT_DBG("sk %p, control %p", sk, control);

	l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
	l2cap_ertm_resend(sk);
}
5854
/* Retransmit all unacked I-frames starting from control->reqseq
 * (e.g. on receipt of an REJ or a poll response).
 *
 * Builds the retrans_list by walking the transmit queue from the frame
 * with txseq == reqseq up to (but excluding) sk_send_head — i.e. only
 * frames already sent — then triggers the resend. Honors the poll bit
 * by scheduling an F-bit on the next transmission. No-op while the
 * remote is busy or nothing is unacked.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
			struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	/* Rebuild the list from scratch for this reqseq. */
	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		/* Find the first frame to retransmit... */
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
				skb == sk->sk_send_head)
				break;
		}

		/* ...then queue every already-sent frame from there on. */
		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
						bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
5891
/* Append @new_frag to @skb's frag_list during SDU reassembly, keeping
 * the aggregate length/truesize accounting on @skb up to date.
 *
 * @last_frag is the caller's cursor to the current tail fragment and
 * is advanced to @new_frag. NOTE(review): when the frag_list is empty,
 * *last_frag is presumably @skb itself (set by the caller), so the
 * (*last_frag)->next store writes skb->next of the unqueued head —
 * harmless, but confirm against the callers.
 */
static inline void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	   skb->data_len reflects only data in fragments
	 */
	BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);

	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5912
/* Process an in-sequence ERTM I-frame, performing SDU reassembly
 * according to the frame's SAR (segmentation and reassembly) bits.
 *
 * Unsegmented frames are delivered directly; START/CONTINUE/END frames
 * are accumulated on pi->sdu (via its frag_list) until the announced
 * sdu_len is reached. On success the skb is consumed (queued or linked
 * into the SDU). On any error the partial SDU and the skb are freed
 * and the error is returned; reassembly state is reset either way.
 * Also flags local-busy to the transmitter when the receive buffer
 * fills up.
 */
static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
			struct bt_l2cap_control *control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = -EINVAL;

	BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
		skb, skb->len, skb->truesize);

	if (!control)
		return err;

	pi = l2cap_pi(sk);

	BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
		control->frame_type, control->sar, control->txseq,
		control->reqseq, control->final);

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A stray reassembly in progress is a protocol error;
		 * drop the partial SDU and deliver this frame alone.
		 */
		if (pi->sdu) {
			BT_DBG("Unexpected unsegmented PDU during reassembly");
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}

		BT_DBG("Unsegmented");
		err = sock_queue_rcv_skb(sk, skb);
		break;

	case L2CAP_SAR_START:
		if (pi->sdu) {
			BT_DBG("Unexpected start PDU during reassembly");
			kfree_skb(pi->sdu);
		}

		/* First two payload bytes announce the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame carrying the whole SDU is malformed;
		 * fall out with err == -EINVAL.
		 */
		if (skb->len >= pi->sdu_len)
			break;

		pi->sdu = skb;
		pi->sdu_last_frag = skb;

		BT_DBG("Start");

		/* skb now owned by the reassembly state. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* CONTINUE without a START in progress: err == -EINVAL. */
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced length is malformed. */
		if (pi->sdu->len >= pi->sdu_len)
			break;

		BT_DBG("Continue, reassembled %d", pi->sdu->len);

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* END without a START in progress: err == -EINVAL. */
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Total must match the announced length exactly. */
		if (pi->sdu->len != pi->sdu_len)
			break;

		BT_DBG("End, reassembled %d", pi->sdu->len);
		/* If the sender used tiny PDUs, the rcv queuing could fail.
		 * Applications that have issues here should use a larger
		 * sk_rcvbuf.
		 */
		err = sock_queue_rcv_skb(sk, pi->sdu);

		if (!err) {
			/* Reassembly complete */
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}
		break;

	default:
		BT_DBG("Bad SAR value");
		break;
	}

	if (err) {
		/* Discard any partial SDU and the frame that broke it. */
		BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
			err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;
		if (skb)
			kfree_skb(skb);
	}

	/* Update local busy state */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);

	return err;
}
6037
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006038static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03006039{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006040 int err = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006041 /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
6042 * until a gap is encountered.
6043 */
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006044
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006045 struct l2cap_pinfo *pi;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006046
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006047 BT_DBG("sk %p", sk);
6048 pi = l2cap_pi(sk);
6049
6050 while (l2cap_rmem_available(sk)) {
6051 struct sk_buff *skb;
6052 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6053 (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
6054
6055 skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
6056
6057 if (!skb)
6058 break;
6059
6060 skb_unlink(skb, SREJ_QUEUE(sk));
6061 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6062 err = l2cap_ertm_rx_expected_iframe(sk,
6063 &bt_cb(skb)->control, skb);
6064 if (err)
6065 break;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006066 }
6067
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006068 if (skb_queue_empty(SREJ_QUEUE(sk))) {
6069 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6070 l2cap_ertm_send_ack(sk);
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03006071 }
6072
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006073 return err;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006074}
6075
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006076static void l2cap_ertm_handle_srej(struct sock *sk,
6077 struct bt_l2cap_control *control)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006078{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006079 struct l2cap_pinfo *pi;
6080 struct sk_buff *skb;
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03006081
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006082 BT_DBG("sk %p, control %p", sk, control);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006083
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006084 pi = l2cap_pi(sk);
Gustavo F. Padovan05fbd892010-05-01 16:15:39 -03006085
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006086 if (control->reqseq == pi->next_tx_seq) {
6087 BT_DBG("Invalid reqseq %d, disconnecting",
6088 (int) control->reqseq);
6089 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006090 return;
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006091 }
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006092
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006093 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006094
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006095 if (skb == NULL) {
6096 BT_DBG("Seq %d not available for retransmission",
6097 (int) control->reqseq);
6098 return;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006099 }
6100
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006101 if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
6102 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6103 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6104 return;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006105 }
6106
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006107 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006108
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006109 if (control->poll) {
6110 l2cap_ertm_pass_to_tx(sk, control);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006111
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006112 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
6113 l2cap_ertm_retransmit(sk, control);
6114 l2cap_ertm_send(sk);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006115
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006116 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6117 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6118 pi->srej_save_reqseq = control->reqseq;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006119 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006120 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006121 l2cap_ertm_pass_to_tx_fbit(sk, control);
6122
6123 if (control->final) {
6124 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
6125 (pi->srej_save_reqseq == control->reqseq)) {
6126 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
6127 } else {
6128 l2cap_ertm_retransmit(sk, control);
6129 }
6130 } else {
6131 l2cap_ertm_retransmit(sk, control);
6132 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6133 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6134 pi->srej_save_reqseq = control->reqseq;
6135 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006136 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006137 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006138}
6139
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006140static void l2cap_ertm_handle_rej(struct sock *sk,
6141 struct bt_l2cap_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006142{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006143 struct l2cap_pinfo *pi;
6144 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006145
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006146 BT_DBG("sk %p, control %p", sk, control);
6147
6148 pi = l2cap_pi(sk);
6149
6150 if (control->reqseq == pi->next_tx_seq) {
6151 BT_DBG("Invalid reqseq %d, disconnecting",
6152 (int) control->reqseq);
6153 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6154 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006155 }
6156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006157 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006158
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006159 if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
6160 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6161 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6162 return;
6163 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006164
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006165 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
6166
6167 l2cap_ertm_pass_to_tx(sk, control);
6168
6169 if (control->final) {
6170 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
6171 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
6172 else
6173 l2cap_ertm_retransmit_all(sk, control);
6174 } else {
6175 l2cap_ertm_retransmit_all(sk, control);
6176 l2cap_ertm_send(sk);
6177 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
6178 pi->conn_state |= L2CAP_CONN_REJ_ACT;
6179 }
6180}
6181
/* Classify the txseq of a received I-frame relative to the receive
 * window and current RX state.
 *
 * Returns one of the L2CAP_ERTM_TXSEQ_* classifications (expected,
 * duplicate, unexpected/gap, SREJ-related, or invalid) that drives the
 * receive state machine.  All comparisons use modular sequence
 * arithmetic via __delta_seq() against last_acked_seq.
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	/* Extra checks while SREJ recovery is in progress */
	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	/* txseq strictly before expected_tx_seq: already received */
	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6271
/* Receive-side state machine handler for the normal RECV state.
 *
 * Processes one incoming I-frame or S-frame event.  In-sequence
 * I-frames are delivered; a sequence gap moves the receiver into
 * SREJ_SENT recovery.  The skb is consumed here (freed or queued)
 * unless it is handed off, tracked via skb_in_use.
 *
 * Returns 0 or a delivery error from the reassembly path.
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Under local busy the frame is dropped; the peer
			 * will retransmit it once busy clears.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
				pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_PREPARE) {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			/* Remote just left the busy state: restart the
			 * retransmission timer for outstanding frames.
			 */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	/* skb ownership: free it unless it was queued or delivered */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6409
/* Receive-side state machine handler for SREJ_SENT recovery.
 *
 * While waiting for selectively-rejected frames to be retransmitted,
 * new and retransmitted I-frames are parked on the SREJ queue for
 * in-order delivery by l2cap_ertm_rx_queued_iframes().  The skb is
 * consumed here unless queued (skb_in_use).
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived: try to deliver
			 * the now-contiguous run of queued frames.
			 */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			/* Plain RR keeps the peer informed without
			 * requesting more retransmissions while busy.
			 */
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	/* skb ownership: free it unless it was queued above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6555
/* Receive-side state machine handler while an AMP channel move is in
 * progress.  Only clearly-expected frames are processed so that no
 * recovery (SREJ/REJ) state changes occur mid-move; everything else is
 * acknowledged minimally or dropped.
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		/* Only advance the ack state; no retransmissions */
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	/* skb ownership: free it unless it was delivered above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6617
/* Respond to the peer's poll that completes an AMP channel move.
 *
 * Acknowledged frames are released, the transmit sequence state is
 * rewound to what the peer expects, the move is finished on the new
 * link, and an F-bit response is sent.  The event that carried the
 * poll is then re-run through the RECV state handler.
 *
 * Returns 0 on success, -EPROTO if the poll arrived on an I-frame,
 * or an error from the move completion / state handler.
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	/* I-frames cannot carry the poll bit; treat as protocol error */
	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
					pi->amp_move_event);

	return err;
}
6660
6661static void l2cap_amp_move_setup(struct sock *sk)
6662{
6663 struct l2cap_pinfo *pi;
6664 struct sk_buff *skb;
6665
6666 BT_DBG("sk %p", sk);
6667
6668 pi = l2cap_pi(sk);
6669
6670 l2cap_ertm_stop_ack_timer(pi);
6671 l2cap_ertm_stop_retrans_timer(pi);
6672 l2cap_ertm_stop_monitor_timer(pi);
6673
6674 pi->retry_count = 0;
6675 skb_queue_walk(TX_QUEUE(sk), skb) {
6676 if (bt_cb(skb)->retries)
6677 bt_cb(skb)->retries = 1;
6678 else
6679 break;
6680 }
6681
6682 pi->expected_tx_seq = pi->buffer_seq;
6683
6684 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6685 l2cap_seq_list_clear(&pi->retrans_list);
6686 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6687 skb_queue_purge(SREJ_QUEUE(sk));
6688
6689 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6690 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6691
6692 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6693 pi->rx_state);
6694
6695 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6696}
6697
6698static void l2cap_amp_move_revert(struct sock *sk)
6699{
6700 struct l2cap_pinfo *pi;
6701
6702 BT_DBG("sk %p", sk);
6703
6704 pi = l2cap_pi(sk);
6705
6706 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6707 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6708 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6709 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6710 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6711}
6712
6713static int l2cap_amp_move_reconf(struct sock *sk)
6714{
6715 struct l2cap_pinfo *pi;
6716 u8 buf[64];
6717 int err = 0;
6718
6719 BT_DBG("sk %p", sk);
6720
6721 pi = l2cap_pi(sk);
6722
6723 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6724 l2cap_build_amp_reconf_req(sk, buf), buf);
6725 return err;
6726}
6727
/* Complete a successful AMP channel move.
 *
 * The initiator optionally starts a reconfiguration exchange (when
 * enable_reconfig is set) or polls the peer and waits for the F-bit;
 * the responder waits for the initiator's poll.  Non-ERTM channels go
 * straight back to the normal RECV state.
 */
static void l2cap_amp_move_success(struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		int err = 0;
		/* Send reconfigure request */
		if (pi->mode == L2CAP_MODE_ERTM) {
			pi->reconf_state = L2CAP_RECONF_INT;
			if (enable_reconfig)
				err = l2cap_amp_move_reconf(sk);

			/* Fall back to a plain poll if reconfiguration is
			 * disabled or could not be started.
			 */
			if (err || !enable_reconfig) {
				pi->reconf_state = L2CAP_RECONF_NONE;
				l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
				pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
			}
		} else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	} else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		if (pi->mode == L2CAP_MODE_ERTM)
			pi->rx_state =
				L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
		else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	}
}
6760
6761static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6762{
6763 /* Make sure reqseq is for a packet that has been sent but not acked */
6764 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6765 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6766}
6767
/* Receive one frame in streaming mode.
 *
 * Streaming mode has no retransmission: an expected frame is delivered
 * for reassembly, anything else discards the partial SDU and the frame
 * itself, and the receive sequence simply resynchronizes to the frame
 * just seen.  Delivery errors from the reassembly path are deliberately
 * ignored; this function currently always returns 0.
 */
static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, state %d",
		sk, control, skb, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
		L2CAP_ERTM_TXSEQ_EXPECTED) {
		l2cap_ertm_pass_to_tx(sk, control);

		BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
			__next_seq(pi->buffer_seq, pi));

		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);

		l2cap_ertm_rx_expected_iframe(sk, control, skb);
	} else {
		/* Out-of-sequence frame: abandon the partial SDU */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to whatever the peer just sent */
	pi->last_acked_seq = control->txseq;
	pi->expected_tx_seq = __next_seq(control->txseq, pi);

	return err;
}
6808
/* Top-level ERTM receive dispatcher.
 *
 * Validates the acknowledgment (reqseq) carried by the frame and then
 * routes the event to the handler for the current RX state.  The
 * WAIT_F_FLAG case additionally completes a reverted AMP move: it
 * rewinds the transmit state to the peer's reqseq, restores the MTU of
 * the link now in use, re-segments pending data, and re-runs the event
 * through the RECV handler.  An invalid reqseq disconnects the channel.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* NOTE(review): a frame without the F-bit is neither
			 * processed nor freed on this path — confirm callers
			 * only reach this state with S-frames.
			 */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				/* Use the MTU of whichever controller now
				 * carries the channel.
				 */
				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
							event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
6903
6904void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
6905{
6906 lock_sock(sk);
6907
6908 l2cap_pi(sk)->fixed_channel = 1;
6909
6910 l2cap_pi(sk)->imtu = opt->imtu;
6911 l2cap_pi(sk)->omtu = opt->omtu;
6912 l2cap_pi(sk)->remote_mps = opt->omtu;
6913 l2cap_pi(sk)->mps = opt->omtu;
6914 l2cap_pi(sk)->flush_to = opt->flush_to;
6915 l2cap_pi(sk)->mode = opt->mode;
6916 l2cap_pi(sk)->fcs = opt->fcs;
6917 l2cap_pi(sk)->max_tx = opt->max_tx;
6918 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
6919 l2cap_pi(sk)->tx_win = opt->txwin_size;
6920 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
6921 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
6922 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
6923
6924 if (opt->mode == L2CAP_MODE_ERTM ||
6925 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
6926 l2cap_ertm_init(sk);
6927
6928 release_sock(sk);
6929
6930 return;
6931}
6932
/* Maps the 2-bit S-frame function field (control->super) to the
 * corresponding RX state machine event; indexed directly by
 * control->super, so the order must match the RR/REJ/RNR/SREJ
 * S-frame encoding.
 */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
6937
/* Entry point for data received on a connection-oriented channel.
 *
 * Dispatches on the channel mode: basic mode frames are queued to the
 * socket directly; ERTM/streaming frames have their control field and
 * FCS parsed and validated before being fed to the receive state
 * machine.  The skb is always consumed (queued, handed to the state
 * machine, or freed via the drop path).  Always returns 0.
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Parse the 4-byte extended or 2-byte enhanced control
		 * field into bt_cb(skb)->control.
		 */
		control = &bt_cb(skb)->control;
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
					control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
					control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Payload length excludes the SDU length field of a
		 * start-of-SDU I-frame and a trailing CRC16 FCS.
		 */
		if ((control->frame_type == 'i') &&
			(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		/* State machine owns the skb from here on */
		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7063
/* Process a deferred L2CAP data frame in process context: take the
 * socket lock, run the regular data-channel receive path, then release
 * the lock.  Ownership of @skb passes to l2cap_data_channel().
 */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7070
/* Deliver a connectionless (group) L2CAP PDU to the socket bound to
 * @psm on the local address, if any.  Consumes @skb in all cases:
 * either it is queued on the socket's receive queue or it is freed.
 * Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	/* Only bound or connected sockets may receive data */
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* Enforce the socket's incoming MTU */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* sock_queue_rcv_skb() takes ownership of skb on success (0) */
	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	/* sk is NULL when no listener matched the PSM; only unlock if
	 * we actually locked it above */
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
7100
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007101static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
7102{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007103 struct sock *sk;
Brian Gix7eaa64d2011-10-19 13:17:42 -07007104 struct sk_buff *skb_rsp;
7105 struct l2cap_hdr *lh;
7106 u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
7107 L2CAP_ATT_NOT_SUPPORTED};
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007108
Inga Stotlandf214b6e2011-10-11 08:56:15 -07007109 sk = l2cap_get_sock_by_fixed_scid(0, cid, conn->src, conn->dst);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007110 if (!sk)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007111 goto drop;
7112
7113 bh_lock_sock(sk);
7114
7115 BT_DBG("sk %p, len %d", sk, skb->len);
7116
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007117 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007118 goto drop;
7119
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007120 if (l2cap_pi(sk)->imtu < skb->len)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007121 goto drop;
7122
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007123 if (!sock_queue_rcv_skb(sk, skb))
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007124 goto done;
7125
7126drop:
Brian Gix7eaa64d2011-10-19 13:17:42 -07007127 if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT &&
7128 skb->data[0] != L2CAP_ATT_INDICATE)
7129 goto free_skb;
7130
7131 /* If this is an incoming PDU that requires a response, respond with
7132 * a generic error so remote device doesn't hang */
7133
7134 skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
7135 if (!skb_rsp)
7136 goto free_skb;
7137
7138 lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
7139 lh->len = cpu_to_le16(sizeof(err_rsp));
7140 lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
7141 err_rsp[1] = skb->data[0];
7142 memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
7143 hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
7144
7145free_skb:
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007146 kfree_skb(skb);
7147
7148done:
7149 if (sk)
7150 bh_unlock_sock(sk);
7151 return 0;
7152}
7153
Linus Torvalds1da177e2005-04-16 15:20:36 -07007154static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7155{
7156 struct l2cap_hdr *lh = (void *) skb->data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007157 struct sock *sk;
Al Viro8e036fc2007-07-29 00:16:36 -07007158 u16 cid, len;
7159 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160
7161 skb_pull(skb, L2CAP_HDR_SIZE);
7162 cid = __le16_to_cpu(lh->cid);
7163 len = __le16_to_cpu(lh->len);
7164
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03007165 if (len != skb->len) {
7166 kfree_skb(skb);
7167 return;
7168 }
7169
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7171
7172 switch (cid) {
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02007173 case L2CAP_CID_LE_SIGNALING:
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03007174 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007175 l2cap_sig_channel(conn, skb);
7176 break;
7177
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03007178 case L2CAP_CID_CONN_LESS:
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03007179 psm = get_unaligned_le16(skb->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007180 skb_pull(skb, 2);
7181 l2cap_conless_channel(conn, psm, skb);
7182 break;
7183
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007184 case L2CAP_CID_LE_DATA:
7185 l2cap_att_channel(conn, cid, skb);
7186 break;
7187
Anderson Brigliaea370122011-06-07 18:46:31 -03007188 case L2CAP_CID_SMP:
7189 if (smp_sig_channel(conn, skb))
7190 l2cap_conn_del(conn->hcon, EACCES);
7191 break;
7192
Linus Torvalds1da177e2005-04-16 15:20:36 -07007193 default:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007194 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
7195 if (sk) {
7196 if (sock_owned_by_user(sk)) {
7197 BT_DBG("backlog sk %p", sk);
7198 if (sk_add_backlog(sk, skb))
7199 kfree_skb(skb);
7200 } else
7201 l2cap_data_channel(sk, skb);
7202
7203 bh_unlock_sock(sk);
7204 } else if (cid == L2CAP_CID_A2MP) {
7205 BT_DBG("A2MP");
7206 amp_conn_ind(conn, skb);
7207 } else {
7208 BT_DBG("unknown cid 0x%4.4x", cid);
7209 kfree_skb(skb);
7210 }
7211
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212 break;
7213 }
7214}
7215
7216/* ---- L2CAP interface with lower layer (HCI) ---- */
7217
7218static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7219{
7220 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007221 register struct sock *sk;
7222 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007223
7224 if (type != ACL_LINK)
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007225 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007226
7227 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7228
7229 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007230 read_lock(&l2cap_sk_list.lock);
7231 sk_for_each(sk, node, &l2cap_sk_list.head) {
7232 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007233 continue;
7234
7235 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007236 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007237 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007238 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007239 exact++;
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007240 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7241 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007242 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007243 lm2 |= HCI_LM_MASTER;
7244 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007245 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007246 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007247
7248 return exact ? lm1 : lm2;
7249}
7250
7251static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7252{
Marcel Holtmann01394182006-07-03 10:02:46 +02007253 struct l2cap_conn *conn;
7254
Linus Torvalds1da177e2005-04-16 15:20:36 -07007255 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7256
Ville Tervoacd7d372011-02-10 22:38:49 -03007257 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007258 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007259
7260 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007261 conn = l2cap_conn_add(hcon, status);
7262 if (conn)
7263 l2cap_conn_ready(conn);
Marcel Holtmann01394182006-07-03 10:02:46 +02007264 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007265 l2cap_conn_del(hcon, bt_err(status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007266
7267 return 0;
7268}
7269
Marcel Holtmann2950f212009-02-12 14:02:50 +01007270static int l2cap_disconn_ind(struct hci_conn *hcon)
7271{
7272 struct l2cap_conn *conn = hcon->l2cap_data;
7273
7274 BT_DBG("hcon %p", hcon);
7275
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007276 if (hcon->type != ACL_LINK || !conn)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007277 return 0x13;
7278
7279 return conn->disc_reason;
7280}
7281
7282static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007283{
7284 BT_DBG("hcon %p reason %d", hcon, reason);
7285
Ville Tervoacd7d372011-02-10 22:38:49 -03007286 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007287 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007288
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007289 l2cap_conn_del(hcon, bt_err(reason));
Marcel Holtmann01394182006-07-03 10:02:46 +02007290
Linus Torvalds1da177e2005-04-16 15:20:36 -07007291 return 0;
7292}
7293
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007294static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007295{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007296 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007297 return;
7298
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007299 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007300 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7301 l2cap_sock_clear_timer(sk);
7302 l2cap_sock_set_timer(sk, HZ * 5);
7303 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
7304 __l2cap_sock_close(sk, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007305 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007306 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7307 l2cap_sock_clear_timer(sk);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007308 }
7309}
7310
/* HCI security (authentication/encryption) completion callback.
 *
 * Walks every channel on the connection and advances its state machine
 * according to @status (0 = success) and @encrypt:
 *  - LE data channels: propagate the link security level, stop the SMP
 *    timer, mark the channel ready and notify SMP.
 *  - Channels with a connect pending are skipped.
 *  - Established channels re-evaluate their encryption requirements.
 *  - BT_CONNECT channels either proceed (AMP physical link or L2CAP
 *    connect request) or get a short retry timer on failure.
 *  - BT_CONNECT2 channels send the deferred connect response
 *    (success or security block).
 * Each socket is bh-locked only for its own iteration; the channel
 * list is held under the read lock throughout.  Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);

		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			/* LE link now encrypted: adopt the link's level */
			if (!status && encrypt)
				l2cap_pi(sk)->sec_level = hcon->sec_level;

			del_timer(&hcon->smp_timer);
			l2cap_chan_ready(sk);
			smp_link_encrypt_cmplt(conn, status, encrypt);

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect is already pending; nothing to do here */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channel: just re-check encryption policy */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: issue the connect, via an
				 * AMP physical link if policy prefers it */
				l2cap_pi(sk)->conf_state |=
						L2CAP_CONF_CONNECT_PEND;
				if (l2cap_pi(sk)->amp_pref ==
						BT_AMP_POLICY_PREFER_AMP) {
					amp_create_physical(l2cap_pi(sk)->conn,
							sk);
				} else
					l2cap_send_conn_req(sk);
			} else {
				/* Security failed: short timer before the
				 * channel is torn down */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				if (l2cap_pi(sk)->amp_id) {
					/* Response is deferred until the AMP
					 * physical link is accepted */
					amp_accept_physical(conn,
						l2cap_pi(sk)->amp_id, sk);
					bh_unlock_sock(sk);
					continue;
				}

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			/* Send the deferred connect response */
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
7404
/* HCI ACL data callback: reassemble L2CAP frames from ACL fragments.
 *
 * A fragment flagged ACL_START must begin with the basic L2CAP header,
 * whose length field tells us the total frame size.  A frame that fits
 * in one fragment is dispatched immediately; otherwise a reassembly
 * buffer (conn->rx_skb / conn->rx_len) collects continuation fragments
 * until the frame is complete.  Protocol violations (unexpected start,
 * short header, over-long frame or fragment, stray continuation) mark
 * the connection unreliable via l2cap_conn_unreliable(ECOMM) and drop
 * the fragment.  Consumes @skb in all cases; always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Non-BR/EDR controllers do not get an implicit conn here */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start while reassembly is in progress means the
		 * previous frame was never completed: discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received; l2cap_recv_frame()
			 * consumes the skb */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		/* ACL_START|ACL_CONT claims a complete frame, but the
		 * length does not match */
		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received; ownership of rx_skb
			 * passes to l2cap_recv_frame() */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
7507
/* debugfs: print one line per L2CAP socket — source/destination
 * addresses, socket state, PSM, source/destination CIDs, incoming and
 * outgoing MTU, security level and channel mode.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level,
					pi->mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
7531
/* debugfs open handler: wire l2cap_debugfs_show() into seq_file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7536
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file; NULL when debugfs is unavailable */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545
/* Callback table registered with the HCI core: connection lifecycle,
 * security events, inbound ACL data and AMP create/modify/destroy
 * confirmations all funnel into L2CAP through here. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata,
	.create_cfm	= l2cap_create_cfm,
	.modify_cfm	= l2cap_modify_cfm,
	.destroy_cfm	= l2cap_destroy_cfm,
};
7559
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007560int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007561{
7562 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007563
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007564 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007565 if (err < 0)
7566 return err;
7567
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007568 _l2cap_wq = create_singlethread_workqueue("l2cap");
7569 if (!_l2cap_wq) {
7570 err = -ENOMEM;
7571 goto error;
7572 }
7573
Linus Torvalds1da177e2005-04-16 15:20:36 -07007574 err = hci_register_proto(&l2cap_hci_proto);
7575 if (err < 0) {
7576 BT_ERR("L2CAP protocol registration failed");
7577 bt_sock_unregister(BTPROTO_L2CAP);
7578 goto error;
7579 }
7580
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007581 if (bt_debugfs) {
7582 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7583 bt_debugfs, NULL, &l2cap_debugfs_fops);
7584 if (!l2cap_debugfs)
7585 BT_ERR("Failed to create L2CAP debug file");
7586 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007588 if (amp_init() < 0) {
7589 BT_ERR("AMP Manager initialization failed");
7590 goto error;
7591 }
7592
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593 return 0;
7594
7595error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007596 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007597 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598 return err;
7599}
7600
/* Tear down the L2CAP layer in roughly the reverse order of
 * l2cap_init(): AMP manager, debugfs entry, workqueue, HCI protocol
 * registration, sockets. */
void l2cap_exit(void)
{
	amp_exit();

	debugfs_remove(l2cap_debugfs);

	/* NOTE(review): destroy_workqueue() flushes pending work itself,
	 * so the explicit flush_workqueue() is presumably belt-and-braces
	 * — confirm before removing. */
	flush_workqueue(_l2cap_wq);
	destroy_workqueue(_l2cap_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
7615
/* Module parameters (writable via sysfs, mode 0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_reconfig, bool, 0644);
MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");