blob: de3ab22e62dda8695a3ecd77d9a91c3e3e01bbe1 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Srinivas Krovvidi10734192011-12-29 07:29:11 +05303 Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum. All rights reserved.
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090017 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090022 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 SOFTWARE IS DISCLAIMED.
25*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliaea370122011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
/* Module parameters (set from l2cap_init / sysfs elsewhere in this file). */
int disable_ertm;	/* non-zero disables ERTM and streaming modes */
int enable_reconfig;	/* non-zero enables channel reconfiguration support */

/* Locally supported L2CAP feature mask and fixed-channel bitmap
 * advertised in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };

/* Single workqueue shared by all ERTM timers and deferred work. */
struct workqueue_struct *_l2cap_wq;

/* Global list of all L2CAP sockets. */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

/* Forward declarations: AMP channel-move signalling helpers. */
static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
			struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
			struct l2cap_pinfo *pi, u16 icid, u16 result);
static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
			u16 icid, u16 result);

/* Forward declarations: AMP move state transitions on a channel. */
static void l2cap_amp_move_setup(struct sock *sk);
static void l2cap_amp_move_success(struct sock *sk);
static void l2cap_amp_move_revert(struct sock *sk);

static int l2cap_ertm_rx_queued_iframes(struct sock *sk);

/* Forward declarations: command building, connection teardown and
 * ACL flush-timeout management defined later in this file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static int l2cap_answer_move_poll(struct sock *sk);
static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
static void l2cap_chan_ready(struct sock *sk);
static void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process);
static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l);
static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -030095
Marcel Holtmann01394182006-07-03 10:02:46 +020096/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Gustavo F. Padovan4a6aa522011-05-17 14:34:52 -030098{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070099 struct sock *s;
100 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
101 if (l2cap_pi(s)->dcid == cid)
102 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200103 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700104 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200105}
106
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700107/* Find channel with given DCID.
108 * Returns locked socket */
109static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
110 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200111{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700112 struct sock *s;
113 read_lock(&l->lock);
114 s = __l2cap_get_chan_by_dcid(l, cid);
115 if (s)
116 bh_lock_sock(s);
117 read_unlock(&l->lock);
118 return s;
119}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300120
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700121static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
122{
123 struct sock *s;
124 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
125 if (l2cap_pi(s)->scid == cid)
126 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200127 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700128 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200129}
130
131/* Find channel with given SCID.
132 * Returns locked socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200134{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700135 struct sock *s;
136 read_lock(&l->lock);
137 s = __l2cap_get_chan_by_scid(l, cid);
138 if (s)
139 bh_lock_sock(s);
140 read_unlock(&l->lock);
141 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200142}
143
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200145{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700146 struct sock *s;
147 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
148 if (l2cap_pi(s)->ident == ident)
149 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200150 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700151 return s;
152}
153
154static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155{
156 struct sock *s;
157 read_lock(&l->lock);
158 s = __l2cap_get_chan_by_ident(l, ident);
159 if (s)
160 bh_lock_sock(s);
161 read_unlock(&l->lock);
162 return s;
163}
164
165static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
166 u16 seq)
167{
168 struct sk_buff *skb;
169
170 skb_queue_walk(head, skb) {
171 if (bt_cb(skb)->control.txseq == seq)
172 return skb;
173 }
174
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300175 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200176}
177
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200179{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180 u16 allocSize = 1;
181 int err = 0;
182 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300183
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700184 /* Actual allocated size must be a power of 2 */
185 while (allocSize && allocSize <= size)
186 allocSize <<= 1;
187 if (!allocSize)
188 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200189
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700190 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
191 if (!seq_list->list)
192 return -ENOMEM;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194 seq_list->size = allocSize;
195 seq_list->mask = allocSize - 1;
196 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
197 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
198 for (i = 0; i < allocSize; i++)
199 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300200
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300201 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300202}
203
/* Release the backing array of a sequence list; kfree(NULL) is a no-op,
 * so this is safe on a never-initialized list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
208
/* Test whether @seq is currently linked into the list: a slot holds
 * L2CAP_SEQ_LIST_CLEAR exactly when its sequence number is absent. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					u16 seq)
{
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
214
/* Remove @seq from the sequence list.
 *
 * The list is a singly linked list threaded through the array: each
 * occupied slot stores the NEXT sequence number, the last slot stores
 * L2CAP_SEQ_LIST_TAIL, and empty slots store L2CAP_SEQ_LIST_CLEAR.
 *
 * Returns @seq if it was found and removed, L2CAP_SEQ_LIST_CLEAR if the
 * list was empty or @seq was not present.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Head pointed at the tail sentinel: list is now empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		/* Walk the chain looking for the predecessor of @seq */
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Unlink: predecessor now points past @seq */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
254
/* Remove and return the first sequence number in the list, or
 * L2CAP_SEQ_LIST_CLEAR if the list is empty. */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
259
260static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
261{
262 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
263 u16 i;
264 for (i = 0; i < seq_list->size; i++)
265 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
266
267 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
268 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
269 }
270}
271
/* Append @seq to the tail of the sequence list.
 * Duplicates are ignored: a slot that is not CLEAR is already linked. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
		/* Empty list: new entry becomes the head; otherwise the
		 * old tail is linked to it. */
		if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
			seq_list->head = seq;
		else
			seq_list->list[seq_list->tail & mask] = seq;

		seq_list->tail = seq;
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
	}
}
288
289static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
290{
291 u16 packed;
292
293 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
294 L2CAP_CTRL_REQSEQ;
295 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
296 L2CAP_CTRL_FINAL;
297
298 if (control->frame_type == 's') {
299 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
300 L2CAP_CTRL_POLL;
301 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
302 L2CAP_CTRL_SUPERVISE;
303 packed |= L2CAP_CTRL_FRAME_TYPE;
304 } else {
305 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
306 L2CAP_CTRL_SAR;
307 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
308 L2CAP_CTRL_TXSEQ;
309 }
310
311 return packed;
312}
313
314static void __get_enhanced_control(u16 enhanced,
315 struct bt_l2cap_control *control)
316{
317 control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
318 L2CAP_CTRL_REQSEQ_SHIFT;
319 control->final = (enhanced & L2CAP_CTRL_FINAL) >>
320 L2CAP_CTRL_FINAL_SHIFT;
321
322 if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
323 control->frame_type = 's';
324 control->poll = (enhanced & L2CAP_CTRL_POLL) >>
325 L2CAP_CTRL_POLL_SHIFT;
326 control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
327 L2CAP_CTRL_SUPERVISE_SHIFT;
328
329 control->sar = 0;
330 control->txseq = 0;
331 } else {
332 control->frame_type = 'i';
333 control->sar = (enhanced & L2CAP_CTRL_SAR) >>
334 L2CAP_CTRL_SAR_SHIFT;
335 control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
336 L2CAP_CTRL_TXSEQ_SHIFT;
337
338 control->poll = 0;
339 control->super = 0;
340 }
341}
342
343static u32 __pack_extended_control(struct bt_l2cap_control *control)
344{
345 u32 packed;
346
347 packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
348 L2CAP_EXT_CTRL_REQSEQ;
349 packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
350 L2CAP_EXT_CTRL_FINAL;
351
352 if (control->frame_type == 's') {
353 packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
354 L2CAP_EXT_CTRL_POLL;
355 packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
356 L2CAP_EXT_CTRL_SUPERVISE;
357 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
358 } else {
359 packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
360 L2CAP_EXT_CTRL_SAR;
361 packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
362 L2CAP_EXT_CTRL_TXSEQ;
363 }
364
365 return packed;
366}
367
368static void __get_extended_control(u32 extended,
369 struct bt_l2cap_control *control)
370{
371 control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
372 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
373 control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
374 L2CAP_EXT_CTRL_FINAL_SHIFT;
375
376 if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
377 control->frame_type = 's';
378 control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
379 L2CAP_EXT_CTRL_POLL_SHIFT;
380 control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
381 L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
382
383 control->sar = 0;
384 control->txseq = 0;
385 } else {
386 control->frame_type = 'i';
387 control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
388 L2CAP_EXT_CTRL_SAR_SHIFT;
389 control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
390 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
391
392 control->poll = 0;
393 control->super = 0;
394 }
395}
396
/* Cancel any pending delayed-ack work for this channel. */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
402
403static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
404{
405 BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
406 if (!delayed_work_pending(&pi->ack_work)) {
407 queue_delayed_work(_l2cap_wq, &pi->ack_work,
408 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
409 }
410}
411
/* Cancel any pending retransmission work for this channel. */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
417
/* (Re)arm the retransmission timer. Skipped while the monitor timer is
 * pending (monitor supersedes retransmission in ERTM) or when no
 * retransmission timeout has been negotiated. */
static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
		/* Cancel first so the timer restarts from now */
		__cancel_delayed_work(&pi->retrans_work);
		queue_delayed_work(_l2cap_wq, &pi->retrans_work,
			msecs_to_jiffies(pi->retrans_timeout));
	}
}
427
/* Cancel any pending monitor work for this channel. */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
433
/* (Re)arm the monitor timer. The retransmission timer is stopped first:
 * once monitoring starts, only the monitor timeout drives recovery. */
static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	l2cap_ertm_stop_retrans_timer(pi);
	/* Cancel first so the timer restarts from now */
	__cancel_delayed_work(&pi->monitor_work);
	if (pi->monitor_timeout) {
		queue_delayed_work(_l2cap_wq, &pi->monitor_work,
				msecs_to_jiffies(pi->monitor_timeout));
	}
}
444
445static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200446{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300447 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200448
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300449 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700450 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200451 return cid;
452 }
453
454 return 0;
455}
456
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300458{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700459 sock_hold(sk);
Gustavo F. Padovanf1b394a2011-06-03 00:19:47 -0300460
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700461 if (l->head)
462 l2cap_pi(l->head)->prev_c = sk;
463
464 l2cap_pi(sk)->next_c = l->head;
465 l2cap_pi(sk)->prev_c = NULL;
466 l->head = sk;
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300467}
468
/* Remove @sk from the connection's channel list and drop the reference
 * taken by __l2cap_chan_link. Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: drop the ref without triggering destruction here */
	__sock_put(sk);
}
485
/* Attach channel @sk to connection @conn: assign CIDs/MTUs according to
 * socket type and link type, adjust the ACL flush timeout if this
 * channel needs a shorter one, and link the socket into the channel
 * list. Caller is expected to hold the channel list lock — TODO confirm
 * against callers. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason (remote user terminated) —
	 * presumably per HCI error codes; verify against spec usage */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (!l2cap_pi(sk)->fixed_channel &&
		(sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: fixed LE data CID, MTU floored at
			 * the LE default */
			if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
			if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;

			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else if (sk->sk_type == SOCK_RAW) {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	/* If this channel's flush timeout is shorter than every channel
	 * already on the link, the ACL flush timeout must shrink to it. */
	if (l2cap_get_smallest_flushto(l) > l2cap_pi(sk)->flush_to) {
		/*if flush timeout of the channel is lesser than existing */
		l2cap_set_acl_flushto(conn->hcon, l2cap_pi(sk)->flush_to);
	}
	/* Otherwise, do not set scid/dcid/omtu. These will be set up
	 * by l2cap_fixed_channel_config()
	 */

	__l2cap_chan_link(l, sk);
}
535
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Tears the channel off its connection: unlinks it, re-expands the ACL
 * flush timeout if this channel was the one constraining it, releases
 * any AMP channel, marks the socket closed/zapped, notifies the parent
 * (for accept queues) or the socket itself, and purges all ERTM state.
 * @err, if non-zero, is reported through sk->sk_err. */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		struct l2cap_chan_list *l = &conn->chan_list;
		/* Unlink from channel list */
		l2cap_chan_unlink(l, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Fixed channels never took an hcon reference to drop —
		 * NOTE(review): confirm against the add path */
		if (!l2cap_pi(sk)->fixed_channel)
			hci_conn_put(conn->hcon);

		/* With this channel gone the smallest per-channel flush
		 * timeout may have grown; widen the ACL setting to match. */
		read_lock(&l->lock);
		if (l2cap_pi(sk)->flush_to < l2cap_get_smallest_flushto(l))
			l2cap_set_acl_flushto(conn->hcon,
				l2cap_get_smallest_flushto(l));
		read_unlock(&l->lock);
	}

	if (l2cap_pi(sk)->ampchan) {
		/* Detach from the AMP channel. Clear the pointers before
		 * dropping the reference so no path sees a half-torn state. */
		struct hci_chan *ampchan = l2cap_pi(sk)->ampchan;
		struct hci_conn *ampcon = l2cap_pi(sk)->ampcon;
		l2cap_pi(sk)->ampchan = NULL;
		l2cap_pi(sk)->ampcon = NULL;
		l2cap_pi(sk)->amp_id = 0;
		/* hci_chan_put returning non-zero appears to mean the last
		 * reference was dropped — TODO confirm its contract */
		if (hci_chan_put(ampchan))
			ampcon->l2cap_data = NULL;
		else
			l2cap_deaggregate(ampchan, l2cap_pi(sk));
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept: remove from parent's queue and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Drop all unsent data and ERTM bookkeeping */
	sk->sk_send_head = NULL;
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Partially reassembled SDU, SREJ queue and all timers */
		if (l2cap_pi(sk)->sdu)
			kfree_skb(l2cap_pi(sk)->sdu);

		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}
}
600
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700601static inline u8 l2cap_get_auth_type(struct sock *sk)
Gustavo F. Padovan6e9e43f2011-04-28 17:55:53 -0300602{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700603 if (sk->sk_type == SOCK_RAW) {
604 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530605 case BT_SECURITY_HIGH:
606 return HCI_AT_DEDICATED_BONDING_MITM;
607 case BT_SECURITY_MEDIUM:
608 return HCI_AT_DEDICATED_BONDING;
609 default:
610 return HCI_AT_NO_BONDING;
611 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700612 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
613 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
614 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530615
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700616 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530617 return HCI_AT_NO_BONDING_MITM;
618 else
619 return HCI_AT_NO_BONDING;
620 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530622 case BT_SECURITY_HIGH:
623 return HCI_AT_GENERAL_BONDING_MITM;
624 case BT_SECURITY_MEDIUM:
625 return HCI_AT_GENERAL_BONDING;
626 default:
627 return HCI_AT_NO_BONDING;
628 }
629 }
630}
631
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200632/* Service level security */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700633static inline int l2cap_check_security(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200634{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700635 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100636 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200637
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700638 auth_type = l2cap_get_auth_type(sk);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100639
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700640 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
641 auth_type);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200642}
643
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644u8 l2cap_get_ident(struct l2cap_conn *conn)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200645{
646 u8 id;
647
648 /* Get next available identificator.
649 * 1 - 128 are used by kernel.
650 * 129 - 199 are reserved.
651 * 200 - 254 are used by utilities like l2ping, etc.
652 */
653
654 spin_lock_bh(&conn->lock);
655
656 if (++conn->tx_ident > 128)
657 conn->tx_ident = 1;
658
659 id = conn->tx_ident;
660
661 spin_unlock_bh(&conn->lock);
662
663 return id;
664}
665
/* Compute the CRC-16 FCS over an outgoing frame and write it into the
 * last L2CAP_FCS_SIZE bytes of the final fragment. The skb may carry a
 * frag list; the CRC is accumulated across the head and every fragment,
 * excluding the trailing FCS field itself. Assumes the FCS space has
 * already been reserved at the tail — TODO confirm against callers. */
static void apply_fcs(struct sk_buff *skb)
{
	size_t len;
	u16 partial_crc;
	struct sk_buff *iter;
	struct sk_buff *final_frag = skb;

	/* With a frag list, the head's data is only skb_headlen() long;
	 * otherwise the whole frame minus the FCS lives in the head. */
	if (skb_has_frag_list(skb))
		len = skb_headlen(skb);
	else
		len = skb->len - L2CAP_FCS_SIZE;

	partial_crc = crc16(0, (u8 *) skb->data, len);

	skb_walk_frags(skb, iter) {
		len = iter->len;
		/* Last fragment holds the FCS field; exclude it */
		if (!iter->next)
			len -= L2CAP_FCS_SIZE;

		partial_crc = crc16(partial_crc, iter->data, len);
		final_frag = iter;
	}

	put_unaligned_le16(partial_crc,
		final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
}
692
/* Build and transmit an L2CAP signalling command on @conn's ACL link.
 * Silently drops the command if the skb cannot be built. */
void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Signalling must not be flushed if the controller supports it */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Request active mode for signalling traffic */
	bt_cb(skb)->force_active = 1;

	hci_send_acl(conn->hcon, NULL, skb, flags);
}
712
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700713static inline int __l2cap_no_conn_pending(struct sock *sk)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300714{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700715 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300716}
717
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700718static void l2cap_send_conn_req(struct sock *sk)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300719{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700720 struct l2cap_conn_req req;
721 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
722 req.psm = l2cap_pi(sk)->psm;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300723
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700724 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
Gustavo F. Padovan2ab25cd2009-10-03 02:34:40 -0300725
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700726 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
727 L2CAP_CONN_REQ, sizeof(req), &req);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300728}
729
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700730static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300731{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700732 struct l2cap_create_chan_req req;
733 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
734 req.psm = l2cap_pi(sk)->psm;
735 req.amp_id = amp_id;
736
737 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
738 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
739
740 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
741 L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300742}
743
/* Drive channel establishment. If the remote feature mask is already
 * known (or being fetched), proceed to connect once security passes —
 * preferring an AMP physical link when policy and the remote's fixed
 * channels allow it. Otherwise kick off an Information Request first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			/* Prefer AMP when the socket's policy asks for it
			 * and the remote supports the A2MP fixed channel */
			if (l2cap_pi(sk)->amp_pref ==
					BT_AMP_POLICY_PREFER_AMP &&
					conn->fc_mask & L2CAP_FC_A2MP)
				amp_create_physical(conn, sk);
			else
				l2cap_send_conn_req(sk);
		}
	} else {
		/* First channel on this link: query remote features */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
776
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300777static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
778{
779 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300780 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300781 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
782
783 switch (mode) {
784 case L2CAP_MODE_ERTM:
785 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
786 case L2CAP_MODE_STREAMING:
787 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
788 default:
789 return 0x00;
790 }
791}
792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700793void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300794{
795 struct l2cap_disconn_req req;
796
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300797 if (!conn)
798 return;
799
Mat Martineau380dcd42011-12-19 10:11:30 -0800800 sk->sk_send_head = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700801 skb_queue_purge(TX_QUEUE(sk));
Gustavo F. Padovane92c8e72011-04-01 00:53:45 -0300802
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700803 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
804 skb_queue_purge(SREJ_QUEUE(sk));
805
806 __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
807 __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
808 __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300809 }
810
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700811 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
812 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300813 l2cap_send_cmd(conn, l2cap_get_ident(conn),
814 L2CAP_DISCONN_REQ, sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700816 sk->sk_state = BT_DISCONN;
Gustavo F. Padovan9b108fc2010-05-20 16:21:53 -0300817 sk->sk_err = err;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300818}
819
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200821static void l2cap_conn_start(struct l2cap_conn *conn)
822{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700823 struct l2cap_chan_list *l = &conn->chan_list;
824 struct sock_del_list del, *tmp1, *tmp2;
825 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200826
827 BT_DBG("conn %p", conn);
828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700829 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200830
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700831 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200834 bh_lock_sock(sk);
835
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700836 if (sk->sk_type != SOCK_SEQPACKET &&
837 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200838 bh_unlock_sock(sk);
839 continue;
840 }
841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700842 if (sk->sk_state == BT_CONNECT) {
843 if (!l2cap_check_security(sk) ||
844 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300845 bh_unlock_sock(sk);
846 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200847 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300848
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700849 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
850 conn->feat_mask)
851 && l2cap_pi(sk)->conf_state &
852 L2CAP_CONF_STATE2_DEVICE) {
853 tmp1 = kzalloc(sizeof(struct sock_del_list),
854 GFP_ATOMIC);
855 tmp1->sk = sk;
856 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300857 bh_unlock_sock(sk);
858 continue;
859 }
860
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700861 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300862
Peter Krystadc446d212011-09-20 15:35:50 -0700863 if (l2cap_pi(sk)->amp_pref ==
864 BT_AMP_POLICY_PREFER_AMP &&
865 conn->fc_mask & L2CAP_FC_A2MP)
866 amp_create_physical(conn, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700867 else
868 l2cap_send_conn_req(sk);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300869
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700870 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200871 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300872 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700873 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
874 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200875
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700876 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100877 if (bt_sk(sk)->defer_setup) {
878 struct sock *parent = bt_sk(sk)->parent;
879 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
880 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Nick Pellyf86e4b02010-04-08 16:23:32 -0700881 if (parent)
882 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100883
884 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700885 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100886 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
887 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
888 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200889 } else {
890 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
891 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
892 }
893
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
895 l2cap_pi(sk)->amp_id) {
896 amp_accept_physical(conn,
897 l2cap_pi(sk)->amp_id, sk);
898 bh_unlock_sock(sk);
899 continue;
900 }
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300901
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700902 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
903 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
904
905 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300906 rsp.result != L2CAP_CR_SUCCESS) {
907 bh_unlock_sock(sk);
908 continue;
909 }
910
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700911 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300912 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700913 l2cap_build_conf_req(sk, buf), buf);
914 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200915 }
916
917 bh_unlock_sock(sk);
918 }
919
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700920 read_unlock(&l->lock);
921
922 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
923 bh_lock_sock(tmp1->sk);
924 __l2cap_sock_close(tmp1->sk, ECONNRESET);
925 bh_unlock_sock(tmp1->sk);
926 list_del(&tmp1->list);
927 kfree(tmp1);
928 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200929}
930
Inga Stotlandf214b6e2011-10-11 08:56:15 -0700931/* Find socket with fixed cid with given source and destination bdaddrs.
Brian Gix20de7cf2012-02-02 14:56:51 -0800932 * Direction of the req/rsp must match.
Inga Stotlandf214b6e2011-10-11 08:56:15 -0700933 */
Brian Gix20de7cf2012-02-02 14:56:51 -0800934struct sock *l2cap_find_sock_by_fixed_cid_and_dir(__le16 cid, bdaddr_t *src,
935 bdaddr_t *dst, int incoming)
Inga Stotlandf214b6e2011-10-11 08:56:15 -0700936{
937 struct sock *sk = NULL, *sk1 = NULL;
938 struct hlist_node *node;
939
Brian Gix20de7cf2012-02-02 14:56:51 -0800940 BT_DBG(" %d", incoming);
941
Inga Stotlandf214b6e2011-10-11 08:56:15 -0700942 read_lock(&l2cap_sk_list.lock);
943
944 sk_for_each(sk, node, &l2cap_sk_list.head) {
Brian Gix20de7cf2012-02-02 14:56:51 -0800945
946 if (incoming && !l2cap_pi(sk)->incoming)
947 continue;
948
949 if (!incoming && l2cap_pi(sk)->incoming)
Inga Stotlandf214b6e2011-10-11 08:56:15 -0700950 continue;
951
952 if (l2cap_pi(sk)->scid == cid && !bacmp(&bt_sk(sk)->dst, dst)) {
953 /* Exact match. */
954 if (!bacmp(&bt_sk(sk)->src, src))
955 break;
956
957 /* Closest match */
958 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
959 sk1 = sk;
960 }
961 }
962
963 read_unlock(&l2cap_sk_list.lock);
964
965 return node ? sk : sk1;
966}
967
Ville Tervob62f3282011-02-10 22:38:50 -0300968/* Find socket with cid and source bdaddr.
969 * Returns closest match, locked.
970 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
Ville Tervob62f3282011-02-10 22:38:50 -0300972{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700973 struct sock *sk = NULL, *sk1 = NULL;
974 struct hlist_node *node;
Ville Tervob62f3282011-02-10 22:38:50 -0300975
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700976 read_lock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300977
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700978 sk_for_each(sk, node, &l2cap_sk_list.head) {
979 if (state && sk->sk_state != state)
Ville Tervob62f3282011-02-10 22:38:50 -0300980 continue;
981
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700982 if (l2cap_pi(sk)->scid == cid) {
Ville Tervob62f3282011-02-10 22:38:50 -0300983 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700984 if (!bacmp(&bt_sk(sk)->src, src))
985 break;
Ville Tervob62f3282011-02-10 22:38:50 -0300986
987 /* Closest match */
988 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700989 sk1 = sk;
Ville Tervob62f3282011-02-10 22:38:50 -0300990 }
991 }
Gustavo F. Padovan280f2942011-04-13 19:01:22 -0300992
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700993 read_unlock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300994
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700995 return node ? sk : sk1;
Ville Tervob62f3282011-02-10 22:38:50 -0300996}
997
/* An LE link came up: if a socket is listening on the LE data CID for our
 * local address, create a child socket, attach it to this connection and
 * mark it connected immediately (no L2CAP configuration step on LE here).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Keep the underlying ACL alive while the child channel exists. */
	hci_conn_hold(conn->hcon);

	/* Child inherits settings from the listening parent. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->incoming = 1;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);

	sk->sk_state = BT_CONNECTED;
	/* Wake the parent so accept() can pick up the new child. */
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

clean:
	bh_unlock_sock(parent);
}
1044
/* The HCI link is fully established: walk every channel on the connection
 * and advance its state.  LE channels run security first; raw sockets are
 * marked connected immediately; BT_CONNECT channels start the L2CAP
 * connection procedure.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: offer it to an LE listener before channel walk. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	if (l->head) {
		for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
			bh_lock_sock(sk);

			if (conn->hcon->type == LE_LINK) {
				/* Use the stronger of the socket's and the
				 * link's pending security requirement.
				 */
				u8 sec_level = l2cap_pi(sk)->sec_level;
				u8 pending_sec = conn->hcon->pending_sec_level;

				if (pending_sec > sec_level)
					sec_level = pending_sec;

				/* Nonzero return means security is already
				 * satisfied, so report the channel ready now;
				 * otherwise SMP pairing has been started.
				 */
				if (smp_conn_security(conn, sec_level))
					l2cap_chan_ready(sk);

				/* NOTE(review): drops a connection reference
				 * here — presumably balancing a hold taken on
				 * the LE connect path; confirm against the
				 * hci_conn_hold() callers before changing.
				 */
				hci_conn_put(conn->hcon);

			} else if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Raw/dgram sockets need no channel setup. */
				l2cap_sock_clear_timer(sk);
				sk->sk_state = BT_CONNECTED;
				sk->sk_state_change(sk);
			} else if (sk->sk_state == BT_CONNECT)
				l2cap_do_start(sk);

			bh_unlock_sock(sk);
		}
	} else if (conn->hcon->type == LE_LINK) {
		/* No channels yet: still raise LE link security to HIGH. */
		smp_conn_security(conn, BT_SECURITY_HIGH);
	}

	read_unlock(&l->lock);

	/* Outgoing LE link: offer to LE listeners after the channel walk. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);
}
1092
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001093/* Notify sockets that we cannot guaranty reliability anymore */
1094static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1095{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001096 struct l2cap_chan_list *l = &conn->chan_list;
1097 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001098
1099 BT_DBG("conn %p", conn);
1100
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001101 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001102
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001103 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1104 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001105 sk->sk_err = err;
1106 }
1107
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001108 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001109}
1110
1111static void l2cap_info_timeout(unsigned long arg)
1112{
1113 struct l2cap_conn *conn = (void *) arg;
1114
Marcel Holtmann984947d2009-02-06 23:35:19 +01001115 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001116 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001117
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001118 l2cap_conn_start(conn);
1119}
1120
/* Allocate and initialise L2CAP layer state for an HCI link.
 * Returns the existing l2cap_conn if one is already attached, and NULL
 * when allocation fails or status reports a failed connect attempt.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may advertise their own (typically smaller) data MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	/* Peer features unknown until the information exchange runs. */
	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* LE links arm the SMP pairing timer; BR/EDR links arm the
	 * information-request timer.  NOTE(review): the smp_timer lives on
	 * hcon but its callback data is conn — confirm smp_timeout()
	 * expects an l2cap_conn pointer.
	 */
	if (hcon->type == LE_LINK)
		setup_timer(&hcon->smp_timer, smp_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason reported until something overrides it. */
	conn->disc_reason = 0x13;

	return conn;
}
1161
/* Tear down L2CAP state when an HCI link goes away.  Kills every channel
 * that rides on hcon — either as the owning ACL (conn->hcon) or as an AMP
 * controller (ampcon) — delivering err to each socket.  is_process selects
 * process-context locking (lock_sock) over bottom-half locking.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame on the owning link. */
	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			/* Save the link before l2cap_chan_del unlinks sk. */
			next = l2cap_pi(sk)->next_c;
			if (is_process)
				lock_sock(sk);
			else
				bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			if (is_process)
				release_sock(sk);
			else
				bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	/* Only the owning ACL tears down the shared conn object itself. */
	if (conn->hcon == hcon) {
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		hcon->l2cap_data = NULL;

		kfree(conn);
	}
}
1207
1208static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
1209{
1210 struct l2cap_chan_list *l = &conn->chan_list;
1211 write_lock_bh(&l->lock);
1212 __l2cap_chan_add(conn, sk);
1213 write_unlock_bh(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214}
1215
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217
1218/* Find socket with psm and source bdaddr.
1219 * Returns closest match.
1220 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001221static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001223 struct sock *sk = NULL, *sk1 = NULL;
1224 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001226 read_lock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001228 sk_for_each(sk, node, &l2cap_sk_list.head) {
1229 if (state && sk->sk_state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 continue;
1231
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001232 if (l2cap_pi(sk)->psm == psm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001234 if (!bacmp(&bt_sk(sk)->src, src))
1235 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236
1237 /* Closest match */
1238 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001239 sk1 = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240 }
1241 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001243 read_unlock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001245 return node ? sk : sk1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246}
1247
/* Establish (or attach to) the HCI link for an outgoing L2CAP channel and
 * drive the socket into BT_CONNECT/BT_CONNECTED as appropriate.
 * Returns 0 on success or a negative errno.
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	/* Pick the local adapter that routes to dst. */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		/* Destination CID selects the transport: LE data channel
		 * vs. classic ACL.
		 */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_connect(hdev, LE_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			/* Drop the reference hci_connect() gave us. */
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	/* Fixed channels, and LE channels on an already-up link, need no
	 * L2CAP connect/config exchange: they are connected immediately.
	 */
	if ((l2cap_pi(sk)->fixed_channel) ||
			(l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
				hcon->state == BT_CONNECTED)) {
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
		sk->sk_state_change(sk);

		if (hcon->state == BT_CONNECTED) {
			if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Raw sockets: connected once security OKs. */
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1334
/* Block (interruptibly) until all transmitted ERTM frames are acknowledged
 * and the local transmit pipeline drains, or until the connection goes
 * away, a signal arrives, or a socket error is raised.  Called with the
 * socket lock held; the lock is released while sleeping.
 * Returns 0 on success or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Sleep in 200ms slices so progress is re-checked. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while waiting so acks can arrive. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1366
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001367static void l2cap_ertm_tx_worker(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001368{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001369 struct l2cap_pinfo *pi =
1370 container_of(work, struct l2cap_pinfo, tx_work);
1371 struct sock *sk = (struct sock *)pi;
1372 BT_DBG("%p", pi);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001373
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001374 lock_sock(sk);
1375 l2cap_ertm_send(sk);
1376 release_sock(sk);
Mat Martineau2f0cd842011-10-20 14:34:26 -07001377 sock_put(sk);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001378}
1379
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001380static void l2cap_skb_destructor(struct sk_buff *skb)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001381{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001382 struct sock *sk = skb->sk;
1383 int queued;
Mat Martineau2f0cd842011-10-20 14:34:26 -07001384 int keep_sk = 0;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001385
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001386 queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
1387 if (queued < L2CAP_MIN_ERTM_QUEUED)
Mat Martineau2f0cd842011-10-20 14:34:26 -07001388 keep_sk = queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
1389
1390 if (!keep_sk)
1391 sock_put(sk);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001392}
1393
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001394void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001395{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001396 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001398 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001399
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001400 if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
1401 pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
1402 BT_DBG("Sending on AMP connection %p %p",
1403 pi->ampcon, pi->ampchan);
1404 if (pi->ampchan)
1405 hci_send_acl(pi->ampcon, pi->ampchan, skb,
1406 ACL_COMPLETE);
1407 else
1408 kfree_skb(skb);
1409 } else {
1410 u16 flags;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001411
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001412 bt_cb(skb)->force_active = pi->force_active;
1413 BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001414
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001415 if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
1416 !l2cap_pi(sk)->flushable)
1417 flags = ACL_START_NO_FLUSH;
1418 else
1419 flags = ACL_START;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001420
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001421 hci_send_acl(pi->conn->hcon, NULL, skb, flags);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001422 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001423}
1424
/* Transmit as many queued ERTM I-frames as the transmit window, the local
 * pipeline limit and the tx state machine allow.  Returns the number of
 * frames sent, or -ENOTCONN if the socket is not connected.
 */
int l2cap_ertm_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p", sk);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer asked us to hold off: nothing may be sent. */
	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return 0;

	/* No transmission while an AMP channel move is in progress. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
		atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
		(pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {

		skb = sk->sk_send_head;

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback a pending F-bit on this frame, once. */
		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
			control->final = 1;
			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		}
		/* Acknowledge everything received so far in this frame. */
		control->reqseq = pi->buffer_seq;
		pi->last_acked_seq = pi->buffer_seq;
		control->txseq = pi->next_tx_seq;

		/* Write the control field in the negotiated format. */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		/* Clone after data has been modified. Data is assumed to be
			read-only (for locking purposes) on cloned sk_buffs.
			*/
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		if (!tx_skb)
			break;

		/* The clone holds a socket reference until its destructor
		 * (l2cap_skb_destructor) runs.
		 */
		sock_hold(sk);
		tx_skb->sk = sk;
		tx_skb->destructor = l2cap_skb_destructor;
		atomic_inc(&pi->ertm_queued);

		l2cap_ertm_start_retrans_timer(pi);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->unacked_frames += 1;
		pi->frames_sent += 1;
		sent += 1;

		/* Advance sk_send_head; the original skb stays queued for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		l2cap_do_send(sk, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
		(int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
		atomic_read(&pi->ertm_queued));

	return sent;
}
1507
/* Transmit frames in streaming mode (no retransmission, no acks).
 *
 * Moves all skbs from @skbs onto the socket transmit queue, then
 * stamps each frame's control field and optional FCS and hands it
 * to l2cap_do_send().
 *
 * Returns -ENOTCONN if the socket is not connected; otherwise 0
 * (the local 'sent' count is only used for debug output).
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Hold off transmission while an AMP channel move is in progress */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* Streaming mode never acknowledges, so reqseq is always 0 */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		/* Write the packed control field into the space reserved
		 * right after the basic L2CAP header at PDU-creation time.
		 */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1565
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001566static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001567{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001568 while (len > 0) {
1569 if (iv->iov_len) {
1570 int copy = min_t(unsigned int, len, iv->iov_len);
1571 memcpy(kdata, iv->iov_base, copy);
1572 len -= copy;
1573 kdata += copy;
1574 iv->iov_base += copy;
1575 iv->iov_len -= copy;
1576 }
1577 iv++;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001578 }
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001579
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001580 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001581}
1582
/* Copy @len bytes of payload from @msg into @skb, growing a frag_list
 * of continuation buffers whenever the payload exceeds one HCI MTU.
 *
 * @count is the number of bytes that fit in the first skb.  When
 * @reseg is nonzero the data comes from kernel space (msg->msg_iov
 * actually points at a struct kvec array) and allocations must not
 * block.
 *
 * If the channel uses CRC16, tailroom for the FCS is reserved on the
 * last fragment (allocating one extra fragment if necessary); the FCS
 * bytes themselves are written later, at transmit time.
 *
 * Returns the number of payload bytes copied, or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
					int len, int count, struct sk_buff *skb,
					int reseg)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	struct sk_buff *final;	/* last fragment; gets the FCS tailroom */
	int err, sent = 0;

	BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
		msg, (int)len, (int)count, skb);

	if (!conn)
		return -ENOTCONN;

	/* When resegmenting, data is copied from kernel space */
	if (reseg) {
		err = memcpy_fromkvec(skb_put(skb, count),
				(struct kvec *) msg->msg_iov, count);
	} else {
		err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
					count);
	}

	if (err)
		return -EFAULT;

	sent += count;
	len -= count;
	final = skb;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		int skblen;
		count = min_t(unsigned int, conn->mtu, len);

		/* Add room for the FCS if it fits */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
			len + L2CAP_FCS_SIZE <= conn->mtu)
			skblen = count + L2CAP_FCS_SIZE;
		else
			skblen = count;

		/* Don't use bt_skb_send_alloc() while resegmenting, since
		 * it is not ok to block.
		 */
		if (reseg) {
			*frag = bt_skb_alloc(skblen, GFP_ATOMIC);
			if (*frag)
				skb_set_owner_w(*frag, sk);
		} else {
			*frag = bt_skb_send_alloc(sk, skblen,
					msg->msg_flags & MSG_DONTWAIT, &err);
		}

		if (!*frag)
			return -EFAULT;

		/* When resegmenting, data is copied from kernel space */
		if (reseg) {
			err = memcpy_fromkvec(skb_put(*frag, count),
					(struct kvec *) msg->msg_iov,
					count);
		} else {
			err = memcpy_fromiovec(skb_put(*frag, count),
					msg->msg_iov, count);
		}

		if (err)
			return -EFAULT;

		sent += count;
		len -= count;

		final = *frag;

		frag = &(*frag)->next;
	}

	/* Reserve FCS tailroom on the final fragment; the earlier
	 * "if it fits" sizing usually makes this a no-op allocation-wise.
	 */
	if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
		if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
			if (reseg) {
				*frag = bt_skb_alloc(L2CAP_FCS_SIZE,
						GFP_ATOMIC);
				if (*frag)
					skb_set_owner_w(*frag, sk);
			} else {
				*frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
						msg->msg_flags & MSG_DONTWAIT,
						&err);
			}

			if (!*frag)
				return -EFAULT;

			final = *frag;
		}

		skb_put(final, L2CAP_FCS_SIZE);
	}

	return sent;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001688struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001689{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001690 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001691 struct sk_buff *skb;
1692 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1693 struct l2cap_hdr *lh;
1694
1695 BT_DBG("sk %p len %d", sk, (int)len);
1696
1697 count = min_t(unsigned int, (conn->mtu - hlen), len);
1698 skb = bt_skb_send_alloc(sk, count + hlen,
1699 msg->msg_flags & MSG_DONTWAIT, &err);
1700 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001701 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001702
1703 /* Create L2CAP header */
1704 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001705 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001706 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001707 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001708
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001709 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001710 if (unlikely(err < 0)) {
1711 kfree_skb(skb);
1712 return ERR_PTR(err);
1713 }
1714 return skb;
1715}
1716
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001717struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001718{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001719 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001720 struct sk_buff *skb;
1721 int err, count, hlen = L2CAP_HDR_SIZE;
1722 struct l2cap_hdr *lh;
1723
1724 BT_DBG("sk %p len %d", sk, (int)len);
1725
1726 count = min_t(unsigned int, (conn->mtu - hlen), len);
1727 skb = bt_skb_send_alloc(sk, count + hlen,
1728 msg->msg_flags & MSG_DONTWAIT, &err);
1729 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001730 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001731
1732 /* Create L2CAP header */
1733 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001734 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001735 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1736
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001737 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001738 if (unlikely(err < 0)) {
1739 kfree_skb(skb);
1740 return ERR_PTR(err);
1741 }
1742 return skb;
1743}
1744
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001745struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1746 struct msghdr *msg, size_t len,
1747 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001748{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001749 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001750 int err, count, hlen;
1751 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001752 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001753 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001754
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001755 if (l2cap_pi(sk)->extended_control)
1756 hlen = L2CAP_EXTENDED_HDR_SIZE;
1757 else
1758 hlen = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03001759
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001760 if (sdulen)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001761 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001762
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001763 if (fcs == L2CAP_FCS_CRC16)
1764 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001765
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001766 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1767 sk, msg, (int)len, (int)sdulen, hlen);
1768
1769 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1770
1771 /* Allocate extra headroom for Qualcomm PAL. This is only
1772 * necessary in two places (here and when creating sframes)
1773 * because only unfragmented iframes and sframes are sent
1774 * using AMP controllers.
1775 */
1776 if (l2cap_pi(sk)->ampcon &&
1777 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1778 reserve = BT_SKB_RESERVE_80211;
1779
1780 /* Don't use bt_skb_send_alloc() while resegmenting, since
1781 * it is not ok to block.
1782 */
1783 if (reseg) {
1784 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1785 if (skb)
1786 skb_set_owner_w(skb, sk);
1787 } else {
1788 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
1789 msg->msg_flags & MSG_DONTWAIT, &err);
1790 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001791 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001792 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001793
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001794 if (reserve)
1795 skb_reserve(skb, reserve);
1796
1797 bt_cb(skb)->control.fcs = fcs;
1798
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001799 /* Create L2CAP header */
1800 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001801 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1802 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001803
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001804 /* Control header is populated later */
1805 if (l2cap_pi(sk)->extended_control)
1806 put_unaligned_le32(0, skb_put(skb, 4));
1807 else
1808 put_unaligned_le16(0, skb_put(skb, 2));
1809
1810 if (sdulen)
1811 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1812
1813 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001814 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001815 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001816 kfree_skb(skb);
1817 return ERR_PTR(err);
1818 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001819
1820 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001821 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822}
1823
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001824static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001825{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001826 struct l2cap_pinfo *pi;
1827 struct sk_buff *acked_skb;
1828 u16 ackseq;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001829
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830 BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001832 pi = l2cap_pi(sk);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001833
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001834 if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
1835 return;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001836
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837 BT_DBG("expected_ack_seq %d, unacked_frames %d",
1838 (int) pi->expected_ack_seq, (int) pi->unacked_frames);
1839
1840 for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
1841 ackseq = __next_seq(ackseq, pi)) {
1842
1843 acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
1844 if (acked_skb) {
1845 skb_unlink(acked_skb, TX_QUEUE(sk));
1846 kfree_skb(acked_skb);
1847 pi->unacked_frames--;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001848 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001849 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001850
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001851 pi->expected_ack_seq = reqseq;
1852
1853 if (pi->unacked_frames == 0)
1854 l2cap_ertm_stop_retrans_timer(pi);
1855
1856 BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001857}
1858
/* Build a supervisory (S-frame) PDU carrying the packed @control
 * field.  S-frames have no payload: just the basic L2CAP header, the
 * control field, and the FCS when CRC16 is in use.
 *
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
{
	struct sk_buff *skb;
	int len;
	int reserve = 0;
	struct l2cap_hdr *lh;

	/* Control field size depends on the negotiated control format */
	if (l2cap_pi(sk)->extended_control)
		len = L2CAP_EXTENDED_HDR_SIZE;
	else
		len = L2CAP_ENHANCED_HDR_SIZE;

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		len += L2CAP_FCS_SIZE;

	/* Allocate extra headroom for Qualcomm PAL */
	if (l2cap_pi(sk)->ampcon &&
		l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
		reserve = BT_SKB_RESERVE_80211;

	skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (reserve)
		skb_reserve(skb, reserve);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);

	if (l2cap_pi(sk)->extended_control)
		put_unaligned_le32(control, skb_put(skb, 4));
	else
		put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers the header and control field written above */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	return skb;
}
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001903
/* Build and transmit one supervisory frame described by @control.
 *
 * Updates connection state as a side effect: consumes a pending
 * F-bit, tracks whether an RNR has been sent, and (for non-SREJ
 * frames) records the acknowledged sequence number and stops the ack
 * timer.  S-frames are suppressed during most AMP move states.
 */
static void l2cap_ertm_send_sframe(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("sk %p, control %p", sk, control);

	/* Only supervisory frames are handled here */
	if (control->frame_type != 's')
		return;

	pi = l2cap_pi(sk);

	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
		pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		BT_DBG("AMP error - attempted S-Frame send during AMP move");
		return;
	}

	/* A pending F-bit is attached to the next non-poll frame */
	if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
		control->final = 1;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (control->super == L2CAP_SFRAME_RR)
		pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
	else if (control->super == L2CAP_SFRAME_RNR)
		pi->conn_state |= L2CAP_CONN_SENT_RNR;

	/* SREJ frames do not acknowledge new data, so they leave the
	 * ack bookkeeping alone.
	 */
	if (control->super != L2CAP_SFRAME_SREJ) {
		pi->last_acked_seq = control->reqseq;
		l2cap_ertm_stop_ack_timer(pi);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
		(int) control->final, (int) control->poll,
		(int) control->super);

	if (pi->extended_control)
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(sk, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(sk, skb);
}
1953
/* Acknowledge received I-frames.
 *
 * If locally busy, sends an RNR immediately.  Otherwise first tries
 * to piggyback the ack on pending I-frames; if frames still need
 * acknowledging, sends an RR once the unacked count reaches 3/4 of
 * the receive window, or else (re)arms the ack timer to send one
 * later.
 */
static void l2cap_ertm_send_ack(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control control;
	u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
	int threshold;

	BT_DBG("sk %p", sk);
	BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
		(int)pi->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';

	if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
		pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
		l2cap_ertm_stop_ack_timer(pi);
		control.super = L2CAP_SFRAME_RNR;
		control.reqseq = pi->buffer_seq;
		l2cap_ertm_send_sframe(sk, &control);
	} else {
		if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
			l2cap_ertm_send(sk);
			/* If any i-frames were sent, they included an ack */
			if (pi->buffer_seq == pi->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = pi->tx_win;
		threshold += threshold << 1;	/* threshold = 3 * tx_win */
		threshold >>= 2;		/* threshold = 3/4 * tx_win */

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			l2cap_ertm_stop_ack_timer(pi);
			control.super = L2CAP_SFRAME_RR;
			control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			l2cap_ertm_start_ack_timer(pi);
	}
}
2004
2005static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
2006{
2007 struct l2cap_pinfo *pi;
2008 struct bt_l2cap_control control;
2009
2010 BT_DBG("sk %p, poll %d", sk, (int) poll);
2011
2012 pi = l2cap_pi(sk);
2013
2014 memset(&control, 0, sizeof(control));
2015 control.frame_type = 's';
2016 control.poll = poll;
2017
2018 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
2019 control.super = L2CAP_SFRAME_RNR;
2020 else
2021 control.super = L2CAP_SFRAME_RR;
2022
2023 control.reqseq = pi->buffer_seq;
2024 l2cap_ertm_send_sframe(sk, &control);
2025}
2026
/* Respond to a poll (P-bit) by sending the F-bit on whatever frame
 * goes out next: an RNR if locally busy, otherwise a pending I-frame
 * if one is transmitted, falling back to an RR so the F-bit is
 * guaranteed to be sent.
 */
static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.final = 1;
	control.reqseq = pi->buffer_seq;
	/* Mark the F-bit pending; l2cap_ertm_send_sframe()/l2cap_ertm_send()
	 * clear it on the first frame that carries it.
	 */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control.super = L2CAP_SFRAME_RNR;
		l2cap_ertm_send_sframe(sk, &control);
	}

	if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
		(pi->unacked_frames > 0))
		l2cap_ertm_start_retrans_timer(pi);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Send pending iframes */
	l2cap_ertm_send(sk);

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SFRAME_RR;
		l2cap_ertm_send_sframe(sk, &control);
	}
}
2064
/* Selectively reject every missing frame between the expected
 * sequence number and @txseq (exclusive), skipping any that were
 * already received out of order.  Each SREJ'd sequence number is
 * recorded in srej_list so responses can be matched up later.
 */
static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	for (seq = pi->expected_tx_seq; seq != txseq;
		seq = __next_seq(seq, pi)) {
		/* Frames already buffered out of order need no SREJ */
		if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
			control.reqseq = seq;
			l2cap_ertm_send_sframe(sk, &control);
			l2cap_seq_list_append(&pi->srej_list, seq);
		}
	}

	pi->expected_tx_seq = __next_seq(txseq, pi);
}
2089
2090static void l2cap_ertm_send_srej_tail(struct sock *sk)
2091{
2092 struct bt_l2cap_control control;
2093 struct l2cap_pinfo *pi;
2094
2095 BT_DBG("sk %p", sk);
2096
2097 pi = l2cap_pi(sk);
2098
2099 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2100 return;
2101
2102 memset(&control, 0, sizeof(control));
2103 control.frame_type = 's';
2104 control.super = L2CAP_SFRAME_SREJ;
2105 control.reqseq = pi->srej_list.tail;
2106 l2cap_ertm_send_sframe(sk, &control);
2107}
2108
/* Re-send SREJs for every outstanding sequence number except @txseq,
 * which has just been received.  Popped entries (other than @txseq)
 * are re-appended, so the list is effectively rotated once.
 */
static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 initial_head;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int) txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = pi->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&pi->srej_list);
		/* Stop at the just-received frame or an empty list */
		if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
			break;

		control.reqseq = seq;
		l2cap_ertm_send_sframe(sk, &control);
		l2cap_seq_list_append(&pi->srej_list, seq);
	} while (pi->srej_list.head != initial_head);
}
2136
2137static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2138{
2139 struct l2cap_pinfo *pi = l2cap_pi(sk);
2140 BT_DBG("sk %p", sk);
2141
2142 pi->expected_tx_seq = pi->buffer_seq;
2143 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2144 skb_queue_purge(SREJ_QUEUE(sk));
2145 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2146}
2147
/* ERTM transmit state machine: handler for the XMIT (normal
 * transmission) state.
 *
 * Queues and sends new data, tracks local-busy transitions (which
 * also drive pending AMP channel-move signalling), processes
 * acknowledgments, and switches to WAIT_F after sending a poll.
 *
 * Returns 0 (err is reserved for future use by the callers).
 */
static int l2cap_ertm_tx_state_xmit(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		l2cap_ertm_send(sk);
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Sends an RNR while locally busy */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		/* An AMP move that was waiting on local-busy clearing can
		 * now proceed with its confirm/response signalling.
		 */
		if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
			if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
						pi->amp_move_cmd_ident,
						pi->dcid,
						L2CAP_MOVE_CHAN_SUCCESS);
			}
			break;
		}

		/* If we announced busy with an RNR, poll the peer to
		 * restart its transmissions and await the F-bit.
		 */
		if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
			(pi->conn_state & L2CAP_CONN_SENT_RNR)) {
			struct bt_l2cap_control local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		l2cap_ertm_stop_ack_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2246
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002247static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
2248 struct bt_l2cap_control *control,
2249 struct sk_buff_head *skbs, u8 event)
2250{
2251 struct l2cap_pinfo *pi;
2252 int err = 0;
2253
2254 BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
2255 (int)event);
2256 pi = l2cap_pi(sk);
2257
2258 switch (event) {
2259 case L2CAP_ERTM_EVENT_DATA_REQUEST:
2260 if (sk->sk_send_head == NULL)
2261 sk->sk_send_head = skb_peek(skbs);
2262 /* Queue data, but don't send. */
2263 skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
2264 break;
2265 case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
2266 BT_DBG("Enter LOCAL_BUSY");
2267 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2268
2269 if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
2270 /* The SREJ_SENT state must be aborted if we are to
2271 * enter the LOCAL_BUSY state.
2272 */
2273 l2cap_ertm_abort_rx_srej_sent(sk);
2274 }
2275
2276 l2cap_ertm_send_ack(sk);
2277
2278 break;
2279 case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
2280 BT_DBG("Exit LOCAL_BUSY");
2281 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2282
2283 if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
2284 struct bt_l2cap_control local_control;
2285 memset(&local_control, 0, sizeof(local_control));
2286 local_control.frame_type = 's';
2287 local_control.super = L2CAP_SFRAME_RR;
2288 local_control.poll = 1;
2289 local_control.reqseq = pi->buffer_seq;
2290 l2cap_ertm_send_sframe(sk, &local_control);
2291
2292 pi->retry_count = 1;
2293 l2cap_ertm_start_monitor_timer(pi);
2294 pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
2295 }
2296 break;
2297 case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
2298 l2cap_ertm_process_reqseq(sk, control->reqseq);
2299
2300 /* Fall through */
2301
2302 case L2CAP_ERTM_EVENT_RECV_FBIT:
2303 if (control && control->final) {
2304 l2cap_ertm_stop_monitor_timer(pi);
2305 if (pi->unacked_frames > 0)
2306 l2cap_ertm_start_retrans_timer(pi);
2307 pi->retry_count = 0;
2308 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2309 BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
2310 }
2311 break;
2312 case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
2313 /* Ignore */
2314 break;
2315 case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
2316 if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
2317 l2cap_ertm_send_rr_or_rnr(sk, 1);
2318 l2cap_ertm_start_monitor_timer(pi);
2319 pi->retry_count += 1;
2320 } else
2321 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
2322 break;
2323 default:
2324 break;
2325 }
2326
2327 return err;
2328}
2329
2330int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2331 struct sk_buff_head *skbs, u8 event)
2332{
2333 struct l2cap_pinfo *pi;
2334 int err = 0;
2335
2336 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2337 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2338
2339 pi = l2cap_pi(sk);
2340
2341 switch (pi->tx_state) {
2342 case L2CAP_ERTM_TX_STATE_XMIT:
2343 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2344 break;
2345 case L2CAP_ERTM_TX_STATE_WAIT_F:
2346 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2347 break;
2348 default:
2349 /* Ignore event */
2350 break;
2351 }
2352
2353 return err;
2354}
2355
/* Segment an SDU from @msg into a queue of I-frame PDUs.
 *
 * The PDU payload size is derived from the HCI MTU (capped for BR/EDR
 * links and reduced by worst-case L2CAP overhead) and further bounded
 * by the peer's MPS.  A single-PDU SDU is marked UNSEGMENTED;
 * otherwise the first PDU is START (and carries the SDU length),
 * followed by CONTINUE frames and a final END.
 *
 * @reseg is passed through to the PDU builder: nonzero means atomic
 * context with data sourced from kernel space.
 *
 * On success the PDUs are appended to @seg_queue and 0 is returned;
 * on failure the queue is purged and a negative errno is returned.
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The START frame also carries the 2-byte SDU length */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START frame carries the SDU length;
			 * later frames regain that payload space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2424
2425static inline int is_initial_frame(u8 sar)
2426{
2427 return (sar == L2CAP_SAR_UNSEGMENTED ||
2428 sar == L2CAP_SAR_START);
2429}
2430
2431static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2432 size_t veclen)
2433{
2434 struct sk_buff *frag_iter;
2435
2436 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2437
2438 if (iv->iov_len + skb->len > veclen)
2439 return -ENOMEM;
2440
2441 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2442 iv->iov_len += skb->len;
2443
2444 skb_walk_frags(skb, frag_iter) {
2445 if (iv->iov_len + skb->len > veclen)
2446 return -ENOMEM;
2447
2448 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2449 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2450 frag_iter->len);
2451 iv->iov_len += frag_iter->len;
2452 }
2453
2454 return 0;
2455}
2456
/* Rebuild every frame on an ERTM transmit queue to match the current
 * PDU size limits (used after an AMP channel move changes the MTU/MPS).
 *
 * Each SDU is reassembled into a flat buffer from its queued PDUs
 * (headers, SDU-length fields and FCS trailers stripped), then
 * re-segmented with l2cap_segment_sdu() and spliced back onto queue.
 * If the first PDU of an SDU had already been transmitted (its SAR was
 * not "initial"), the SAR bits of the first regenerated PDU are patched
 * so the peer sees a consistent continuation.
 *
 * Returns 0 on success; on error the queue is purged and a negative
 * errno is returned.
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Scratch buffer large enough for one maximum-size SDU plus FCS */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		/* Start frames also carry the 2-byte SDU length */
		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* Drop the FCS trailer; it is regenerated on re-send */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Pull in continuation/end PDUs of the same SDU, stopping
		 * at the next frame that starts a new SDU.
		 */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU. The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
				msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
				err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
				skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
					skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
						L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2629
2630static void l2cap_resegment_worker(struct work_struct *work)
2631{
2632 int err = 0;
2633 struct l2cap_resegment_work *seg_work =
2634 container_of(work, struct l2cap_resegment_work, work);
2635 struct sock *sk = seg_work->sk;
2636
2637 kfree(seg_work);
2638
2639 BT_DBG("sk %p", sk);
2640 lock_sock(sk);
2641
2642 if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
2643 release_sock(sk);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002644 sock_put(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002645 return;
2646 }
2647
2648 err = l2cap_resegment_queue(sk, TX_QUEUE(sk));
2649
2650 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2651
2652 if (skb_queue_empty(TX_QUEUE(sk)))
2653 sk->sk_send_head = NULL;
2654 else
2655 sk->sk_send_head = skb_peek(TX_QUEUE(sk));
2656
2657 if (err)
2658 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
2659 else
2660 l2cap_ertm_send(sk);
2661
2662 release_sock(sk);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002663 sock_put(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002664}
2665
2666static int l2cap_setup_resegment(struct sock *sk)
2667{
2668 struct l2cap_resegment_work *seg_work;
2669
2670 BT_DBG("sk %p", sk);
2671
2672 if (skb_queue_empty(TX_QUEUE(sk)))
2673 return 0;
2674
2675 seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
2676 if (!seg_work)
2677 return -ENOMEM;
2678
2679 INIT_WORK(&seg_work->work, l2cap_resegment_worker);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002680 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002681 seg_work->sk = sk;
2682
2683 if (!queue_work(_l2cap_wq, &seg_work->work)) {
2684 kfree(seg_work);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002685 sock_put(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002686 return -ENOMEM;
2687 }
2688
2689 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
2690
2691 return 0;
2692}
2693
2694static inline int l2cap_rmem_available(struct sock *sk)
2695{
2696 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2697 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2698 return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
2699}
2700
2701static inline int l2cap_rmem_full(struct sock *sk)
2702{
2703 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2704 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2705 return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
2706}
2707
2708void l2cap_amp_move_init(struct sock *sk)
2709{
2710 BT_DBG("sk %p", sk);
2711
2712 if (!l2cap_pi(sk)->conn)
2713 return;
2714
2715 if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
2716 return;
2717
2718 if (l2cap_pi(sk)->amp_id == 0) {
2719 if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
2720 return;
2721 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2722 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
2723 amp_create_physical(l2cap_pi(sk)->conn, sk);
2724 } else {
2725 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2726 l2cap_pi(sk)->amp_move_state =
2727 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
2728 l2cap_pi(sk)->amp_move_id = 0;
2729 l2cap_amp_move_setup(sk);
2730 l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
2731 l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
2732 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2733 }
2734}
2735
2736static void l2cap_chan_ready(struct sock *sk)
2737{
2738 struct sock *parent = bt_sk(sk)->parent;
2739
2740 BT_DBG("sk %p, parent %p", sk, parent);
2741
2742 l2cap_pi(sk)->conf_state = 0;
2743 l2cap_sock_clear_timer(sk);
2744
2745 if (!parent) {
2746 /* Outgoing channel.
2747 * Wake up socket sleeping on connect.
2748 */
2749 sk->sk_state = BT_CONNECTED;
2750 sk->sk_state_change(sk);
2751 } else {
2752 /* Incoming channel.
2753 * Wake up socket sleeping on accept.
2754 */
2755 parent->sk_data_ready(parent, 0);
2756 }
2757}
2758
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759/* Copy frame to all raw sockets on that connection */
2760static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2761{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002762 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002764 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
2766 BT_DBG("conn %p", conn);
2767
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002768 read_lock(&l->lock);
2769 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2770 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 continue;
2772
2773 /* Don't send frame to the socket it came from */
2774 if (skb->sk == sk)
2775 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002776 nskb = skb_clone(skb, GFP_ATOMIC);
2777 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 continue;
2779
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002780 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 kfree_skb(nskb);
2782 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002783 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784}
2785
2786/* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command skb: L2CAP header + command header
 * + dlen bytes of payload.  Payload larger than the ACL MTU is carried
 * in headerless continuation skbs chained on frag_list.
 *
 * Returns the skb (caller owns it) or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
						u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	/* Total wire length; first skb is capped at the ACL MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	/* Copy as much payload as fits into the first skb */
	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb() also frees any chained fragments */
	kfree_skb(skb);
	return NULL;
}
2850
2851static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2852{
2853 struct l2cap_conf_opt *opt = *ptr;
2854 int len;
2855
2856 len = L2CAP_CONF_OPT_SIZE + opt->len;
2857 *ptr += len;
2858
2859 *type = opt->type;
2860 *olen = opt->len;
2861
2862 switch (opt->len) {
2863 case 1:
2864 *val = *((u8 *) opt->val);
2865 break;
2866
2867 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002868 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 break;
2870
2871 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002872 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 break;
2874
2875 default:
2876 *val = (unsigned long) opt->val;
2877 break;
2878 }
2879
2880 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2881 return len;
2882}
2883
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2885{
2886 struct l2cap_conf_opt *opt = *ptr;
2887
2888 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2889
2890 opt->type = type;
2891 opt->len = len;
2892
2893 switch (len) {
2894 case 1:
2895 *((u8 *) opt->val) = val;
2896 break;
2897
2898 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002899 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 break;
2901
2902 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002903 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 break;
2905
2906 default:
2907 memcpy(opt->val, (void *) val, len);
2908 break;
2909 }
2910
2911 *ptr += L2CAP_CONF_OPT_SIZE + len;
2912}
2913
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002914static void l2cap_ertm_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002915{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002916 struct delayed_work *delayed =
2917 container_of(work, struct delayed_work, work);
2918 struct l2cap_pinfo *pi =
2919 container_of(delayed, struct l2cap_pinfo, ack_work);
2920 struct sock *sk = (struct sock *)pi;
2921 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002922
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002923 BT_DBG("sk %p", sk);
2924
2925 if (!sk)
2926 return;
2927
2928 lock_sock(sk);
2929
2930 if (!l2cap_pi(sk)->conn) {
2931 release_sock(sk);
2932 return;
2933 }
2934
2935 frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
2936 l2cap_pi(sk)->last_acked_seq,
2937 l2cap_pi(sk));
2938
2939 if (frames_to_ack)
2940 l2cap_ertm_send_rr_or_rnr(sk, 0);
2941
2942 release_sock(sk);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002943}
2944
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002945static void l2cap_ertm_retrans_timeout(struct work_struct *work)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002946{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002947 struct delayed_work *delayed =
2948 container_of(work, struct delayed_work, work);
2949 struct l2cap_pinfo *pi =
2950 container_of(delayed, struct l2cap_pinfo, retrans_work);
2951 struct sock *sk = (struct sock *)pi;
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03002952
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002953 BT_DBG("sk %p", sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002954
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002955 if (!sk)
2956 return;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002957
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002958 lock_sock(sk);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03002959
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002960 if (!l2cap_pi(sk)->conn) {
2961 release_sock(sk);
2962 return;
2963 }
Gustavo F. Padovan39d5a3e2011-04-04 15:40:12 -03002964
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002965 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
2966 release_sock(sk);
2967}
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03002968
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002969static void l2cap_ertm_monitor_timeout(struct work_struct *work)
2970{
2971 struct delayed_work *delayed =
2972 container_of(work, struct delayed_work, work);
2973 struct l2cap_pinfo *pi =
2974 container_of(delayed, struct l2cap_pinfo, monitor_work);
2975 struct sock *sk = (struct sock *)pi;
2976
2977 BT_DBG("sk %p", sk);
2978
2979 if (!sk)
2980 return;
2981
2982 lock_sock(sk);
2983
2984 if (!l2cap_pi(sk)->conn) {
2985 release_sock(sk);
2986 return;
2987 }
2988
2989 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
2990
2991 release_sock(sk);
2992}
2993
2994static inline void l2cap_ertm_init(struct sock *sk)
2995{
2996 l2cap_pi(sk)->next_tx_seq = 0;
2997 l2cap_pi(sk)->expected_tx_seq = 0;
2998 l2cap_pi(sk)->expected_ack_seq = 0;
2999 l2cap_pi(sk)->unacked_frames = 0;
3000 l2cap_pi(sk)->buffer_seq = 0;
3001 l2cap_pi(sk)->frames_sent = 0;
3002 l2cap_pi(sk)->last_acked_seq = 0;
3003 l2cap_pi(sk)->sdu = NULL;
3004 l2cap_pi(sk)->sdu_last_frag = NULL;
3005 l2cap_pi(sk)->sdu_len = 0;
3006 atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
3007
3008 l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3009 l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
3010
3011 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
3012 l2cap_pi(sk)->rx_state);
3013
3014 l2cap_pi(sk)->amp_id = 0;
3015 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
3016 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
3017 l2cap_pi(sk)->amp_move_reqseq = 0;
3018 l2cap_pi(sk)->amp_move_event = 0;
3019
3020 INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
3021 INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
3022 l2cap_ertm_retrans_timeout);
3023 INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
3024 l2cap_ertm_monitor_timeout);
3025 INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
3026 skb_queue_head_init(SREJ_QUEUE(sk));
3027 skb_queue_head_init(TX_QUEUE(sk));
3028
3029 l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
3030 l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
3031 l2cap_pi(sk)->remote_tx_win);
3032}
3033
3034void l2cap_ertm_destruct(struct sock *sk)
3035{
3036 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
3037 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
3038}
3039
/* Stop all ERTM timers when the channel is being shut down */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
3046
3047void l2cap_ertm_recv_done(struct sock *sk)
3048{
3049 lock_sock(sk);
3050
Mat Martineau28eb3fa2012-02-09 16:06:12 -08003051 if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM ||
3052 sk->sk_state != BT_CONNECTED) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003053 release_sock(sk);
3054 return;
3055 }
3056
3057 /* Consume any queued incoming frames and update local busy status */
3058 if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
3059 l2cap_ertm_rx_queued_iframes(sk))
3060 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
3061 else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3062 l2cap_rmem_available(sk))
3063 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
3064
3065 release_sock(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003066}
3067
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003068static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3069{
3070 switch (mode) {
3071 case L2CAP_MODE_STREAMING:
3072 case L2CAP_MODE_ERTM:
3073 if (l2cap_mode_supported(mode, remote_feat_mask))
3074 return mode;
3075 /* fall through */
3076 default:
3077 return L2CAP_MODE_BASIC;
3078 }
3079}
3080
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003081static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003083 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3084 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3085 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3086 pi->extended_control = 1;
3087 } else {
3088 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3089 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3090
3091 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3092 pi->extended_control = 0;
3093 }
3094}
3095
/* Combine the current aggregate flow spec (cur) with a newly added
 * channel's flow spec (new) into agg.
 *
 * max_sdu == 0xFFFF or sdu_arr_time == 0xFFFFFFFF denotes an
 * unknown/best-effort rate.  If either side is unknown the aggregate
 * becomes unknown; otherwise the per-channel data rates (bytes/sec,
 * derived as max_sdu * 1e6 / sdu_arr_time with times in microseconds)
 * are summed and converted back into an SDU inter-arrival time.
 *
 * NOTE(review): agg->max_sdu keeps cur's value in the known-rate case;
 * presumably the combined rate is expressed solely via the reduced
 * sdu_arr_time — confirm against hci_chan_modify() expectations.
 */
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
				struct hci_ext_fs *new,
				struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		/* current flow spec has known rate */
		if ((new->max_sdu == 0xFFFF) ||
				(new->sdu_arr_time == 0xFFFFFFFF)) {
			/* new fs has unknown rate, so aggregate is unknown */
			agg->max_sdu = 0xFFFF;
			agg->sdu_arr_time = 0xFFFFFFFF;
		} else {
			/* new fs has known rate, so aggregate is known */
			u64 cur_rate;
			u64 new_rate;
			cur_rate = cur->max_sdu * 1000000ULL;
			if (cur->sdu_arr_time)
				cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
			new_rate = new->max_sdu * 1000000ULL;
			if (new->sdu_arr_time)
				new_rate = div_u64(new_rate, new->sdu_arr_time);
			cur_rate = cur_rate + new_rate;
			/* Guard against divide-by-zero; if the summed
			 * rate is 0, agg keeps cur's arrival time. */
			if (cur_rate)
				agg->sdu_arr_time = div64_u64(
					agg->max_sdu * 1000000ULL, cur_rate);
		}
	}
}
3125
3126static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3127{
3128 struct hci_ext_fs tx_fs;
3129 struct hci_ext_fs rx_fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003131 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003133 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3134 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3135 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3136 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3137 return 0;
3138
3139 l2cap_aggregate_fs(&chan->tx_fs,
3140 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3141 l2cap_aggregate_fs(&chan->rx_fs,
3142 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3143 hci_chan_modify(chan, &tx_fs, &rx_fs);
3144 return 1;
3145}
3146
/* Remove a departing channel's flow spec (old) from the current
 * aggregate (cur), producing agg.  Inverse of l2cap_aggregate_fs():
 * when the aggregate rate is known, the old channel's data rate is
 * subtracted and the result converted back to an SDU inter-arrival
 * time.  An unknown aggregate (0xFFFF / 0xFFFFFFFF sentinels) is
 * left unchanged.
 *
 * NOTE(review): as in l2cap_aggregate_fs(), agg->max_sdu keeps cur's
 * value; only sdu_arr_time is recomputed — confirm intended.
 */
static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
				struct hci_ext_fs *old,
				struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		u64 cur_rate;
		u64 old_rate;
		cur_rate = cur->max_sdu * 1000000ULL;
		if (cur->sdu_arr_time)
			cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
		old_rate = old->max_sdu * 1000000ULL;
		if (old->sdu_arr_time)
			old_rate = div_u64(old_rate, old->sdu_arr_time);
		cur_rate = cur_rate - old_rate;
		/* Guard against divide-by-zero; a zero remaining rate
		 * leaves cur's arrival time in place. */
		if (cur_rate)
			agg->sdu_arr_time = div64_u64(
				agg->max_sdu * 1000000ULL, cur_rate);
	}
}
3167
3168static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3169{
3170 struct hci_ext_fs tx_fs;
3171 struct hci_ext_fs rx_fs;
3172
3173 BT_DBG("chan %p", chan);
3174
3175 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3176 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3177 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3178 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3179 return 0;
3180
3181 l2cap_deaggregate_fs(&chan->tx_fs,
3182 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3183 l2cap_deaggregate_fs(&chan->rx_fs,
3184 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3185 hci_chan_modify(chan, &tx_fs, &rx_fs);
3186 return 1;
3187}
3188
/* Admit this L2CAP channel onto the AMP controller identified by
 * amp_id: reuse the existing HCI channel for the ACL link if one
 * exists (folding this channel's flow specs into its aggregate),
 * otherwise create a new one.
 *
 * On success the hci_chan is stored in pi->ampchan with a reference
 * held (hci_chan_hold), and a socket reference is taken (sock_hold)
 * with chan->l2cap_sk pointing back at sk.  Returns the hci_chan, or
 * NULL if the AMP device or its ACL connection cannot be found.
 */
static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct hci_dev *hdev;
	struct hci_conn *hcon;
	struct hci_chan *chan;

	hdev = hci_dev_get(amp_id);
	if (!hdev)
		return NULL;

	BT_DBG("hdev %s", hdev->name);

	/* The AMP physical link to the peer must already exist */
	hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
	if (!hcon) {
		chan = NULL;
		goto done;
	}

	/* Reuse an existing logical channel on this link if present */
	chan = hci_chan_list_lookup_id(hdev, hcon->handle);
	if (chan) {
		l2cap_aggregate(chan, pi);
		sock_hold(sk);
		chan->l2cap_sk = sk;
		hci_chan_hold(chan);
		pi->ampchan = chan;
		goto done;
	}

	/* No channel yet: create one with this socket's flow specs */
	chan = hci_chan_add(hdev);
	if (chan) {
		chan->conn = hcon;
		sock_hold(sk);
		chan->l2cap_sk = sk;
		hci_chan_hold(chan);
		pi->ampchan = chan;
		hci_chan_create(chan,
			(struct hci_ext_fs *) &pi->local_fs,
			(struct hci_ext_fs *) &pi->remote_fs);
	}
done:
	/* Balances hci_dev_get(); chan (if any) keeps its own refs */
	hci_dev_put(hdev);
	return chan;
}
3233
Mat Martineau966dcfa2011-12-12 10:45:31 -08003234static void l2cap_get_ertm_timeouts(struct l2cap_conf_rfc *rfc,
3235 struct l2cap_pinfo *pi)
3236{
3237 if (pi->amp_id && pi->ampcon) {
3238 u64 ertm_to = pi->ampcon->hdev->amp_be_flush_to;
3239
3240 /* Class 1 devices have must have ERTM timeouts
3241 * exceeding the Link Supervision Timeout. The
3242 * default Link Supervision Timeout for AMP
3243 * controllers is 10 seconds.
3244 *
3245 * Class 1 devices use 0xffffffff for their
3246 * best-effort flush timeout, so the clamping logic
3247 * will result in a timeout that meets the above
3248 * requirement. ERTM timeouts are 16-bit values, so
3249 * the maximum timeout is 65.535 seconds.
3250 */
3251
3252 /* Convert timeout to milliseconds and round */
3253 ertm_to = div_u64(ertm_to + 999, 1000);
3254
3255 /* This is the recommended formula for class 2 devices
3256 * that start ERTM timers when packets are sent to the
3257 * controller.
3258 */
3259 ertm_to = 3 * ertm_to + 500;
3260
3261 if (ertm_to > 0xffff)
3262 ertm_to = 0xffff;
3263
3264 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3265 rfc->monitor_timeout = rfc->retrans_timeout;
3266 } else {
3267 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3268 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3269 }
3270}
3271
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003272int l2cap_build_conf_req(struct sock *sk, void *data)
3273{
3274 struct l2cap_pinfo *pi = l2cap_pi(sk);
3275 struct l2cap_conf_req *req = data;
3276 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
3277 void *ptr = req->data;
3278
3279 BT_DBG("sk %p", sk);
3280
3281 if (pi->num_conf_req || pi->num_conf_rsp)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003282 goto done;
3283
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003284 switch (pi->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003285 case L2CAP_MODE_STREAMING:
3286 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003287 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003288 break;
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003289
Gustavo F. Padovan2ba13ed2010-06-09 16:39:05 -03003290 /* fall through */
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003291 default:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003292 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003293 break;
3294 }
3295
3296done:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003297 if (pi->imtu != L2CAP_DEFAULT_MTU)
3298 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
Gustavo F. Padovan79906812011-01-24 16:01:43 -02003299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003300 switch (pi->mode) {
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003301 case L2CAP_MODE_BASIC:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003302 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3303 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003304 break;
3305
Gustavo F. Padovan62547752010-06-08 20:05:31 -03003306 rfc.txwin_size = 0;
3307 rfc.max_transmit = 0;
3308 rfc.retrans_timeout = 0;
3309 rfc.monitor_timeout = 0;
3310 rfc.max_pdu_size = 0;
3311
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003312 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3313 (unsigned long) &rfc);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003314 break;
3315
3316 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003317 l2cap_setup_txwin(pi);
3318 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3319 rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
3320 else
3321 rfc.txwin_size = pi->tx_win;
Mat Martineau966dcfa2011-12-12 10:45:31 -08003322 rfc.max_transmit = pi->max_tx;
3323 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
3324 l2cap_get_ertm_timeouts(&rfc, pi);
3325
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003326 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
3327 rfc.max_pdu_size = cpu_to_le16(pi->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003328
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003329 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3330 (unsigned long) &rfc);
3331
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003332 if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
3333 pi->extended_control) {
3334 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
3335 pi->tx_win);
3336 }
3337
3338 if (pi->amp_id) {
3339 /* default best effort extended flow spec */
3340 struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
3341 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
3342 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
3343 sizeof(fs), (unsigned long) &fs);
3344 }
3345
3346 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003347 break;
3348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003349 if (pi->fcs == L2CAP_FCS_NONE ||
3350 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
3351 pi->fcs = L2CAP_FCS_NONE;
3352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003353 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003354 break;
3355
3356 case L2CAP_MODE_STREAMING:
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003357 rfc.txwin_size = 0;
3358 rfc.max_transmit = 0;
3359 rfc.retrans_timeout = 0;
3360 rfc.monitor_timeout = 0;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03003361 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003362 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
3363 rfc.max_pdu_size = cpu_to_le16(pi->imtu);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003364
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003365 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3366 (unsigned long) &rfc);
3367
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003368 if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
3369 pi->extended_control) {
3370 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
3371 }
3372
3373 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003374 break;
3375
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003376 if (pi->fcs == L2CAP_FCS_NONE ||
3377 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
3378 pi->fcs = L2CAP_FCS_NONE;
3379 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003380 }
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003381 break;
3382 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003384 req->dcid = cpu_to_le16(pi->dcid);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003385 req->flags = cpu_to_le16(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386
3387 return ptr - data;
3388}
3389
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003390
/* Build a Configuration Request used to reconfigure a channel after an
 * AMP channel move.  Writes the request into @data and returns its
 * length, or -ECONNREFUSED when the channel mode cannot be
 * reconfigured.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };

	BT_DBG("sk %p", sk);

	/* Reconfiguration after a move is only defined for ERTM. */
	if (pi->mode != L2CAP_MODE_ERTM)
		return -ECONNREFUSED;

	rfc.mode = L2CAP_MODE_ERTM;
	rfc.txwin_size = pi->tx_win;
	rfc.max_transmit = pi->max_tx;
	l2cap_get_ertm_timeouts(&rfc, pi);

	/* Advertise the largest PDU that still fits our incoming MTU. */
	if (pi->imtu < L2CAP_DEFAULT_MAX_PDU_SIZE)
		rfc.max_pdu_size = cpu_to_le16(pi->imtu);
	else
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {
		/* TODO assign fcs for br/edr based on socket config option */
		/* FCS is not used with AMP because it is redundant - lower
		 * layers already include a checksum. */
		pi->local_conf.fcs = pi->amp_id ? L2CAP_FCS_NONE :
							L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->local_conf.fcs);
		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3437
/* Parse the peer's Configuration Request (buffered in pi->conf_req /
 * pi->conf_len) and build our Configuration Response into @data.
 *
 * Accepted options are echoed back with the values we chose; unknown
 * non-hint options are appended to the response with result
 * L2CAP_CONF_UNKNOWN.  Returns the number of response bytes written,
 * or -ECONNREFUSED when the channel must be refused outright.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	/* Defaults used when the request carries no RFC / MTU option. */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: decode every option in the request. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			/* In lockstep (AMP) configuration the flush timeout
			 * must come via the extended flow spec instead. */
			pi->flush_to = val;
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* Old-style QoS option is rejected in lockstep mode. */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			/* Extended flow spec: only valid in lockstep mode and
			 * only best-effort service is supported. */
			if (olen == sizeof(fs)) {
				pi->conf_state |= L2CAP_CONF_EFS_RECV;
				if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
					result = L2CAP_CONF_UNACCEPT;
					break;
				}
				memcpy(&fs, (void *) val, olen);
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
					break;
				}
				/* Convert the wire-order fields to host order. */
				pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				pi->remote_fs.id = fs.id;
				pi->remote_fs.type = fs.type;
				pi->remote_fs.max_sdu =
						le16_to_cpu(fs.max_sdu);
				pi->remote_fs.sdu_arr_time =
						le32_to_cpu(fs.sdu_arr_time);
				pi->remote_fs.acc_latency =
						le32_to_cpu(fs.acc_latency);
				pi->remote_fs.flush_to =
						le32_to_cpu(fs.flush_to);
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			pi->extended_control = 1;
			pi->remote_tx_win = val;
			pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
			pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
			break;

		default:
			if (hint)
				break;

			/* Unknown mandatory option: list its type in the
			 * response body. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode selection only happens on the first config exchange. */
	if (pi->num_conf_rsp || pi->num_conf_req > 1)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			/* Fall back to a mode both sides support. */
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* State-2 device: the configured mode is mandatory. */
		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		/* Second disagreement on mode: give up. */
		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	/* Lockstep configuration requires an extended flow spec; note this
	 * also guarantees 'fs' is initialized before it is echoed below. */
	if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
			!(pi->conf_state & L2CAP_CONF_EFS_RECV))
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU) {
			result = L2CAP_CONF_UNACCEPT;
			pi->omtu = L2CAP_DEFAULT_MIN_MTU;
		}
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			/* An explicit extended-window option overrides the
			 * window carried in the RFC option. */
			if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
				pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
			l2cap_get_ertm_timeouts(&rfc, pi);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			/* Echo the received flow spec back in lockstep mode. */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		/* In lockstep mode answer with CONF_PENDING first; the final
		 * response is sent once the logical link is up. */
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
			pi->conf_state |= L2CAP_CONF_PEND_SENT;
			result = L2CAP_CONF_PENDING;

			if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
					pi->amp_id) {
				struct hci_chan *chan;
				/* Trigger logical link creation only on AMP */

				chan = l2cap_chan_admit(pi->amp_id, sk);
				if (!chan)
					return -ECONNREFUSED;

				if (chan->state == BT_CONNECTED)
					l2cap_create_cfm(chan, 0);
			}
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3651
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003652static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003653{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003654 struct l2cap_pinfo *pi = l2cap_pi(sk);
3655 struct l2cap_conf_rsp *rsp = data;
3656 void *ptr = rsp->data;
3657 void *req = pi->conf_req;
3658 int len = pi->conf_len;
3659 int type, hint, olen;
3660 unsigned long val;
3661 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3662 struct l2cap_conf_ext_fs fs;
3663 u16 mtu = pi->omtu;
3664 u16 tx_win = pi->remote_tx_win;
3665 u16 result = L2CAP_CONF_SUCCESS;
3666
3667 BT_DBG("sk %p", sk);
3668
3669 while (len >= L2CAP_CONF_OPT_SIZE) {
3670 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3671
3672 hint = type & L2CAP_CONF_HINT;
3673 type &= L2CAP_CONF_MASK;
3674
3675 switch (type) {
3676 case L2CAP_CONF_MTU:
3677 mtu = val;
3678 break;
3679
3680 case L2CAP_CONF_FLUSH_TO:
3681 if (pi->amp_move_id)
3682 result = L2CAP_CONF_UNACCEPT;
3683 else
3684 pi->remote_conf.flush_to = val;
3685 break;
3686
3687 case L2CAP_CONF_QOS:
3688 if (pi->amp_move_id)
3689 result = L2CAP_CONF_UNACCEPT;
3690 break;
3691
3692 case L2CAP_CONF_RFC:
3693 if (olen == sizeof(rfc))
3694 memcpy(&rfc, (void *) val, olen);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003695 break;
3696
3697 case L2CAP_CONF_FCS:
3698 pi->remote_conf.fcs = val;
3699 break;
3700
3701 case L2CAP_CONF_EXT_FS:
3702 if (olen == sizeof(fs)) {
3703 memcpy(&fs, (void *) val, olen);
3704 if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
3705 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3706 else {
3707 pi->remote_conf.flush_to =
3708 le32_to_cpu(fs.flush_to);
3709 }
3710 }
3711 break;
3712
3713 case L2CAP_CONF_EXT_WINDOW:
3714 tx_win = val;
3715 break;
3716
3717 default:
3718 if (hint)
3719 break;
3720
3721 result = L2CAP_CONF_UNKNOWN;
3722 *((u8 *) ptr++) = type;
3723 break;
3724 }
3725 }
3726
3727 BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
3728 result, pi->mode, rfc.mode);
3729
Mat Martineau966dcfa2011-12-12 10:45:31 -08003730 if (pi->mode != rfc.mode || rfc.mode == L2CAP_MODE_BASIC)
3731 result = L2CAP_CONF_UNACCEPT;
3732
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003733 if (result == L2CAP_CONF_SUCCESS) {
3734 /* Configure output options and let the other side know
3735 * which ones we don't like. */
3736
3737 /* Don't allow mtu to decrease. */
3738 if (mtu < pi->omtu)
3739 result = L2CAP_CONF_UNACCEPT;
3740
3741 BT_DBG("mtu %d omtu %d", mtu, pi->omtu);
3742
3743 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
3744
3745 /* Don't allow extended transmit window to change. */
3746 if (tx_win != pi->remote_tx_win) {
3747 result = L2CAP_CONF_UNACCEPT;
3748 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
3749 pi->remote_tx_win);
3750 }
3751
Mat Martineau966dcfa2011-12-12 10:45:31 -08003752 pi->remote_mps = rfc.max_pdu_size;
3753
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003754 if (rfc.mode == L2CAP_MODE_ERTM) {
Mat Martineau966dcfa2011-12-12 10:45:31 -08003755 l2cap_get_ertm_timeouts(&rfc, pi);
3756 } else {
3757 rfc.retrans_timeout = 0;
3758 rfc.monitor_timeout = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003759 }
3760
Mat Martineau966dcfa2011-12-12 10:45:31 -08003761 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3762 sizeof(rfc), (unsigned long) &rfc);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003763 }
3764
3765 if (result != L2CAP_CONF_SUCCESS)
3766 goto done;
3767
Mat Martineau966dcfa2011-12-12 10:45:31 -08003768 pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003769
Mat Martineau966dcfa2011-12-12 10:45:31 -08003770 if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003771 pi->flush_to = pi->remote_conf.flush_to;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003772
3773done:
3774 rsp->scid = cpu_to_le16(pi->dcid);
3775 rsp->result = cpu_to_le16(result);
3776 rsp->flags = cpu_to_le16(0x0000);
3777
3778 return ptr - data;
3779}
3780
/* Parse the peer's Configuration Response @rsp (length @len) and build
 * a follow-up Configuration Request into @data, echoing each adjusted
 * option with the value we will actually use.
 *
 * *result carries the response's result code in and may be updated to
 * L2CAP_CONF_UNACCEPT (MTU too small).  Returns the number of request
 * bytes written, or -ECONNREFUSED when the proposed mode is not
 * acceptable.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	/* Initialize rfc in case no rfc option is received */
	rfc.mode = pi->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Clamp our incoming MTU to the allowed minimum and
			 * flag the response unacceptable if it was below. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* State-2 device: mode is fixed and may not be
			 * renegotiated by the peer. */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EXT_WINDOW:
			/* Cap the transmit window at the enhanced maximum. */
			pi->tx_win = val;

			if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
				pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
					2, pi->tx_win);
			break;

		default:
			break;
		}
	}

	/* A basic-mode channel cannot be switched to another mode here. */
	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
		return -ECONNREFUSED;

	pi->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		/* Adopt the negotiated ERTM/streaming parameters,
		 * converting from wire byte order. */
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3868
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003869static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870{
3871 struct l2cap_conf_rsp *rsp = data;
3872 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003874 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003876 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003877 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003878 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879
3880 return ptr - data;
3881}
3882
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003883static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003884{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003885 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003886 int type, olen;
3887 unsigned long val;
3888 struct l2cap_conf_rfc rfc;
3889
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003890 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003891
Mat Martineau6b0bcce2011-11-29 16:17:02 -08003892 /* Initialize rfc in case no rfc option is received */
3893 rfc.mode = pi->mode;
Mat Martineauab043552011-12-05 15:54:44 -08003894 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3895 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3896 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
Mat Martineau6b0bcce2011-11-29 16:17:02 -08003897
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003898 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003899 return;
3900
3901 while (len >= L2CAP_CONF_OPT_SIZE) {
3902 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3903
3904 switch (type) {
3905 case L2CAP_CONF_RFC:
3906 if (olen == sizeof(rfc))
3907 memcpy(&rfc, (void *)val, olen);
3908 goto done;
3909 }
3910 }
3911
3912done:
3913 switch (rfc.mode) {
3914 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003915 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3916 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3917 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003918 break;
3919 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003920 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003921 }
3922}
3923
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003924static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3925{
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3927 int type, olen;
3928 unsigned long val;
3929 struct l2cap_conf_ext_fs fs;
3930
3931 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3932
3933 while (len >= L2CAP_CONF_OPT_SIZE) {
3934 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3935 if ((type == L2CAP_CONF_EXT_FS) &&
3936 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3937 memcpy(&fs, (void *)val, olen);
3938 pi->local_fs.id = fs.id;
3939 pi->local_fs.type = fs.type;
3940 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3941 pi->local_fs.sdu_arr_time =
3942 le32_to_cpu(fs.sdu_arr_time);
3943 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3944 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3945 break;
3946 }
3947 }
3948
3949}
3950
3951static int l2cap_finish_amp_move(struct sock *sk)
3952{
3953 struct l2cap_pinfo *pi;
3954 int err;
3955
3956 BT_DBG("sk %p", sk);
3957
3958 pi = l2cap_pi(sk);
3959
3960 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
3961 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3962
3963 if (pi->ampcon)
3964 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
3965 else
3966 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
3967
3968 err = l2cap_setup_resegment(sk);
3969
3970 return err;
3971}
3972
3973static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
3974 u16 result)
3975{
3976 int err = 0;
3977 struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
3978 struct l2cap_pinfo *pi = l2cap_pi(sk);
3979
3980 BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);
3981
3982 if (pi->reconf_state == L2CAP_RECONF_NONE)
3983 return -ECONNREFUSED;
3984
3985 if (result == L2CAP_CONF_SUCCESS) {
3986 while (len >= L2CAP_CONF_OPT_SIZE) {
3987 int type, olen;
3988 unsigned long val;
3989
3990 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3991
3992 if (type == L2CAP_CONF_RFC) {
3993 if (olen == sizeof(rfc))
3994 memcpy(&rfc, (void *)val, olen);
Mat Martineau966dcfa2011-12-12 10:45:31 -08003995
3996 if (rfc.mode != pi->mode) {
3997 l2cap_send_disconn_req(pi->conn, sk,
3998 ECONNRESET);
3999 return -ECONNRESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004000 }
Mat Martineau966dcfa2011-12-12 10:45:31 -08004001
4002 goto done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004003 }
4004 }
4005 }
4006
Mat Martineau966dcfa2011-12-12 10:45:31 -08004007 BT_ERR("Expected RFC option was missing, using existing values");
4008
4009 rfc.mode = pi->mode;
4010 rfc.retrans_timeout = cpu_to_le16(pi->retrans_timeout);
4011 rfc.monitor_timeout = cpu_to_le16(pi->monitor_timeout);
4012
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004013done:
4014 l2cap_ertm_stop_ack_timer(pi);
4015 l2cap_ertm_stop_retrans_timer(pi);
4016 l2cap_ertm_stop_monitor_timer(pi);
4017
Mat Martineau966dcfa2011-12-12 10:45:31 -08004018 pi->mps = le16_to_cpu(rfc.max_pdu_size);
4019 if (pi->mode == L2CAP_MODE_ERTM) {
4020 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4021 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4022 }
4023
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004024 if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
4025 l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;
4026
4027 /* Respond to poll */
4028 err = l2cap_answer_move_poll(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004029 } else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004030 if (pi->mode == L2CAP_MODE_ERTM) {
4031 l2cap_ertm_tx(sk, NULL, NULL,
4032 L2CAP_ERTM_EVENT_EXPLICIT_POLL);
4033 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
4034 }
4035 }
4036
4037 return err;
4038}
4039
4040
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004041static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4042{
4043 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
4044
4045 if (rej->reason != 0x0000)
4046 return 0;
4047
4048 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4049 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004050 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01004051
4052 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004053 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004054
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004055 l2cap_conn_start(conn);
4056 }
4057
4058 return 0;
4059}
4060
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004061static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
4062 struct l2cap_cmd_hdr *cmd,
4063 u8 *data, u8 rsp_code,
4064 u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004066 struct l2cap_chan_list *list = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4068 struct l2cap_conn_rsp rsp;
Nathan Holsteind793fe82010-10-15 11:54:02 -04004069 struct sock *parent, *sk = NULL;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02004070 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071
4072 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02004073 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074
4075 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
4076
4077 /* Check if we have socket listening on psm */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004078 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
4079 if (!parent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080 result = L2CAP_CR_BAD_PSM;
4081 goto sendresp;
4082 }
4083
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00004084 bh_lock_sock(parent);
4085
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02004086 /* Check if the ACL is secure enough (if not SDP) */
4087 if (psm != cpu_to_le16(0x0001) &&
4088 !hci_conn_check_link_mode(conn->hcon)) {
Marcel Holtmann2950f212009-02-12 14:02:50 +01004089 conn->disc_reason = 0x05;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02004090 result = L2CAP_CR_SEC_BLOCK;
4091 goto response;
4092 }
4093
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094 result = L2CAP_CR_NO_MEM;
4095
4096 /* Check for backlog size */
4097 if (sk_acceptq_is_full(parent)) {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004098 BT_DBG("backlog full %d", parent->sk_ack_backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099 goto response;
4100 }
4101
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004102 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
4103 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 goto response;
4105
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004106 write_lock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107
4108 /* Check if we already have channel with that dcid */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004109 if (__l2cap_get_chan_by_dcid(list, scid)) {
4110 write_unlock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111 sock_set_flag(sk, SOCK_ZAPPED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004112 l2cap_sock_kill(sk);
4113 sk = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114 goto response;
4115 }
4116
4117 hci_conn_hold(conn->hcon);
4118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004119 l2cap_sock_init(sk, parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120 bacpy(&bt_sk(sk)->src, conn->src);
4121 bacpy(&bt_sk(sk)->dst, conn->dst);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004122 l2cap_pi(sk)->psm = psm;
4123 l2cap_pi(sk)->dcid = scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124
Gustavo F. Padovand1010242011-03-25 00:39:48 -03004125 bt_accept_enqueue(parent, sk);
4126
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004127 __l2cap_chan_add(conn, sk);
4128 dcid = l2cap_pi(sk)->scid;
4129 l2cap_pi(sk)->amp_id = amp_id;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004130
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004131 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004133 l2cap_pi(sk)->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004134
Marcel Holtmann984947d2009-02-06 23:35:19 +01004135 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004136 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004137 if (bt_sk(sk)->defer_setup) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004138 sk->sk_state = BT_CONNECT2;
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004139 result = L2CAP_CR_PEND;
4140 status = L2CAP_CS_AUTHOR_PEND;
4141 parent->sk_data_ready(parent, 0);
4142 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004143 /* Force pending result for AMP controllers.
4144 * The connection will succeed after the
4145 * physical link is up. */
4146 if (amp_id) {
4147 sk->sk_state = BT_CONNECT2;
4148 result = L2CAP_CR_PEND;
4149 } else {
4150 sk->sk_state = BT_CONFIG;
4151 result = L2CAP_CR_SUCCESS;
4152 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004153 status = L2CAP_CS_NO_INFO;
4154 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004155 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004156 sk->sk_state = BT_CONNECT2;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004157 result = L2CAP_CR_PEND;
4158 status = L2CAP_CS_AUTHEN_PEND;
4159 }
4160 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004161 sk->sk_state = BT_CONNECT2;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004162 result = L2CAP_CR_PEND;
4163 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 }
4165
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004166 write_unlock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167
4168response:
4169 bh_unlock_sock(parent);
4170
4171sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004172 rsp.scid = cpu_to_le16(scid);
4173 rsp.dcid = cpu_to_le16(dcid);
4174 rsp.result = cpu_to_le16(result);
4175 rsp.status = cpu_to_le16(status);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004176 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004177
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004178 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004179 struct l2cap_info_req info;
4180 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4181
4182 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4183 conn->info_ident = l2cap_get_ident(conn);
4184
4185 mod_timer(&conn->info_timer, jiffies +
4186 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
4187
4188 l2cap_send_cmd(conn, conn->info_ident,
4189 L2CAP_INFO_REQ, sizeof(info), &info);
4190 }
4191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004192 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004193 result == L2CAP_CR_SUCCESS) {
4194 u8 buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004195 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004196 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004197 l2cap_build_conf_req(sk, buf), buf);
4198 l2cap_pi(sk)->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004199 }
4200
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004201 return sk;
4202}
4203
/* Handle an incoming L2CAP Connection Request (BR/EDR).
 *
 * Thin wrapper around l2cap_create_connect(): responds with
 * L2CAP_CONN_RSP and amp_id 0 (BR/EDR controller).  The response is
 * built and sent inside l2cap_create_connect(), so the returned socket
 * is intentionally ignored here.  Always returns 0.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4210
/* Handle an L2CAP Connection Response from the remote peer.
 *
 * Looks up the local channel either by the source CID echoed in the
 * response (scid != 0) or, for early rejections, by the command ident.
 *
 * NOTE(review): l2cap_get_chan_by_scid()/l2cap_get_chan_by_ident()
 * appear to return with the socket bh-locked (we bh_unlock_sock() on
 * every exit path after a successful lookup) — confirm against the
 * helpers' definitions earlier in this file.
 *
 * Returns 0 on success, -EFAULT when no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		/* Peer echoed our source CID: normal lookup. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* scid == 0: rejection before a CID was assigned;
		 * match on the signalling ident instead. */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: record the peer's CID and move
		 * on to configuration. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Avoid sending a duplicate Configure Request. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Peer needs more time (e.g. authorization); wait for a
		 * final response. */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Defer teardown via a short timer instead. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4272
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004273static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004274{
4275 /* FCS is enabled only in ERTM or streaming mode, if one or both
4276 * sides request it.
4277 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004278 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4279 pi->fcs = L2CAP_FCS_NONE;
4280 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4281 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004282}
4283
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented) configuration options into the
 * channel's conf_req buffer, and once the final fragment arrives parses
 * them and sends a Configure Response.  Also detects the special case
 * of a reconfiguration caused by an AMP channel move (amp_move_reconf).
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked — every exit goes through the 'unlock' label which
 * calls bh_unlock_sock().  Confirm against the helper's definition.
 *
 * Returns 0 normally, -ENOENT if no channel matches the destination CID.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rspbuf[64];
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
	struct sock *sk;
	int len;
	u8 amp_move_reconf = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
		"reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
		sk->sk_state, l2cap_pi(sk)->rx_state,
		l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
		l2cap_pi(sk)->amp_move_id);

	/* Detect a reconfig request due to channel move between
	 * BR/EDR and AMP
	 */
	if (sk->sk_state == BT_CONNECTED &&
		l2cap_pi(sk)->rx_state ==
		L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;

	if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
		amp_move_reconf = 1;

	/* Config is only legal in BT_CONFIG state, unless this is an
	 * AMP-move reconfiguration of an already-connected channel. */
	if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
		struct l2cap_cmd_rej rej;

		/* 0x0002 = "invalid CID in request" reject reason. */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_REJECT, flags), rspbuf);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	/* Continuation flag set: more fragments will follow. */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
		goto unlock;
	}

	/* Complete config. */
	if (!amp_move_reconf)
		len = l2cap_parse_conf_req(sk, rspbuf);
	else
		len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);

	/* Unparsable/unacceptable options: tear the channel down. */
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_pi(sk)->conf_ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
			rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
			!l2cap_pi(sk)->amp_id) {
		/* Send success response right after pending if using
		 * lockstep config on BR/EDR
		 */
		rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
	}

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* AMP-move reconfiguration skips the normal state transitions. */
	if (amp_move_reconf)
		goto unlock;

	l2cap_pi(sk)->num_conf_rsp++;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is now open. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
			l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer but have not sent our own Configure
	 * Request yet — do it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
4411
/* Handle an incoming L2CAP Configure Response.
 *
 * Processes SUCCESS / PENDING (lockstep + AMP) / UNACCEPT results and,
 * once both directions are configured, moves the channel to
 * BT_CONNECTED.  Any protocol violation tears the channel down with a
 * Disconnect Request.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked (bh_unlock_sock() at 'done') — confirm against the
 * helper's definition earlier in this file.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	int len = cmd->len - sizeof(*rsp);

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	pi = l2cap_pi(sk);

	/* A response received during an AMP-move reconfiguration is
	 * handled by the dedicated reconf path. */
	if (pi->reconf_state != L2CAP_RECONF_NONE) {
		l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
		goto done;
	}

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
			/* Lockstep procedure requires a pending response
			 * before success.
			 */
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_PENDING:
		/* PENDING is only legal in lockstep configuration. */
		if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);

		pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;

		l2cap_conf_ext_fs_get(sk, rsp->data, len);

		if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
			struct hci_chan *chan;

			/* Already sent a 'pending' response, so set up
			 * the logical link now
			 */
			chan = l2cap_chan_admit(pi->amp_id, sk);
			if (!chan) {
				l2cap_send_disconn_req(pi->conn, sk,
							ECONNRESET);
				goto done;
			}

			if (chan->state == BT_CONNECTED)
				l2cap_create_cfm(chan, 0);
		}

		goto done;

	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, up to the retry limit. */
		if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			pi->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fallthrough - retry limit exceeded, give up */

	default:
		/* Unknown result, or too many UNACCEPT rounds: fail the
		 * channel and schedule teardown. */
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response fragments will follow. */
	if (flags & 0x01)
		goto done;

	pi->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(pi);

		sk->sk_state = BT_CONNECTED;

		if (pi->mode == L2CAP_MODE_ERTM ||
			pi->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
4537
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges with a Disconnection Response, flushes any pending
 * transmit/SREJ queues and ERTM timers (unless we already initiated a
 * disconnect ourselves), then deletes the channel — or defers deletion
 * via a short timer if userspace currently owns the socket lock.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * socket bh-locked (every path bh_unlock_sock()s) — confirm.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our local scid. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		/* Drop any queued outbound data. */
		sk->sk_send_head = NULL;
		skb_queue_purge(TX_QUEUE(sk));

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			skb_queue_purge(SREJ_QUEUE(sk));

			/* Stop all ERTM retransmission machinery. */
			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
4590
4591static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4592{
4593 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4594 u16 dcid, scid;
4595 struct sock *sk;
4596
4597 scid = __le16_to_cpu(rsp->scid);
4598 dcid = __le16_to_cpu(rsp->dcid);
4599
4600 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4601
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004602 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
4603 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604 return 0;
4605
Andrei Emeltchenkoa49184c2010-11-03 12:32:44 +02004606 /* don't delete l2cap channel if sk is owned by user */
4607 if (sock_owned_by_user(sk)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004608 sk->sk_state = BT_DISCONN;
4609 l2cap_sock_clear_timer(sk);
4610 l2cap_sock_set_timer(sk, HZ / 5);
Andrei Emeltchenkoa49184c2010-11-03 12:32:44 +02004611 bh_unlock_sock(sk);
4612 return 0;
4613 }
4614
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004615 l2cap_chan_del(sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616 bh_unlock_sock(sk);
4617
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004618 l2cap_sock_kill(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619 return 0;
4620}
4621
4622static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4623{
4624 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625 u16 type;
4626
4627 type = __le16_to_cpu(req->type);
4628
4629 BT_DBG("type 0x%4.4x", type);
4630
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004631 if (type == L2CAP_IT_FEAT_MASK) {
4632 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004633 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004634 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4635 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4636 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004637 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004638 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004639 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004640 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004641 l2cap_send_cmd(conn, cmd->ident,
4642 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004643 } else if (type == L2CAP_IT_FIXED_CHAN) {
4644 u8 buf[12];
4645 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4646 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4647 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4648 memcpy(buf + 4, l2cap_fixed_chan, 8);
4649 l2cap_send_cmd(conn, cmd->ident,
4650 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004651 } else {
4652 struct l2cap_info_rsp rsp;
4653 rsp.type = cpu_to_le16(type);
4654 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4655 l2cap_send_cmd(conn, cmd->ident,
4656 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4657 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658
4659 return 0;
4660}
4661
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the connection-level capability exchange: stores the peer's
 * feature mask, optionally chains a fixed-channel query, and finally
 * marks the exchange done and starts any channels that were waiting on
 * it (l2cap_conn_start).  Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A valid response arrived; stop the info-request timeout. */
	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer — treat the exchange as finished. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query the map
			 * before declaring the exchange complete. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* First octet of the fixed channel map. */
		conn->fc_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4715
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004716static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4717 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
4718{
4719 struct l2cap_move_chan_req req;
4720 u8 ident;
4721
4722 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4723 (int) dest_amp_id);
4724
4725 ident = l2cap_get_ident(conn);
4726 if (pi)
4727 pi->ident = ident;
4728
4729 req.icid = cpu_to_le16(icid);
4730 req.dest_amp_id = dest_amp_id;
4731
4732 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
4733}
4734
4735static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4736 u16 icid, u16 result)
4737{
4738 struct l2cap_move_chan_rsp rsp;
4739
4740 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4741
4742 rsp.icid = cpu_to_le16(icid);
4743 rsp.result = cpu_to_le16(result);
4744
4745 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4746}
4747
4748static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4749 struct l2cap_pinfo *pi, u16 icid, u16 result)
4750{
4751 struct l2cap_move_chan_cfm cfm;
4752 u8 ident;
4753
4754 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4755
4756 ident = l2cap_get_ident(conn);
4757 if (pi)
4758 pi->ident = ident;
4759
4760 cfm.icid = cpu_to_le16(icid);
4761 cfm.result = cpu_to_le16(result);
4762
4763 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4764}
4765
4766static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4767 u16 icid)
4768{
4769 struct l2cap_move_chan_cfm_rsp rsp;
4770
4771 BT_DBG("icid %d", (int) icid);
4772
4773 rsp.icid = cpu_to_le16(icid);
4774 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4775}
4776
4777static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4778 struct l2cap_cmd_hdr *cmd, u8 *data)
4779{
4780 struct l2cap_create_chan_req *req =
4781 (struct l2cap_create_chan_req *) data;
4782 struct sock *sk;
4783 u16 psm, scid;
4784
4785 psm = le16_to_cpu(req->psm);
4786 scid = le16_to_cpu(req->scid);
4787
4788 BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
4789 (int) req->amp_id);
4790
4791 if (req->amp_id) {
4792 struct hci_dev *hdev;
4793
4794 /* Validate AMP controller id */
Peter Krystad4e1c9fa2011-11-10 12:28:45 -08004795 hdev = hci_dev_get(req->amp_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004796 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4797 struct l2cap_create_chan_rsp rsp;
4798
4799 rsp.dcid = 0;
4800 rsp.scid = cpu_to_le16(scid);
4801 rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
4802 rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
4803
4804 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4805 sizeof(rsp), &rsp);
4806
4807 if (hdev)
4808 hci_dev_put(hdev);
4809
4810 return 0;
4811 }
4812
4813 hci_dev_put(hdev);
4814 }
4815
4816 sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4817 req->amp_id);
4818
Mat Martineau55f2a622011-09-19 13:20:17 -07004819 if (sk)
4820 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004821
Mat Martineau55f2a622011-09-19 13:20:17 -07004822 if (sk && req->amp_id &&
4823 (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004824 amp_accept_physical(conn, req->amp_id, sk);
4825
4826 return 0;
4827}
4828
/* Handle an incoming L2CAP Create Channel Response (AMP).
 *
 * The Create Channel Response payload layout matches the Connection
 * Response, so processing is delegated to l2cap_connect_rsp().
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4836
/* Handle an incoming L2CAP Move Channel Request (AMP channel move).
 *
 * Validates that the referenced channel exists, is an ERTM/streaming
 * dynamic channel, is not already on the requested controller, that the
 * destination controller is up, that there is no move collision, and
 * that local policy permits leaving BR/EDR.  On acceptance it enters
 * the responder move state machine; in every case a Move Channel
 * Response with the computed @result is sent.  Always returns 0.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid = 0;
	/* Default refusal if any validation below fails. */
	u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		goto send_move_response;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Only dynamic ERTM/streaming channels are movable. */
	if (pi->scid < L2CAP_CID_DYN_START ||
		(pi->mode != L2CAP_MODE_ERTM &&
		pi->mode != L2CAP_MODE_STREAMING)) {
		goto send_move_response;
	}

	if (pi->amp_id == req->dest_amp_id) {
		result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
		goto send_move_response;
	}

	/* Non-zero destination: make sure that AMP controller exists
	 * and is powered up. */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Both sides started a move: the side with the greater BD_ADDR
	 * refuses with a collision result. */
	if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
		pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
		bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
		goto send_move_response;
	}

	/* Local policy pins this channel to BR/EDR. */
	if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
		result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Accept: become the move responder. */
	pi->amp_move_cmd_ident = cmd->ident;
	pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
	l2cap_amp_move_setup(sk);
	pi->amp_move_id = req->dest_amp_id;
	icid = pi->dcid;

	if (req->dest_amp_id == 0) {
		/* Moving to BR/EDR */
		if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			result = L2CAP_MOVE_CHAN_PENDING;
		} else {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
			result = L2CAP_MOVE_CHAN_SUCCESS;
		}
	} else {
		/* Moving to an AMP: physical link setup happens first. */
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_accept_physical(pi->conn, req->dest_amp_id, sk);
		result = L2CAP_MOVE_CHAN_PENDING;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	if (sk)
		release_sock(sk);

	return 0;
}
4926
/* Handle an L2CAP Move Channel Response (we are the move initiator).
 *
 * On SUCCESS/PENDING the channel is found by our SCID (== the ICID we
 * sent); on any refusal it is found by the outstanding command ident.
 * Advances the initiator's amp_move_state machine and, where the move
 * can proceed or must be aborted, sends the Move Channel Confirm.
 *
 * Always returns 0; protocol errors are handled via UNCONFIRMED
 * confirms rather than command rejects.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	switch (result) {
	case L2CAP_MOVE_CHAN_SUCCESS:
	case L2CAP_MOVE_CHAN_PENDING:
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Unknown channel: tell the peer the move is off. */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		/* PENDING restarts the guard timer with the extended
		 * timeout; SUCCESS leaves it cleared until the confirm
		 * is sent below.
		 */
		l2cap_sock_clear_timer(sk);
		if (result == L2CAP_MOVE_CHAN_PENDING)
			l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);

		if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent when logical link
			 * is complete.
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
			if (result == L2CAP_MOVE_CHAN_PENDING) {
				/* Still waiting; timer already extended. */
				break;
			} else if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			} else {
				/* Logical link is up or moving to BR/EDR,
				 * proceed with move */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			}
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
			struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			struct hci_chan *chan;
			/* Moving to AMP */
			if (result == L2CAP_MOVE_CHAN_SUCCESS) {
				/* Remote is ready, send confirm immediately
				 * after logical link is ready
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			} else {
				/* Both logical link and move success
				 * are required to confirm
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
			}
			pi->remote_fs = default_fs;
			pi->local_fs = default_fs;
			/* Request the AMP logical link for this channel. */
			chan = l2cap_chan_admit(pi->amp_move_id, sk);
			if (!chan) {
				/* Logical link not available */
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
				break;
			}

			if (chan->state == BT_CONNECTED) {
				/* Logical link is already ready to go */
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				if (result == L2CAP_MOVE_CHAN_SUCCESS) {
					/* Can confirm now */
					l2cap_send_move_chan_cfm(conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				} else {
					/* Now only need move success
					 * required to confirm
					 */
					pi->amp_move_state =
						L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
				}

				/* Run the completion path via the worker. */
				l2cap_create_cfm(chan, 0);
			}
		} else {
			/* Any other amp move state means the move failed. */
			pi->amp_move_id = pi->amp_id;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_amp_move_revert(sk);
			pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		break;
	default:
		/* Failed (including collision case) */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Could not locate channel, icid is best guess */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);

		if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
				/* Lost the collision; yield to the peer's
				 * move and act as responder from here on.
				 */
				pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
			else {
				/* Cleanup - cancel move */
				pi->amp_move_id = pi->amp_id;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
				l2cap_amp_move_revert(sk);
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			}
		}

		l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		break;
	}

	/* sk is locked in every path that found a channel. */
	if (sk)
		release_sock(sk);

	return 0;
}
5083
/* Handle an L2CAP Move Channel Confirm (we are the move responder).
 *
 * CONFIRMED commits the move: amp_id takes the new controller id, and
 * if the channel has moved off an AMP back to BR/EDR the old AMP
 * hci_chan is released.  Anything else reverts the move.  A Move
 * Channel Confirm Response is always sent, even for unknown channels.
 *
 * Always returns 0.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk) {
		BT_DBG("Bad channel (%d)", (int) icid);
		goto send_move_confirm_response;
	}

	lock_sock(sk);
	pi = l2cap_pi(sk);

	if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
		if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
			pi->amp_id = pi->amp_move_id;
			/* amp_id 0 means the channel now lives on BR/EDR. */
			if (!pi->amp_id && pi->ampchan) {
				struct hci_chan *ampchan = pi->ampchan;
				struct hci_conn *ampcon = pi->ampcon;
				/* Have moved off of AMP, free the channel */
				pi->ampchan = NULL;
				pi->ampcon = NULL;
				/* Clear pointers before dropping the ref so
				 * no one can see a half-torn-down channel.
				 * hci_chan_put()'s return value apparently
				 * distinguishes "last reference dropped"
				 * from "still aggregated" — same pattern is
				 * used throughout this file.
				 */
				if (hci_chan_put(ampchan))
					ampcon->l2cap_data = NULL;
				else
					l2cap_deaggregate(ampchan, pi);
			}
			l2cap_amp_move_success(sk);
		} else {
			/* Peer did not confirm: fall back to old controller. */
			pi->amp_move_id = pi->amp_id;
			l2cap_amp_move_revert(sk);
		}
		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
	} else if (pi->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
		/* Confirm arrived before the logical link completed;
		 * only logged, state is left for the logical-link path.
		 */
		BT_DBG("Bad AMP_MOVE_STATE (%d)", pi->amp_move_state);
	}

send_move_confirm_response:
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (sk)
		release_sock(sk);

	return 0;
}
5143
/* Handle an L2CAP Move Channel Confirm Response (final handshake step;
 * we are the move initiator and previously sent the Confirm).
 *
 * Commits the move on our side: amp_id takes the new controller id,
 * the old AMP hci_chan is released if the channel returned to BR/EDR,
 * and queued data transmission is resumed.
 *
 * Always returns 0; an unknown ICID is silently ignored.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp =
		(struct l2cap_move_chan_cfm_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;

	u16 icid;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", (int) icid);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		return 0;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Handshake finished; stop the move guard timer. */
	l2cap_sock_clear_timer(sk);

	if (pi->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
		pi->amp_id = pi->amp_move_id;

		/* amp_id 0: channel is back on BR/EDR, drop the AMP link. */
		if (!pi->amp_id && pi->ampchan) {
			struct hci_chan *ampchan = pi->ampchan;
			struct hci_conn *ampcon = pi->ampcon;
			/* Have moved off of AMP, free the channel */
			pi->ampchan = NULL;
			pi->ampcon = NULL;
			/* Same teardown pattern as the other confirm paths:
			 * last-reference drop clears the AMP conn's
			 * l2cap_data, otherwise just deaggregate.
			 */
			if (hci_chan_put(ampchan))
				ampcon->l2cap_data = NULL;
			else
				l2cap_deaggregate(ampchan, pi);
		}

		l2cap_amp_move_success(sk);

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
	}

	release_sock(sk);

	return 0;
}
5196
/* Workqueue handler for AMP move signaling commands.
 *
 * The four Move Channel commands are queued onto _l2cap_wq by
 * l2cap_sig_amp() (so they can sleep in lock_sock etc.) and dispatched
 * here.  On handler error a Command Reject ("command not understood")
 * is sent.  The cloned skb and the work item, both allocated by
 * l2cap_sig_amp(), are freed here in every path.
 */
static void l2cap_amp_signal_worker(struct work_struct *work)
{
	int err = 0;
	struct l2cap_amp_signal_work *ampwork =
		container_of(work, struct l2cap_amp_signal_work, work);

	switch (ampwork->cmd.code) {
	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
						ampwork->data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
						ampwork->data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
						ampwork->data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(ampwork->conn,
						&ampwork->cmd, ampwork->data);
		break;

	default:
		/* l2cap_sig_amp() only queues the four codes above, so
		 * this is defensive.
		 */
		BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
		err = -EINVAL;
		break;
	}

	if (err) {
		struct l2cap_cmd_rej rej;
		BT_DBG("error %d", err);

		/* In this context, commands are only rejected with
		 * "command not understood", code 0.
		 */
		rej.reason = cpu_to_le16(0);
		l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
				L2CAP_COMMAND_REJ, sizeof(rej), &rej);
	}

	/* ampwork->data points into this clone, so it must outlive the
	 * handlers above.
	 */
	kfree_skb(ampwork->skb);
	kfree(ampwork);
}
5245
/* Callback from the AMP manager when a physical link create/accept
 * attempt finishes for @sk.
 *
 * Three situations are distinguished:
 *  - channel not yet connected: finish (or fail) channel creation on
 *    the AMP, answering the peer's Create Channel Request for incoming
 *    channels or issuing our own request for outgoing ones;
 *  - connected + we initiated a move: send the Move Channel Request;
 *  - connected + we are the move responder: admit the logical link and
 *    answer the pending Move Channel Request.
 * Any other combination aborts the move and resumes data transfer.
 */
void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
		struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
		(int) local_id, (int) remote_id, sk);

	lock_sock(sk);

	/* Channel is going away; nothing to do. */
	if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
		release_sock(sk);
		return;
	}

	pi = l2cap_pi(sk);

	if (sk->sk_state != BT_CONNECTED) {
		/* bt_sk(sk)->parent is set for incoming channels only. */
		if (bt_sk(sk)->parent) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			/* Incoming channel on AMP */
			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Send successful response */
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			} else {
				/* Send negative response */
				rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			}

			l2cap_send_cmd(pi->conn, pi->ident,
					L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Proceed straight to configuration. */
				sk->sk_state = BT_CONFIG;
				pi->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(pi->conn,
					l2cap_get_ident(pi->conn),
					L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		} else {
			/* Outgoing channel on AMP */
			if (result != L2CAP_CREATE_CHAN_SUCCESS) {
				/* Revert to BR/EDR connect */
				l2cap_send_conn_req(sk);
			} else {
				pi->amp_id = local_id;
				l2cap_send_create_chan_req(sk, remote_id);
			}
		}
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		/* Physical link ready; start the move handshake. */
		l2cap_amp_move_setup(sk);
		pi->amp_move_id = local_id;
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;

		l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		struct hci_chan *chan;
		struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
				0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
		pi->remote_fs = default_fs;
		pi->local_fs = default_fs;
		/* Admit the logical link on the accepted physical link. */
		chan = l2cap_chan_admit(local_id, sk);
		if (chan) {
			if (chan->state == BT_CONNECTED) {
				/* Logical link is ready to go */
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);

				l2cap_create_cfm(chan, 0);
			} else {
				/* Wait for logical link to be ready */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			}
		} else {
			/* Logical link not available */
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}
	} else {
		/* Physical link failed, or unexpected role/state: abort
		 * the move and get traffic flowing again.
		 */
		BT_DBG("result %d, role %d, local_busy %d", result,
			(int) pi->amp_move_role,
			(int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));

		if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			if (result == -EINVAL)
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
			else
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;

		/* Clear a stale local-busy condition if receive buffer
		 * space is available again.
		 */
		if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
			l2cap_ertm_tx(sk, 0, 0,
					L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

		/* Restart data transmission */
		l2cap_ertm_send(sk);
	}

	release_sock(sk);
}
5373
/* Completion handler for an AMP logical link, run from the
 * l2cap_logical_link_worker with the socket reference taken at
 * l2cap_create_cfm() time.  Consumes chan->l2cap_sk (cleared here).
 *
 * status == 0: the logical link is up.  Either finish channel
 * creation/configuration (channel not yet BT_CONNECTED) or advance the
 * appropriate move state machine.  Nonzero status: fail channel setup
 * or abort the move and release the AMP channel.
 *
 * Always returns 0.
 */
int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;
	struct hci_chan *ampchan;
	struct hci_conn *ampcon;

	BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);

	/* Take and clear the socket the link was created for. */
	sk = chan->l2cap_sk;
	chan->l2cap_sk = NULL;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	/* Channel was torn down (and has no AMP id) while the link was
	 * being brought up; nothing left to do.
	 */
	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
		release_sock(sk);
		return 0;
	}

	pi = l2cap_pi(sk);

	/* NOTE(review): chan was already dereferenced above, so the
	 * "chan != NULL" test here is redundant.
	 */
	if ((!status) && (chan != NULL)) {
		pi->ampcon = chan->conn;
		pi->ampcon->l2cap_data = pi->conn;

		BT_DBG("amp_move_state %d", pi->amp_move_state);

		if (sk->sk_state != BT_CONNECTED) {
			/* Channel creation on AMP: finish configuration. */
			struct l2cap_conf_rsp rsp;

			/* Must use spinlock to prevent concurrent
			 * execution of l2cap_config_rsp()
			 */
			bh_lock_sock(sk);
			l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(sk, &rsp,
						L2CAP_CONF_SUCCESS, 0), &rsp);
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
				set_default_fcs(l2cap_pi(sk));

				sk->sk_state = BT_CONNECTED;

				if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
					l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
					l2cap_ertm_init(sk);

				l2cap_chan_ready(sk);
			}
			bh_unlock_sock(sk);
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent after a success
			 * response is received
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
			/* Link was the last prerequisite: confirm (as
			 * initiator) or respond (as responder) now, unless
			 * local busy defers it.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_SUCCESS);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			}
		} else if ((pi->amp_move_state !=
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) &&
			(pi->amp_move_state !=
				L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) &&
			(pi->amp_move_state !=
				L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP)) {
			/* Move was not in expected state, free the channel */
			ampchan = pi->ampchan;
			ampcon = pi->ampcon;
			pi->ampchan = NULL;
			pi->ampcon = NULL;
			if (ampchan) {
				if (hci_chan_put(ampchan))
					ampcon->l2cap_data = NULL;
				else
					l2cap_deaggregate(ampchan, pi);
			}
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
		}
	} else {
		/* Logical link setup failed. */

		if (sk->sk_state != BT_CONNECTED)
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			l2cap_amp_move_revert(sk);
			l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_CONFIG);
		} else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if ((pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
				(pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
				/* Remote has only sent pending or
				 * success responses, clean up
				 */
				l2cap_amp_move_revert(sk);
				l2cap_pi(sk)->amp_move_role =
					L2CAP_AMP_MOVE_NONE;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			}

			/* Other amp move states imply that the move
			 * has already aborted
			 */
			l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		/* Drop the (possibly half-created) AMP channel. */
		ampchan = pi->ampchan;
		ampcon = pi->ampcon;
		pi->ampchan = NULL;
		pi->ampcon = NULL;
		if (ampchan) {
			if (hci_chan_put(ampchan))
				ampcon->l2cap_data = NULL;
			else
				l2cap_deaggregate(ampchan, pi);
		}
	}

	release_sock(sk);
	return 0;
}
5521
5522static void l2cap_logical_link_worker(struct work_struct *work)
5523{
5524 struct l2cap_logical_link_work *log_link_work =
5525 container_of(work, struct l2cap_logical_link_work, work);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005526 struct sock *sk = log_link_work->chan->l2cap_sk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005527
5528 l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005529 sock_put(sk);
5530 hci_chan_put(log_link_work->chan);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005531 kfree(log_link_work);
5532}
5533
5534static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5535{
5536 struct l2cap_logical_link_work *amp_work;
5537
Peter Krystada8417e62012-03-21 16:58:17 -07005538 if (!chan->l2cap_sk) {
Mat Martineau9f8d4672011-12-14 12:10:46 -08005539 BT_ERR("Expected l2cap_sk to point to connecting socket");
5540 return -EFAULT;
5541 }
5542
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005543 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005544 if (!amp_work) {
5545 sock_put(chan->l2cap_sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005546 return -ENOMEM;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005547 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005548
5549 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5550 amp_work->chan = chan;
5551 amp_work->status = status;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005552
5553 hci_chan_hold(chan);
5554
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005555 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5556 kfree(amp_work);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005557 sock_put(chan->l2cap_sk);
5558 hci_chan_put(chan);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005559 return -ENOMEM;
5560 }
5561
5562 return 0;
5563}
5564
5565int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
5566{
5567 struct l2cap_conn *conn = chan->conn->l2cap_data;
5568
5569 BT_DBG("chan %p conn %p status %d", chan, conn, status);
5570
5571 /* TODO: if failed status restore previous fs */
5572 return 0;
5573}
5574
/* Logical-link destroy notification: an AMP hci_chan went away.
 *
 * Walks every channel on the owning L2CAP connection (under the
 * chan_list read lock, per-socket bh lock) and, for each channel still
 * attached to @chan, detaches it and restarts a move back toward a
 * usable controller via l2cap_amp_move_init().
 *
 * Always returns 0.
 */
int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = chan->conn->l2cap_data;
	struct sock *sk;

	BT_DBG("chan %p conn %p", chan, conn);

	/* AMP conn never carried L2CAP traffic; nothing to clean up. */
	if (!conn)
		return 0;

	l = &conn->chan_list;

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);
		/* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
		if (l2cap_pi(sk)->ampchan == chan) {
			struct hci_conn *ampcon = l2cap_pi(sk)->ampcon;
			l2cap_pi(sk)->ampchan = NULL;
			l2cap_pi(sk)->ampcon = NULL;
			/* Same release pattern as the move-confirm paths:
			 * last reference clears the AMP conn's l2cap_data,
			 * otherwise drop this socket from the aggregate.
			 */
			if (hci_chan_put(chan))
				ampcon->l2cap_data = NULL;
			else
				l2cap_deaggregate(chan, l2cap_pi(sk));

			/* Kick off a fresh move for the orphaned channel. */
			l2cap_amp_move_init(sk);
		}
		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;


}
5613
5614static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5615 u8 *data, struct sk_buff *skb)
5616{
5617 struct l2cap_amp_signal_work *amp_work;
5618
5619 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5620 if (!amp_work)
5621 return -ENOMEM;
5622
5623 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5624 amp_work->conn = conn;
5625 amp_work->cmd = *cmd;
5626 amp_work->data = data;
5627 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5628 if (!amp_work->skb) {
5629 kfree(amp_work);
5630 return -ENOMEM;
5631 }
5632
5633 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5634 kfree_skb(amp_work->skb);
5635 kfree(amp_work);
5636 return -ENOMEM;
5637 }
5638
5639 return 0;
5640}
5641
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005642static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005643 u16 to_multiplier)
5644{
5645 u16 max_latency;
5646
5647 if (min > max || min < 6 || max > 3200)
5648 return -EINVAL;
5649
5650 if (to_multiplier < 10 || to_multiplier > 3200)
5651 return -EINVAL;
5652
5653 if (max >= to_multiplier * 8)
5654 return -EINVAL;
5655
5656 max_latency = (to_multiplier * 8 / max) - 1;
5657 if (latency > 499 || latency > max_latency)
5658 return -EINVAL;
5659
5660 return 0;
5661}
5662
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the master may be asked to update parameters, and the request
 * must be exactly the expected size.  The proposed parameters are
 * validated with l2cap_check_conn_param(); an accept/reject response
 * is always sent, and on accept the controller is asked to apply the
 * new parameters.
 *
 * Returns 0 on success, -EINVAL if we are not master, -EPROTO on a
 * malformed request (triggers a command reject in the caller).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only a slave sends this request; we must be master to act. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the accepted parameters at the link layer. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5704
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005705static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005706 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
5707 struct sk_buff *skb)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005708{
5709 int err = 0;
5710
5711 switch (cmd->code) {
5712 case L2CAP_COMMAND_REJ:
5713 l2cap_command_rej(conn, cmd, data);
5714 break;
5715
5716 case L2CAP_CONN_REQ:
5717 err = l2cap_connect_req(conn, cmd, data);
5718 break;
5719
5720 case L2CAP_CONN_RSP:
5721 err = l2cap_connect_rsp(conn, cmd, data);
5722 break;
5723
5724 case L2CAP_CONF_REQ:
5725 err = l2cap_config_req(conn, cmd, cmd_len, data);
5726 break;
5727
5728 case L2CAP_CONF_RSP:
5729 err = l2cap_config_rsp(conn, cmd, data);
5730 break;
5731
5732 case L2CAP_DISCONN_REQ:
5733 err = l2cap_disconnect_req(conn, cmd, data);
5734 break;
5735
5736 case L2CAP_DISCONN_RSP:
5737 err = l2cap_disconnect_rsp(conn, cmd, data);
5738 break;
5739
5740 case L2CAP_ECHO_REQ:
5741 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5742 break;
5743
5744 case L2CAP_ECHO_RSP:
5745 break;
5746
5747 case L2CAP_INFO_REQ:
5748 err = l2cap_information_req(conn, cmd, data);
5749 break;
5750
5751 case L2CAP_INFO_RSP:
5752 err = l2cap_information_rsp(conn, cmd, data);
5753 break;
5754
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005755 case L2CAP_CREATE_CHAN_REQ:
5756 err = l2cap_create_channel_req(conn, cmd, data);
5757 break;
5758
5759 case L2CAP_CREATE_CHAN_RSP:
5760 err = l2cap_create_channel_rsp(conn, cmd, data);
5761 break;
5762
5763 case L2CAP_MOVE_CHAN_REQ:
5764 case L2CAP_MOVE_CHAN_RSP:
5765 case L2CAP_MOVE_CHAN_CFM:
5766 case L2CAP_MOVE_CHAN_CFM_RSP:
5767 err = l2cap_sig_amp(conn, cmd, data, skb);
5768 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005769 default:
5770 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5771 err = -EINVAL;
5772 break;
5773 }
5774
5775 return err;
5776}
5777
5778static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5779 struct l2cap_cmd_hdr *cmd, u8 *data)
5780{
5781 switch (cmd->code) {
5782 case L2CAP_COMMAND_REJ:
5783 return 0;
5784
5785 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005786 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005787
5788 case L2CAP_CONN_PARAM_UPDATE_RSP:
5789 return 0;
5790
5791 default:
5792 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5793 return -EINVAL;
5794 }
5795}
5796
/* Parse and dispatch all signaling commands carried in one skb from
 * the signaling channel.
 *
 * Iterates over concatenated {cmd header, payload} units, routing each
 * to the LE or BR/EDR dispatcher depending on the link type, sending a
 * command reject when a dispatcher fails, and stopping at the first
 * malformed unit.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Payload must fit in what remains, and ident 0 is
		 * reserved — abandon the rest of the skb if violated.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
							data, skb);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5844
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005845static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005846{
5847 u16 our_fcs, rcv_fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005848 int hdr_size;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005849
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005850 if (pi->extended_control)
5851 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
5852 else
5853 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
5854
5855 if (pi->fcs == L2CAP_FCS_CRC16) {
5856 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005857 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5858 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5859
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005860 if (our_fcs != rcv_fcs) {
5861 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005862 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005863 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005864 }
5865 return 0;
5866}
5867
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005868static void l2cap_ertm_pass_to_tx(struct sock *sk,
5869 struct bt_l2cap_control *control)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005870{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005871 BT_DBG("sk %p, control %p", sk, control);
5872 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005873}
5874
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005875static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
5876 struct bt_l2cap_control *control)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005877{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005878 BT_DBG("sk %p, control %p", sk, control);
5879 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
5880}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005881
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005882static void l2cap_ertm_resend(struct sock *sk)
5883{
5884 struct bt_l2cap_control control;
5885 struct l2cap_pinfo *pi;
5886 struct sk_buff *skb;
5887 struct sk_buff *tx_skb;
5888 u16 seq;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005889
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005890 BT_DBG("sk %p", sk);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005892 pi = l2cap_pi(sk);
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005893
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005894 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
5895 return;
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03005896
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005897 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
5898 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
5899 return;
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005900
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005901 while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
5902 seq = l2cap_seq_list_pop(&pi->retrans_list);
5903
5904 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
5905 if (!skb) {
5906 BT_DBG("Error: Can't retransmit seq %d, frame missing",
5907 (int) seq);
5908 continue;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005909 }
5910
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005911 bt_cb(skb)->retries += 1;
5912 control = bt_cb(skb)->control;
5913
5914 if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
5915 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
5916 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5917 l2cap_seq_list_clear(&pi->retrans_list);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005918 break;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005919 }
5920
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005921 control.reqseq = pi->buffer_seq;
5922 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
5923 control.final = 1;
5924 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
5925 } else {
5926 control.final = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005927 }
5928
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005929 if (skb_cloned(skb)) {
5930 /* Cloned sk_buffs are read-only, so we need a
5931 * writeable copy
5932 */
5933 tx_skb = skb_copy(skb, GFP_ATOMIC);
5934 } else {
5935 tx_skb = skb_clone(skb, GFP_ATOMIC);
5936 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005937
Mat Martineau0c04ef92011-12-07 16:41:22 -08005938 if (!tx_skb) {
5939 l2cap_seq_list_clear(&pi->retrans_list);
5940 break;
5941 }
5942
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005943 /* Update skb contents */
5944 if (pi->extended_control) {
5945 put_unaligned_le32(__pack_extended_control(&control),
5946 tx_skb->data + L2CAP_HDR_SIZE);
5947 } else {
5948 put_unaligned_le16(__pack_enhanced_control(&control),
5949 tx_skb->data + L2CAP_HDR_SIZE);
5950 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005951
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005952 if (pi->fcs == L2CAP_FCS_CRC16)
5953 apply_fcs(tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005954
Mat Martineau2f0cd842011-10-20 14:34:26 -07005955 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005956 tx_skb->sk = sk;
5957 tx_skb->destructor = l2cap_skb_destructor;
5958 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005959
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005960 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005961
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005962 BT_DBG("Resent txseq %d", (int)control.txseq);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005963
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005964 pi->last_acked_seq = pi->buffer_seq;
Mat Martineauc0675b82011-07-07 09:39:02 -07005965 }
5966}
5967
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005968static inline void l2cap_ertm_retransmit(struct sock *sk,
5969 struct bt_l2cap_control *control)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03005970{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005971 BT_DBG("sk %p, control %p", sk, control);
5972
5973 l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
5974 l2cap_ertm_resend(sk);
5975}
5976
/* Queue every unacked frame from control->reqseq onward for
 * retransmission (REJ recovery).
 *
 * The first walk locates the skb carrying reqseq; the second walk
 * resumes from that skb and appends each txseq up to (but excluding)
 * sk_send_head, which marks the first never-sent frame.  Skipped
 * entirely while the remote is busy.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* A poll obliges us to set the F-bit on the response. */
	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
				skb == sk->sk_send_head)
				break;
		}

		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
						bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
6013
/* Chain new_frag onto skb's frag_list during SDU reassembly and
 * update the running tail pointer and length bookkeeping.
 *
 * On the first fragment, *last_frag points at skb itself, so the
 * unconditional (*last_frag)->next store writes skb->next; the real
 * list head is established via skb_shinfo(skb)->frag_list.
 */
static inline void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	   skb->data_len reflects only data in fragments
	 */
	BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);

	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6034
/* Process an in-sequence I-frame: either deliver it directly
 * (unsegmented) or fold it into the SDU being reassembled per its
 * SAR bits.
 *
 * On success for START/CONTINUE frames, ownership of skb moves to
 * pi->sdu (skb is set to NULL so the error path won't free it).  Any
 * failure discards the partial SDU and the frame, and the function
 * returns the negative error.  Finally, local-busy is signaled to the
 * TX state machine if receive memory has run out.
 */
static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
			struct bt_l2cap_control *control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = -EINVAL;

	BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
		skb, skb->len, skb->truesize);

	if (!control)
		return err;

	pi = l2cap_pi(sk);

	BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
		control->frame_type, control->sar, control->txseq,
		control->reqseq, control->final);

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A stray unsegmented frame aborts any reassembly in
		 * progress.
		 */
		if (pi->sdu) {
			BT_DBG("Unexpected unsegmented PDU during reassembly");
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}

		BT_DBG("Unsegmented");
		err = sock_queue_rcv_skb(sk, skb);
		break;

	case L2CAP_SAR_START:
		if (pi->sdu) {
			BT_DBG("Unexpected start PDU during reassembly");
			kfree_skb(pi->sdu);
		}

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the whole SDU;
		 * otherwise fall through to the error path (err stays
		 * -EINVAL).
		 */
		if (skb->len >= pi->sdu_len)
			break;

		pi->sdu = skb;
		pi->sdu_last_frag = skb;

		BT_DBG("Start");

		/* Ownership transferred to pi->sdu. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Reaching or passing the expected length on a CONTINUE
		 * frame is a protocol error (err stays -EINVAL).
		 */
		if (pi->sdu->len >= pi->sdu_len)
			break;

		BT_DBG("Continue, reassembled %d", pi->sdu->len);

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Final length must match exactly what START announced. */
		if (pi->sdu->len != pi->sdu_len)
			break;

		BT_DBG("End, reassembled %d", pi->sdu->len);
		/* If the sender used tiny PDUs, the rcv queuing could fail.
		 * Applications that have issues here should use a larger
		 * sk_rcvbuf.
		 */
		err = sock_queue_rcv_skb(sk, pi->sdu);

		if (!err) {
			/* Reassembly complete */
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}
		break;

	default:
		BT_DBG("Bad SAR value");
		break;
	}

	if (err) {
		BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
			err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
		/* Drop the partial SDU and the current frame (if not
		 * already absorbed into the SDU).
		 */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;
		if (skb)
			kfree_skb(skb);
	}

	/* Update local busy state */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);

	return err;
}
6159
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006160static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03006161{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006162 int err = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006163 /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
6164 * until a gap is encountered.
6165 */
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006166
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006167 struct l2cap_pinfo *pi;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006168
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006169 BT_DBG("sk %p", sk);
6170 pi = l2cap_pi(sk);
6171
6172 while (l2cap_rmem_available(sk)) {
6173 struct sk_buff *skb;
6174 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6175 (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
6176
6177 skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
6178
6179 if (!skb)
6180 break;
6181
6182 skb_unlink(skb, SREJ_QUEUE(sk));
6183 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6184 err = l2cap_ertm_rx_expected_iframe(sk,
6185 &bt_cb(skb)->control, skb);
6186 if (err)
6187 break;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006188 }
6189
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006190 if (skb_queue_empty(SREJ_QUEUE(sk))) {
6191 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6192 l2cap_ertm_send_ack(sk);
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03006193 }
6194
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006195 return err;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006196}
6197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006198static void l2cap_ertm_handle_srej(struct sock *sk,
6199 struct bt_l2cap_control *control)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006200{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006201 struct l2cap_pinfo *pi;
6202 struct sk_buff *skb;
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03006203
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006204 BT_DBG("sk %p, control %p", sk, control);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006205
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006206 pi = l2cap_pi(sk);
Gustavo F. Padovan05fbd892010-05-01 16:15:39 -03006207
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006208 if (control->reqseq == pi->next_tx_seq) {
6209 BT_DBG("Invalid reqseq %d, disconnecting",
6210 (int) control->reqseq);
6211 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006212 return;
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006213 }
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006214
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006215 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006216
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006217 if (skb == NULL) {
6218 BT_DBG("Seq %d not available for retransmission",
6219 (int) control->reqseq);
6220 return;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006221 }
6222
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006223 if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
6224 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6225 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6226 return;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006227 }
6228
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006229 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006230
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006231 if (control->poll) {
6232 l2cap_ertm_pass_to_tx(sk, control);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006233
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006234 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
6235 l2cap_ertm_retransmit(sk, control);
6236 l2cap_ertm_send(sk);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006237
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006238 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6239 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6240 pi->srej_save_reqseq = control->reqseq;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006241 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006242 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006243 l2cap_ertm_pass_to_tx_fbit(sk, control);
6244
6245 if (control->final) {
6246 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
6247 (pi->srej_save_reqseq == control->reqseq)) {
6248 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
6249 } else {
6250 l2cap_ertm_retransmit(sk, control);
6251 }
6252 } else {
6253 l2cap_ertm_retransmit(sk, control);
6254 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6255 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6256 pi->srej_save_reqseq = control->reqseq;
6257 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006258 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006259 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006260}
6261
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006262static void l2cap_ertm_handle_rej(struct sock *sk,
6263 struct bt_l2cap_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006264{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006265 struct l2cap_pinfo *pi;
6266 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006267
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006268 BT_DBG("sk %p, control %p", sk, control);
6269
6270 pi = l2cap_pi(sk);
6271
6272 if (control->reqseq == pi->next_tx_seq) {
6273 BT_DBG("Invalid reqseq %d, disconnecting",
6274 (int) control->reqseq);
6275 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6276 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006277 }
6278
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006279 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006280
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006281 if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
6282 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6283 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6284 return;
6285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006286
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006287 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
6288
6289 l2cap_ertm_pass_to_tx(sk, control);
6290
6291 if (control->final) {
6292 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
6293 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
6294 else
6295 l2cap_ertm_retransmit_all(sk, control);
6296 } else {
6297 l2cap_ertm_retransmit_all(sk, control);
6298 l2cap_ertm_send(sk);
6299 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
6300 pi->conn_state |= L2CAP_CONN_REJ_ACT;
6301 }
6302}
6303
/* Classify an incoming I-frame's txseq relative to the receive
 * window and any outstanding SREJ state.
 *
 * Returns one of the L2CAP_ERTM_TXSEQ_* classifications that drive
 * the RX state machines: EXPECTED, UNEXPECTED (gap → send SREJ),
 * DUPLICATE, the *_SREJ variants while in SREJ_SENT state, or
 * INVALID/INVALID_IGNORE for out-of-window sequence numbers.
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	/* txseq behind expected_tx_seq (mod window) means we already
	 * received this frame.
	 */
	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6393
/* ERTM RX state machine handler for the normal RECV state.
 *
 * I-frames are classified against the receive window: expected frames
 * are delivered/reassembled, a gap triggers SREJ recovery (and a move
 * to SREJ_SENT state), duplicates only update the TX side.  S-frame
 * events (RR/RNR/REJ/SREJ) update busy state and drive
 * retransmission.  skb is consumed here unless ownership was handed
 * on (skb_in_use).
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* While locally busy, expected frames are simply
			 * dropped; they will be recovered later.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Already received; only the ack info matters. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			/* Retransmit only if this F-bit doesn't complete an
			 * already-handled REJ and no AMP move is pending.
			 */
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
				pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_PREPARE) {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			/* Plain RR clears remote busy; restart the retrans
			 * timer if frames are still outstanding.
			 */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		/* Remote is busy: stop retransmitting until it recovers. */
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6531
/* ERTM RX state machine handler for the SREJ_SENT state (gap
 * recovery in progress).
 *
 * Incoming I-frames are stashed on the SREJ queue; frames answering
 * the head of the SREJ list additionally trigger draining of queued
 * in-order frames.  New gaps extend the SREJ list.  S-frame handling
 * mirrors the RECV state but answers polls with the SREJ tail.  skb
 * is consumed here unless queued (skb_in_use).
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* This answers the oldest outstanding SREJ. */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			/* Deliver any frames that are now in order. */
			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			/* Answer the poll by re-issuing the newest SREJ. */
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			/* Acknowledge the RNR with a plain RR. */
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6677
/* ERTM RX state machine handler used while an AMP channel move is in
 * progress.
 *
 * Deliberately minimal: only in-sequence I-frames are delivered, and
 * S-frames merely advance the acked sequence; nothing is done that
 * could change RX state mid-move.  skb is consumed unless delivered
 * (skb_in_use).
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			/* Note: no retransmission here even if the F-bit
			 * would normally require it — deferred until after
			 * the move completes.
			 */
			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6739
/* Respond to a poll (P=1) received while parked during an AMP move.
 *
 * Processes the peer's acknowledgement, rewinds the transmit side to
 * the sequence number the peer expects, finishes the move, then
 * replays the stored poll event through the normal receive state
 * machine so the proper F=1 response is generated.
 *
 * Returns 0, -EPROTO if the stored event was an I-frame (invalid as a
 * poll), or an error from l2cap_finish_amp_move().
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	/* Release everything the peer has acknowledged up to reqseq */
	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	/* The answer to the poll must carry the F bit */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
						pi->amp_move_event);

	return err;
}
6782
6783static void l2cap_amp_move_setup(struct sock *sk)
6784{
6785 struct l2cap_pinfo *pi;
6786 struct sk_buff *skb;
6787
6788 BT_DBG("sk %p", sk);
6789
6790 pi = l2cap_pi(sk);
6791
6792 l2cap_ertm_stop_ack_timer(pi);
6793 l2cap_ertm_stop_retrans_timer(pi);
6794 l2cap_ertm_stop_monitor_timer(pi);
6795
6796 pi->retry_count = 0;
6797 skb_queue_walk(TX_QUEUE(sk), skb) {
6798 if (bt_cb(skb)->retries)
6799 bt_cb(skb)->retries = 1;
6800 else
6801 break;
6802 }
6803
6804 pi->expected_tx_seq = pi->buffer_seq;
6805
6806 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6807 l2cap_seq_list_clear(&pi->retrans_list);
6808 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6809 skb_queue_purge(SREJ_QUEUE(sk));
6810
6811 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6812 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6813
6814 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6815 pi->rx_state);
6816
6817 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6818}
6819
6820static void l2cap_amp_move_revert(struct sock *sk)
6821{
6822 struct l2cap_pinfo *pi;
6823
6824 BT_DBG("sk %p", sk);
6825
6826 pi = l2cap_pi(sk);
6827
6828 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6829 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6830 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6831 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6832 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6833}
6834
6835static int l2cap_amp_move_reconf(struct sock *sk)
6836{
6837 struct l2cap_pinfo *pi;
6838 u8 buf[64];
6839 int err = 0;
6840
6841 BT_DBG("sk %p", sk);
6842
6843 pi = l2cap_pi(sk);
6844
6845 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6846 l2cap_build_amp_reconf_req(sk, buf), buf);
6847 return err;
6848}
6849
6850static void l2cap_amp_move_success(struct sock *sk)
6851{
6852 struct l2cap_pinfo *pi;
6853
6854 BT_DBG("sk %p", sk);
6855
6856 pi = l2cap_pi(sk);
6857
6858 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6859 int err = 0;
6860 /* Send reconfigure request */
6861 if (pi->mode == L2CAP_MODE_ERTM) {
6862 pi->reconf_state = L2CAP_RECONF_INT;
6863 if (enable_reconfig)
6864 err = l2cap_amp_move_reconf(sk);
6865
6866 if (err || !enable_reconfig) {
6867 pi->reconf_state = L2CAP_RECONF_NONE;
6868 l2cap_ertm_tx(sk, NULL, NULL,
6869 L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6870 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6871 }
6872 } else
6873 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6874 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
6875 if (pi->mode == L2CAP_MODE_ERTM)
6876 pi->rx_state =
6877 L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
6878 else
6879 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6880 }
6881}
6882
6883static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6884{
6885 /* Make sure reqseq is for a packet that has been sent but not acked */
6886 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6887 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6888}
6889
/* Streaming-mode receive path.
 *
 * Streaming mode never retransmits: the expected in-sequence I-frame
 * is delivered, while any sequence gap discards both the frame and
 * any partially reassembled SDU.  The receive counters always advance
 * to follow the peer's txseq.  Always returns 0.
 * @skb is consumed (queued by the reassembly helper or freed here).
 */
static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, state %d",
		sk, control, skb, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
			L2CAP_ERTM_TXSEQ_EXPECTED) {
		/* Let the transmit side see the ack info first */
		l2cap_ertm_pass_to_tx(sk, control);

		BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
			__next_seq(pi->buffer_seq, pi));

		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);

		l2cap_ertm_rx_expected_iframe(sk, control, skb);
	} else {
		/* Gap detected: drop any half-built SDU and this frame */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Track the peer's sequence regardless of delivery outcome */
	pi->last_acked_seq = control->txseq;
	pi->expected_tx_seq = __next_seq(control->txseq, pi);

	return err;
}
6930
/* Top-level ERTM receive dispatcher.
 *
 * Validates the frame's acknowledgement number against the unacked
 * window, then hands the frame to the handler for the current receive
 * state.  The WAIT_F/WAIT_P states implement the tail end of an AMP
 * channel move.  An out-of-window reqseq is a protocol violation and
 * tears the channel down with ECONNRESET.
 *
 * Returns 0 or a negative error from the state handlers.  @skb
 * ownership passes to the per-state handlers.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* Move initiator waiting for F=1: on receipt,
			 * resume normal reception and rewind the transmit
			 * side to the peer's expected sequence number.
			 * NOTE(review): when F=0 the skb is neither used
			 * nor freed here - confirm the caller's ownership
			 * expectations. */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				/* MTU follows whichever controller now
				 * carries the channel (AMP or BR/EDR) */
				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
							event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			/* Move responder: stash the poll and answer it */
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
7025
7026void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
7027{
7028 lock_sock(sk);
7029
7030 l2cap_pi(sk)->fixed_channel = 1;
7031
7032 l2cap_pi(sk)->imtu = opt->imtu;
7033 l2cap_pi(sk)->omtu = opt->omtu;
7034 l2cap_pi(sk)->remote_mps = opt->omtu;
7035 l2cap_pi(sk)->mps = opt->omtu;
7036 l2cap_pi(sk)->flush_to = opt->flush_to;
7037 l2cap_pi(sk)->mode = opt->mode;
7038 l2cap_pi(sk)->fcs = opt->fcs;
7039 l2cap_pi(sk)->max_tx = opt->max_tx;
7040 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
7041 l2cap_pi(sk)->tx_win = opt->txwin_size;
7042 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
7043 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
7044 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
7045
7046 if (opt->mode == L2CAP_MODE_ERTM ||
7047 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
7048 l2cap_ertm_init(sk);
7049
7050 release_sock(sk);
7051
7052 return;
7053}
7054
/* Maps the S-frame supervisory function field (control->super, values
 * 0-3) to the corresponding ERTM receive-state-machine event.  Indexed
 * directly by control->super in l2cap_data_channel(). */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
7059
/* Process one inbound PDU on a connection-oriented data channel.
 *
 * Basic mode queues the payload straight to the socket (subject to
 * the incoming MTU).  ERTM and streaming modes parse the control
 * field, verify the FCS and payload length, validate the F/P bits,
 * and feed the frame into the appropriate receive state machine.
 * Protocol violations disconnect the channel with ECONNRESET.
 *
 * Always returns 0; @skb is consumed on all paths.
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Control field is 4 bytes with extended sequence
		 * numbers, 2 bytes otherwise */
		control = &bt_cb(skb)->control;
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
					control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
					control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* A start-of-SDU I-frame carries a 2-byte SDU length */
		if ((control->frame_type == 'i') &&
			(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7185
/* Deliver a deferred L2CAP PDU to a data channel with the socket
 * lock held (process context).  @skb ownership passes to
 * l2cap_data_channel(). */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7192
Al Viro8e036fc2007-07-29 00:16:36 -07007193static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007194{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007195 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007196
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007197 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
7198 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007199 goto drop;
7200
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00007201 bh_lock_sock(sk);
7202
Linus Torvalds1da177e2005-04-16 15:20:36 -07007203 BT_DBG("sk %p, len %d", sk, skb->len);
7204
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007205 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007206 goto drop;
7207
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007208 if (l2cap_pi(sk)->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007209 goto drop;
7210
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007211 if (!sock_queue_rcv_skb(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212 goto done;
7213
7214drop:
7215 kfree_skb(skb);
7216
7217done:
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03007218 if (sk)
7219 bh_unlock_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007220 return 0;
7221}
7222
/* Handle a PDU arriving on the LE Attribute Protocol fixed channel.
 *
 * Routes the PDU to the inbound or outbound ATT socket depending on
 * whether it is a response, answers MTU exchange requests inline with
 * the fixed LE ATT MTU (23), and replies with a generic ATT error to
 * unserviceable request PDUs so the remote does not stall waiting.
 *
 * Always returns 0; @skb is consumed on all paths.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid,
			struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_rsp;
	struct l2cap_hdr *lh;
	int dir;
	u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};
	u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
			L2CAP_ATT_NOT_SUPPORTED};

	/* Responses go to the outbound (dir=0) socket, everything else
	 * to the inbound one.
	 * NOTE(review): skb->data[0] is read before any length check -
	 * presumably ATT PDUs are never empty here; confirm caller. */
	dir = (skb->data[0] & L2CAP_ATT_RESPONSE_BIT) ? 0 : 1;

	sk = l2cap_find_sock_by_fixed_cid_and_dir(cid, conn->src,
					conn->dst, dir);

	BT_DBG("sk %p, dir:%d", sk, dir);

	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* Answer MTU exchange requests directly instead of queueing
	 * them to the socket */
	if (skb->data[0] == L2CAP_ATT_MTU_REQ) {
		skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE,
				GFP_ATOMIC);
		if (!skb_rsp)
			goto drop;

		lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
		lh->len = cpu_to_le16(sizeof(mtu_rsp));
		lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
		memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp,
							sizeof(mtu_rsp));
		hci_send_acl(conn->hcon, NULL, skb_rsp, 0);

		goto free_skb;
	}

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	/* Responses and indications get no error reply */
	if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT &&
			skb->data[0] != L2CAP_ATT_INDICATE)
		goto free_skb;

	/* If this is an incoming PDU that requires a response, respond with
	 * a generic error so remote device doesn't hang */

	skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
	if (!skb_rsp)
		goto free_skb;

	lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(sizeof(err_rsp));
	lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
	err_rsp[1] = skb->data[0];	/* echo the offending opcode */
	memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
	hci_send_acl(conn->hcon, NULL, skb_rsp, 0);

free_skb:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
7300
/* Demultiplex a complete, reassembled L2CAP frame by channel ID:
 * (LE) signaling, connectionless (PSM-addressed) data, LE ATT, SMP,
 * A2MP, and finally dynamically allocated data channels.
 * @skb is consumed on all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct sock *sk;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* A rejected SMP PDU tears down the whole link */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES, 0);
		break;

	default:
		/* NOTE(review): the bh_unlock below implies
		 * l2cap_get_chan_by_scid() returns the socket locked -
		 * confirm against its definition. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
		if (sk) {
			if (sock_owned_by_user(sk)) {
				/* Socket busy in process context; defer
				 * via the backlog queue */
				BT_DBG("backlog sk %p", sk);
				if (sk_add_backlog(sk, skb))
					kfree_skb(skb);
			} else
				l2cap_data_channel(sk, skb);

			bh_unlock_sock(sk);
		} else if (cid == L2CAP_CID_A2MP) {
			BT_DBG("A2MP");
			amp_conn_ind(conn->hcon, skb);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			kfree_skb(skb);
		}

		break;
	}
}
7362
7363/* ---- L2CAP interface with lower layer (HCI) ---- */
7364
7365static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7366{
7367 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007368 register struct sock *sk;
7369 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007370
7371 if (type != ACL_LINK)
Mat Martineau8b51dd42012-02-13 10:38:24 -08007372 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373
7374 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7375
7376 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007377 read_lock(&l2cap_sk_list.lock);
7378 sk_for_each(sk, node, &l2cap_sk_list.head) {
7379 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007380 continue;
7381
7382 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007383 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007384 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007385 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007386 exact++;
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007387 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7388 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007389 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007390 lm2 |= HCI_LM_MASTER;
7391 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007392 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007393 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007394
7395 return exact ? lm1 : lm2;
7396}
7397
7398static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7399{
Marcel Holtmann01394182006-07-03 10:02:46 +02007400 struct l2cap_conn *conn;
7401
Linus Torvalds1da177e2005-04-16 15:20:36 -07007402 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7403
Ville Tervoacd7d372011-02-10 22:38:49 -03007404 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007405 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007406
7407 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007408 conn = l2cap_conn_add(hcon, status);
7409 if (conn)
7410 l2cap_conn_ready(conn);
Marcel Holtmann01394182006-07-03 10:02:46 +02007411 } else
Mat Martineau3b9239a2012-02-16 11:54:30 -08007412 l2cap_conn_del(hcon, bt_err(status), 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007413
7414 return 0;
7415}
7416
Marcel Holtmann2950f212009-02-12 14:02:50 +01007417static int l2cap_disconn_ind(struct hci_conn *hcon)
7418{
7419 struct l2cap_conn *conn = hcon->l2cap_data;
7420
7421 BT_DBG("hcon %p", hcon);
7422
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007423 if (hcon->type != ACL_LINK || !conn)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007424 return 0x13;
7425
7426 return conn->disc_reason;
7427}
7428
Mat Martineau3b9239a2012-02-16 11:54:30 -08007429static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason, u8 is_process)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007430{
7431 BT_DBG("hcon %p reason %d", hcon, reason);
7432
Ville Tervoacd7d372011-02-10 22:38:49 -03007433 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007434 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007435
Mat Martineau3b9239a2012-02-16 11:54:30 -08007436 l2cap_conn_del(hcon, bt_err(reason), is_process);
Marcel Holtmann01394182006-07-03 10:02:46 +02007437
Linus Torvalds1da177e2005-04-16 15:20:36 -07007438 return 0;
7439}
7440
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007441static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007442{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007443 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007444 return;
7445
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007446 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007447 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7448 l2cap_sock_clear_timer(sk);
7449 l2cap_sock_set_timer(sk, HZ * 5);
7450 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
7451 __l2cap_sock_close(sk, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007452 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007453 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7454 l2cap_sock_clear_timer(sk);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007455 }
7456}
7457
/* HCI callback: authentication/encryption state changed on a link.
 *
 * Walks every channel on the connection under the channel-list read
 * lock and advances, blocks, or tears down each one depending on its
 * security level and connection state.  LE (SMP-secured) channels are
 * handled after the walk via smp_link_encrypt_cmplt().
 *
 * Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	int smp = 0;	/* set when at least one LE data channel was seen */

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);

		/* LE data channels become ready once the link is
		 * encrypted; SMP completion is signalled after the loop */
		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				l2cap_pi(sk)->sec_level = hcon->sec_level;
				l2cap_chan_ready(sk);
			}

			smp = 1;
			bh_unlock_sock(sk);
			continue;
		}

		/* A connect is already pending for this channel */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the grace-timer check */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel waiting on security: proceed
			 * with the connect request (possibly via AMP),
			 * or schedule a quick teardown on failure */
			if (!status) {
				l2cap_pi(sk)->conf_state |=
					L2CAP_CONF_CONNECT_PEND;
				if (l2cap_pi(sk)->amp_pref ==
						BT_AMP_POLICY_PREFER_AMP) {
					amp_create_physical(l2cap_pi(sk)->conn,
							sk);
				} else
					l2cap_send_conn_req(sk);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel held for security: answer the
			 * peer's connect request now */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				if (l2cap_pi(sk)->amp_id) {
					/* AMP channel: response is sent by
					 * the AMP accept path instead */
					amp_accept_physical(conn,
						l2cap_pi(sk)->amp_id, sk);
					bh_unlock_sock(sk);
					continue;
				}

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	if (smp) {
		del_timer(&hcon->smp_timer);
		smp_link_encrypt_cmplt(conn, status, encrypt);
	}

	return 0;
}
7556
/*
 * HCI ACL data input path for L2CAP: reassemble ACL fragments into a
 * complete L2CAP frame and hand it to l2cap_recv_frame().
 *
 * @hcon:  the ACL link the data arrived on
 * @skb:   one ACL fragment; ownership is taken (always consumed here or
 *         by l2cap_recv_frame())
 * @flags: ACL packet-boundary flags (ACL_START / ACL_CONT)
 *
 * Returns 0 in all cases; errors are reported to the connection via
 * l2cap_conn_unreliable() rather than to the HCI caller.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Only BR/EDR links may create an L2CAP connection on demand here;
	 * data on other link types without an existing conn is dropped. */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is in progress means the
		 * previous frame was truncated: discard it and flag the link. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		/* Total frame length = payload length from the header plus the
		 * Basic L2CAP header itself. */
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		/* NOTE(review): this tests ACL_CONT inside the ACL_START
		 * branch; presumably this tree encodes boundary flags such
		 * that both bits can be seen together — verify against this
		 * tree's ACL_* definitions. */
		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		/* The start fragment must not exceed the advertised length. */
		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		/* Bytes still expected from continuation fragments. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending reassembly is a protocol
		 * violation; drop and flag the link. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Reached both on error and on the normal path: the fragment skb has
	 * been copied (or is unusable) and is always freed here. */
	kfree_skb(skb);
	return 0;
}
7659
Srinivas Krovvidi10734192011-12-29 07:29:11 +05307660static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to)
7661{
7662 struct hci_cp_write_automatic_flush_timeout flush_tm;
7663 if (hcon && hcon->hdev) {
7664 flush_tm.handle = hcon->handle;
7665 if (flush_to == L2CAP_DEFAULT_FLUSH_TO)
7666 flush_to = 0;
7667 flush_tm.timeout = (flush_to < L2CAP_MAX_FLUSH_TO) ?
7668 flush_to : L2CAP_MAX_FLUSH_TO;
7669 hci_send_cmd(hcon->hdev,
7670 HCI_OP_WRITE_AUTOMATIC_FLUSH_TIMEOUT,
7671 4, &(flush_tm));
7672 }
7673}
7674
7675static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l)
7676{
7677 int ret_flush_to = L2CAP_DEFAULT_FLUSH_TO;
7678 struct sock *s;
7679 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
7680 if (l2cap_pi(s)->flush_to > 0 &&
7681 l2cap_pi(s)->flush_to < ret_flush_to)
7682 ret_flush_to = l2cap_pi(s)->flush_to;
7683 }
7684 return ret_flush_to;
7685}
7686
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007687static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007688{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007689 struct sock *sk;
7690 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007691
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007692 read_lock_bh(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007694 sk_for_each(sk, node, &l2cap_sk_list.head) {
7695 struct l2cap_pinfo *pi = l2cap_pi(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007696
Gustavo F. Padovan903d3432011-02-10 14:16:06 -02007697 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007698 batostr(&bt_sk(sk)->src),
7699 batostr(&bt_sk(sk)->dst),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007700 sk->sk_state, __le16_to_cpu(pi->psm),
7701 pi->scid, pi->dcid,
7702 pi->imtu, pi->omtu, pi->sec_level,
7703 pi->mode);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007704 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007705
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007706 read_unlock_bh(&l2cap_sk_list.lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007707
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007708 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007709}
7710
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007711static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7712{
7713 return single_open(file, l2cap_debugfs_show, inode->i_private);
7714}
7715
/* File operations for the read-only "l2cap" debugfs entry (seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
7722
/* Dentry of the "l2cap" debugfs file; NULL if it was never created. */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007724
Linus Torvalds1da177e2005-04-16 15:20:36 -07007725static struct hci_proto l2cap_hci_proto = {
7726 .name = "L2CAP",
7727 .id = HCI_PROTO_L2CAP,
7728 .connect_ind = l2cap_connect_ind,
7729 .connect_cfm = l2cap_connect_cfm,
7730 .disconn_ind = l2cap_disconn_ind,
Marcel Holtmann2950f212009-02-12 14:02:50 +01007731 .disconn_cfm = l2cap_disconn_cfm,
Marcel Holtmann8c1b2352009-01-15 21:58:04 +01007732 .security_cfm = l2cap_security_cfm,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007733 .recv_acldata = l2cap_recv_acldata,
7734 .create_cfm = l2cap_create_cfm,
7735 .modify_cfm = l2cap_modify_cfm,
7736 .destroy_cfm = l2cap_destroy_cfm,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007737};
7738
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007739int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007740{
7741 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007742
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007743 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007744 if (err < 0)
7745 return err;
7746
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007747 _l2cap_wq = create_singlethread_workqueue("l2cap");
7748 if (!_l2cap_wq) {
7749 err = -ENOMEM;
7750 goto error;
7751 }
7752
Linus Torvalds1da177e2005-04-16 15:20:36 -07007753 err = hci_register_proto(&l2cap_hci_proto);
7754 if (err < 0) {
7755 BT_ERR("L2CAP protocol registration failed");
7756 bt_sock_unregister(BTPROTO_L2CAP);
7757 goto error;
7758 }
7759
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007760 if (bt_debugfs) {
7761 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7762 bt_debugfs, NULL, &l2cap_debugfs_fops);
7763 if (!l2cap_debugfs)
7764 BT_ERR("Failed to create L2CAP debug file");
7765 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007766
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007767 if (amp_init() < 0) {
7768 BT_ERR("AMP Manager initialization failed");
7769 goto error;
7770 }
7771
Linus Torvalds1da177e2005-04-16 15:20:36 -07007772 return 0;
7773
7774error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007775 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007776 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007777 return err;
7778}
7779
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007780void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007781{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007782 amp_exit();
7783
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007784 debugfs_remove(l2cap_debugfs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007785
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007786 flush_workqueue(_l2cap_wq);
7787 destroy_workqueue(_l2cap_wq);
7788
Linus Torvalds1da177e2005-04-16 15:20:36 -07007789 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
7790 BT_ERR("L2CAP protocol unregistration failed");
7791
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007792 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007793}
7794
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007795module_param(disable_ertm, bool, 0644);
7796MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007797
7798module_param(enable_reconfig, bool, 0644);
7799MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");