blob: fd9088a4a54aefd1a51520a0e4bea669ecbe1cff [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bhakthavatsala Raghavendrab03b5702013-02-12 19:44:47 +05303 Copyright (c) 2000-2001, 2010-2013 The Linux Foundation. All rights reserved.
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090017 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090022 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 SOFTWARE IS DISCLAIMED.
25*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliab501d6a2011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Rusty Russelleb939922011-12-19 14:08:01 +000061bool disable_ertm;
Peter Krystad1505bfa2012-06-08 10:47:27 -070062bool enable_hs;
Steve Mucklef132c6c2012-06-06 18:30:57 -070063bool enable_reconfig;
Marcel Holtmannf0709e02007-10-20 13:38:51 +020064
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -070065static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
Peter Krystad1505bfa2012-06-08 10:47:27 -070066static u8 l2cap_fc_mask = L2CAP_FC_L2CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068struct workqueue_struct *_l2cap_wq;
69
70struct bt_sock_list l2cap_sk_list = {
71 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
72};
73
74static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
75 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
76static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
77 struct l2cap_pinfo *pi, u16 icid, u16 result);
78static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
79 u16 icid, u16 result);
80
81static void l2cap_amp_move_setup(struct sock *sk);
82static void l2cap_amp_move_success(struct sock *sk);
83static void l2cap_amp_move_revert(struct sock *sk);
84
85static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -070086
Linus Torvalds1da177e2005-04-16 15:20:36 -070087static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
88 u8 code, u8 ident, u16 dlen, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070089static int l2cap_answer_move_poll(struct sock *sk);
90static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
91static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
92static void l2cap_chan_ready(struct sock *sk);
Mat Martineau3b9239a2012-02-16 11:54:30 -080093static void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process);
Srinivas Krovvidi10734192011-12-29 07:29:11 +053094static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l);
95static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to);
Subramanian Srinivasan3e7c75d2012-10-08 17:22:43 -070096static void l2cap_queue_acl_data(struct work_struct *worker);
97static struct att_channel_parameters{
98 struct sk_buff *skb;
99 struct l2cap_conn *conn;
100 __le16 cid;
101 int dir;
102} att_chn_params;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
Marcel Holtmann01394182006-07-03 10:02:46 +0200104/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200106{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700107 struct sock *s;
108 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
109 if (l2cap_pi(s)->dcid == cid)
110 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200111 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700112 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200113}
114
/* Find channel with given DCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
						u16 cid)
{
	struct sock *s;
	/* Hold the list lock only for the lookup; the socket lock taken
	 * here is what the caller must release (bh_unlock_sock). */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_dcid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300128
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700129static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130{
131 struct sock *s;
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
134 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200135 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700136 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200137}
138
/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	/* Lookup under the list read lock; on a hit the socket is returned
	 * bh-locked and the caller must bh_unlock_sock() it. */
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
151
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700152static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200153{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700154 struct sock *s;
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
157 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200158 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700159 return s;
160}
161
/* Find channel with given signalling ident.
 * Returns the socket bh-locked (caller unlocks), or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
172
/* Return the skb in @head whose ERTM TxSeq control value equals @seq,
 * or NULL if no queued frame carries that sequence number. */
static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
						u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
185
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700186static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200187{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188 u16 allocSize = 1;
189 int err = 0;
190 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192 /* Actual allocated size must be a power of 2 */
193 while (allocSize && allocSize <= size)
194 allocSize <<= 1;
195 if (!allocSize)
196 return -ENOMEM;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700198 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
199 if (!seq_list->list)
200 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202 seq_list->size = allocSize;
203 seq_list->mask = allocSize - 1;
204 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
205 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
206 for (i = 0; i < allocSize; i++)
207 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300208
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300209 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300210}
211
/* Release the array backing a sequence list; kfree(NULL) is a no-op, so
 * this is safe on a list whose init failed. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
216
/* True if @seq is currently linked into the list: its slot holds either
 * a successor sequence number or the tail marker, never the clear
 * sentinel. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					u16 seq)
{
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
222
/* Unlink @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or @seq is not present.  The list is a singly linked
 * chain stored in an array: list[x & mask] holds the successor of x, the
 * tail marker, or the clear sentinel when x is not linked. */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the only element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		/* Walk the chain until prev's successor is seq */
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Splice seq out: prev inherits seq's successor */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
262
/* Remove and return the oldest (head) sequence number; returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty. */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
267
268static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
269{
270 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
271 u16 i;
272 for (i = 0; i < seq_list->size; i++)
273 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
274
275 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
276 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
277 }
278}
279
/* Append @seq to the tail of the list.  Duplicate appends are ignored:
 * a slot that is not "clear" means seq is already linked. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
		/* Empty list: seq becomes the head; otherwise link it as
		 * the successor of the current tail. */
		if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
			seq_list->head = seq;
		else
			seq_list->list[seq_list->tail & mask] = seq;

		seq_list->tail = seq;
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
	}
}
296
297static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
298{
299 u16 packed;
300
301 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
302 L2CAP_CTRL_REQSEQ;
303 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
304 L2CAP_CTRL_FINAL;
305
306 if (control->frame_type == 's') {
307 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
308 L2CAP_CTRL_POLL;
309 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
310 L2CAP_CTRL_SUPERVISE;
311 packed |= L2CAP_CTRL_FRAME_TYPE;
312 } else {
313 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
314 L2CAP_CTRL_SAR;
315 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
316 L2CAP_CTRL_TXSEQ;
317 }
318
319 return packed;
320}
321
322static void __get_enhanced_control(u16 enhanced,
323 struct bt_l2cap_control *control)
324{
325 control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
326 L2CAP_CTRL_REQSEQ_SHIFT;
327 control->final = (enhanced & L2CAP_CTRL_FINAL) >>
328 L2CAP_CTRL_FINAL_SHIFT;
329
330 if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
331 control->frame_type = 's';
332 control->poll = (enhanced & L2CAP_CTRL_POLL) >>
333 L2CAP_CTRL_POLL_SHIFT;
334 control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
335 L2CAP_CTRL_SUPERVISE_SHIFT;
336
337 control->sar = 0;
338 control->txseq = 0;
339 } else {
340 control->frame_type = 'i';
341 control->sar = (enhanced & L2CAP_CTRL_SAR) >>
342 L2CAP_CTRL_SAR_SHIFT;
343 control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
344 L2CAP_CTRL_TXSEQ_SHIFT;
345
346 control->poll = 0;
347 control->super = 0;
348 }
349}
350
351static u32 __pack_extended_control(struct bt_l2cap_control *control)
352{
353 u32 packed;
354
355 packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
356 L2CAP_EXT_CTRL_REQSEQ;
357 packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
358 L2CAP_EXT_CTRL_FINAL;
359
360 if (control->frame_type == 's') {
361 packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
362 L2CAP_EXT_CTRL_POLL;
363 packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
364 L2CAP_EXT_CTRL_SUPERVISE;
365 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
366 } else {
367 packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
368 L2CAP_EXT_CTRL_SAR;
369 packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
370 L2CAP_EXT_CTRL_TXSEQ;
371 }
372
373 return packed;
374}
375
376static void __get_extended_control(u32 extended,
377 struct bt_l2cap_control *control)
378{
379 control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
380 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
381 control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
382 L2CAP_EXT_CTRL_FINAL_SHIFT;
383
384 if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
385 control->frame_type = 's';
386 control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
387 L2CAP_EXT_CTRL_POLL_SHIFT;
388 control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
389 L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
390
391 control->sar = 0;
392 control->txseq = 0;
393 } else {
394 control->frame_type = 'i';
395 control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
396 L2CAP_EXT_CTRL_SAR_SHIFT;
397 control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
398 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
399
400 control->poll = 0;
401 control->super = 0;
402 }
403}
404
/* Cancel a pending ERTM acknowledgment timer, if queued. */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
410
/* Arm the ERTM acknowledgment timer unless one is already pending, so
 * an earlier deadline is never pushed back. */
static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
	if (!delayed_work_pending(&pi->ack_work)) {
		queue_delayed_work(_l2cap_wq, &pi->ack_work,
				msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
	}
}
419
/* Cancel a pending ERTM retransmission timer, if queued. */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
425
/* (Re)arm the ERTM retransmission timer.  Skipped while the monitor
 * timer is pending or when no retransmission timeout is configured;
 * otherwise any existing retrans work is cancelled and re-queued so the
 * timeout restarts from now. */
static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
		__cancel_delayed_work(&pi->retrans_work);
		queue_delayed_work(_l2cap_wq, &pi->retrans_work,
			msecs_to_jiffies(pi->retrans_timeout));
	}
}
435
/* Cancel a pending ERTM monitor timer, if queued. */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
441
/* Switch from the retransmission timer to the monitor timer: the
 * retrans timer is stopped first, then the monitor work is restarted
 * from now (if a monitor timeout is configured). */
static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	l2cap_ertm_stop_retrans_timer(pi);
	__cancel_delayed_work(&pi->monitor_work);
	if (pi->monitor_timeout) {
		queue_delayed_work(_l2cap_wq, &pi->monitor_work,
				msecs_to_jiffies(pi->monitor_timeout));
	}
}
452
453static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200454{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300455 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200456
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300457 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700458 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200459 return cid;
460 }
461
462 return 0;
463}
464
/* Push @sk onto the head of the connection's channel list.  Takes a
 * socket reference that l2cap_chan_unlink() releases.
 * NOTE(review): no list lock is taken here, unlike unlink — presumably
 * callers hold it; confirm before adding call sites. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
476
/* Remove @sk from the connection's doubly linked channel list under the
 * list write lock, then drop the reference taken by __l2cap_chan_link(). */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
493
/* Attach channel @sk to @conn: assign CIDs/MTUs according to the socket
 * type, adjust the ACL flush timeout if this channel needs a shorter
 * one, and link the socket into the connection's channel list. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13 = remote-initiated disconnect reason default
	 * NOTE(review): meaning of the constant inferred — confirm. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (!l2cap_pi(sk)->fixed_channel &&
		(sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: clamp MTUs up to the LE default
			 * and use the fixed LE data CID both ways */
			if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
			if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;

			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else if (sk->sk_type == SOCK_RAW) {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	if (l2cap_get_smallest_flushto(l) > l2cap_pi(sk)->flush_to) {
		/*if flush timeout of the channel is lesser than existing */
		l2cap_set_acl_flushto(conn->hcon, l2cap_pi(sk)->flush_to);
	}
	/* Otherwise, do not set scid/dcid/omtu. These will be set up
	 * by l2cap_fixed_channel_config()
	 */

	__l2cap_chan_link(l, sk);
}
543
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900544/* Delete channel.
Marcel Holtmann01394182006-07-03 10:02:46 +0200545 * Must be called on the locked socket. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700546void l2cap_chan_del(struct sock *sk, int err)
Andrei Emeltchenko643162a2012-02-22 17:11:55 +0200547{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700548 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann01394182006-07-03 10:02:46 +0200549 struct sock *parent = bt_sk(sk)->parent;
550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700551 l2cap_sock_clear_timer(sk);
Marcel Holtmann01394182006-07-03 10:02:46 +0200552
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
Marcel Holtmann01394182006-07-03 10:02:46 +0200554
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900555 if (conn) {
Srinivas Krovvidi10734192011-12-29 07:29:11 +0530556 struct l2cap_chan_list *l = &conn->chan_list;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700557 /* Unlink from channel list */
Srinivas Krovvidi10734192011-12-29 07:29:11 +0530558 l2cap_chan_unlink(l, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700559 l2cap_pi(sk)->conn = NULL;
560 if (!l2cap_pi(sk)->fixed_channel)
561 hci_conn_put(conn->hcon);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -0200562
Srinivas Krovvidi10734192011-12-29 07:29:11 +0530563 read_lock(&l->lock);
564 if (l2cap_pi(sk)->flush_to < l2cap_get_smallest_flushto(l))
565 l2cap_set_acl_flushto(conn->hcon,
566 l2cap_get_smallest_flushto(l));
567 read_unlock(&l->lock);
Marcel Holtmann01394182006-07-03 10:02:46 +0200568 }
569
Mat Martineau9f8d4672011-12-14 12:10:46 -0800570 if (l2cap_pi(sk)->ampchan) {
571 struct hci_chan *ampchan = l2cap_pi(sk)->ampchan;
Peter Krystaddcfeee22012-03-07 12:51:18 -0800572 struct hci_conn *ampcon = l2cap_pi(sk)->ampcon;
Mat Martineau9f8d4672011-12-14 12:10:46 -0800573 l2cap_pi(sk)->ampchan = NULL;
Peter Krystaddcfeee22012-03-07 12:51:18 -0800574 l2cap_pi(sk)->ampcon = NULL;
575 l2cap_pi(sk)->amp_id = 0;
576 if (hci_chan_put(ampchan))
577 ampcon->l2cap_data = NULL;
578 else
Peter Krystad2850a822012-02-29 16:58:03 -0800579 l2cap_deaggregate(ampchan, l2cap_pi(sk));
Mat Martineau9f8d4672011-12-14 12:10:46 -0800580 }
Andrei Emeltchenko6be36552012-02-22 17:11:56 +0200581
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700582 sk->sk_state = BT_CLOSED;
Marcel Holtmann01394182006-07-03 10:02:46 +0200583 sock_set_flag(sk, SOCK_ZAPPED);
584
585 if (err)
586 sk->sk_err = err;
587
588 if (parent) {
589 bt_accept_unlink(sk);
590 parent->sk_data_ready(parent, 0);
591 } else
592 sk->sk_state_change(sk);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300593
Mat Martineau380dcd42011-12-19 10:11:30 -0800594 sk->sk_send_head = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700595 skb_queue_purge(TX_QUEUE(sk));
Andrei Emeltchenko6be36552012-02-22 17:11:56 +0200596
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700597 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
598 if (l2cap_pi(sk)->sdu)
599 kfree_skb(l2cap_pi(sk)->sdu);
Gustavo F. Padovan2ead70b2011-04-01 15:13:36 -0300600
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700601 skb_queue_purge(SREJ_QUEUE(sk));
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300602
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700603 __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
604 __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
605 __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300606 }
Marcel Holtmann01394182006-07-03 10:02:46 +0200607}
608
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700609static inline u8 l2cap_get_auth_type(struct sock *sk)
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300610{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700611 if (sk->sk_type == SOCK_RAW) {
612 switch (l2cap_pi(sk)->sec_level) {
Bhakthavatsala Raghavendrab03b5702013-02-12 19:44:47 +0530613 case BT_SECURITY_VERY_HIGH:
Johan Hedberg8556edd32011-01-19 12:06:50 +0530614 case BT_SECURITY_HIGH:
615 return HCI_AT_DEDICATED_BONDING_MITM;
616 case BT_SECURITY_MEDIUM:
617 return HCI_AT_DEDICATED_BONDING;
618 default:
619 return HCI_AT_NO_BONDING;
620 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
622 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
623 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530624
Bhakthavatsala Raghavendrab03b5702013-02-12 19:44:47 +0530625 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH ||
626 l2cap_pi(sk)->sec_level == BT_SECURITY_VERY_HIGH)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530627 return HCI_AT_NO_BONDING_MITM;
628 else
629 return HCI_AT_NO_BONDING;
630 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700631 switch (l2cap_pi(sk)->sec_level) {
Bhakthavatsala Raghavendrab03b5702013-02-12 19:44:47 +0530632 case BT_SECURITY_VERY_HIGH:
Johan Hedberg8556edd32011-01-19 12:06:50 +0530633 case BT_SECURITY_HIGH:
634 return HCI_AT_GENERAL_BONDING_MITM;
635 case BT_SECURITY_MEDIUM:
636 return HCI_AT_GENERAL_BONDING;
637 default:
638 return HCI_AT_NO_BONDING;
639 }
640 }
641}
642
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200643/* Service level security */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644static inline int l2cap_check_security(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200645{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100647 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200648
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700649 auth_type = l2cap_get_auth_type(sk);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100650
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700651 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
652 auth_type);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200653}
654
/* Allocate the next signalling command identifier for @conn, cycling
 * through 1..128 under the connection spinlock. */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
676
/* Compute the CRC-16 FCS over the frame (head plus any fragments) and
 * write it into the last L2CAP_FCS_SIZE bytes of the final fragment.
 * The FCS bytes themselves are excluded from the checksum. */
static void apply_fcs(struct sk_buff *skb)
{
	size_t len;
	u16 partial_crc;
	struct sk_buff *iter;
	struct sk_buff *final_frag = skb;

	/* With a frag list the FCS lives in the last fragment, so the
	 * head is checksummed in full; otherwise trim the trailing FCS
	 * bytes from the head itself. */
	if (skb_has_frag_list(skb))
		len = skb_headlen(skb);
	else
		len = skb->len - L2CAP_FCS_SIZE;

	partial_crc = crc16(0, (u8 *) skb->data, len);

	skb_walk_frags(skb, iter) {
		len = iter->len;
		/* Last fragment: leave room for the FCS */
		if (!iter->next)
			len -= L2CAP_FCS_SIZE;

		partial_crc = crc16(partial_crc, iter->data, len);
		final_frag = iter;
	}

	put_unaligned_le16(partial_crc,
			final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
}
703
704void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200705{
706 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200707 u8 flags;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200708
709 BT_DBG("code 0x%2.2x", code);
710
711 if (!skb)
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -0300712 return;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200713
Sunny Kapdi059f5ba2012-07-27 16:19:43 -0700714 if (conn->hcon == NULL || conn->hcon->hdev == NULL)
715 return;
716
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200717 if (lmp_no_flush_capable(conn->hcon->hdev))
718 flags = ACL_START_NO_FLUSH;
719 else
720 flags = ACL_START;
721
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700722 bt_cb(skb)->force_active = 1;
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -0700723
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700724 hci_send_acl(conn->hcon, NULL, skb, flags);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200725}
726
/* True when no L2CAP connect request is outstanding on this channel. */
static inline int __l2cap_no_conn_pending(struct sock *sk)
{
	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
}
731
/* Send an L2CAP Connection Request for this channel's SCID/PSM and
 * remember the signalling ident used, so the response can be matched. */
static void l2cap_send_conn_req(struct sock *sk)
{
	struct l2cap_conn_req req;
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	req.psm = l2cap_pi(sk)->psm;

	l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);

	l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
		L2CAP_CONN_REQ, sizeof(req), &req);
}
743
/* Send an AMP Create Channel Request targeting controller @amp_id and
 * switch configuration into lockstep mode; the ident used is stored for
 * response matching. */
static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	req.psm = l2cap_pi(sk)->psm;
	req.amp_id = amp_id;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
	l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);

	l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
			L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
}
757
/* Kick off connection establishment for one channel.
 *
 * If the remote feature mask has already been requested, wait until it is
 * known (REQ_DONE), then — provided security is satisfied and no connect
 * request is already pending — either start AMP physical link creation
 * (when the channel prefers AMP, high-speed is enabled and the peer
 * advertised A2MP fixed-channel support) or send a plain Connect Request.
 *
 * Otherwise, first query the remote feature mask with an Information
 * Request; l2cap_conn_start() is re-run from the response handler or from
 * l2cap_info_timeout() if no response arrives.
 *
 * Caller must hold the socket lock (not taken here).
 */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight or done; act only when done. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			if (l2cap_pi(sk)->amp_pref ==
					BT_AMP_POLICY_PREFER_AMP &&
					enable_hs &&
					conn->fc_mask & L2CAP_FC_A2MP)
				amp_create_physical(conn, sk);
			else
				l2cap_send_conn_req(sk);
		}
	} else {
		/* First channel on this link: learn remote features first. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
791
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300792static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
793{
794 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300795 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300796 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
797
798 switch (mode) {
799 case L2CAP_MODE_ERTM:
800 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
801 case L2CAP_MODE_STREAMING:
802 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
803 default:
804 return 0x00;
805 }
806}
807
/* Send an L2CAP Disconnect Request for the channel and move it to
 * BT_DISCONN with the given error reported via sk_err.
 *
 * Before signaling, all pending transmit data is dropped (send head and
 * TX queue); for ERTM channels the SREJ queue is purged and the ack,
 * retransmission and monitor delayed works are cancelled so no further
 * ERTM traffic is generated on a dying channel.
 *
 * A NULL conn is tolerated and turns the call into a no-op.
 */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk->sk_send_head = NULL;
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
834
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200836static void l2cap_conn_start(struct l2cap_conn *conn)
837{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700838 struct l2cap_chan_list *l = &conn->chan_list;
839 struct sock_del_list del, *tmp1, *tmp2;
840 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200841
842 BT_DBG("conn %p", conn);
843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300847
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700848 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200849 bh_lock_sock(sk);
850
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 if (sk->sk_type != SOCK_SEQPACKET &&
852 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200853 bh_unlock_sock(sk);
854 continue;
855 }
856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700857 if (sk->sk_state == BT_CONNECT) {
858 if (!l2cap_check_security(sk) ||
859 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300860 bh_unlock_sock(sk);
861 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200862 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300863
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700864 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
865 conn->feat_mask)
866 && l2cap_pi(sk)->conf_state &
867 L2CAP_CONF_STATE2_DEVICE) {
868 tmp1 = kzalloc(sizeof(struct sock_del_list),
869 GFP_ATOMIC);
870 tmp1->sk = sk;
871 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300872 bh_unlock_sock(sk);
873 continue;
874 }
875
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700876 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300877
Peter Krystadc446d212011-09-20 15:35:50 -0700878 if (l2cap_pi(sk)->amp_pref ==
879 BT_AMP_POLICY_PREFER_AMP &&
Peter Krystad1505bfa2012-06-08 10:47:27 -0700880 enable_hs &&
Peter Krystadc446d212011-09-20 15:35:50 -0700881 conn->fc_mask & L2CAP_FC_A2MP)
882 amp_create_physical(conn, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883 else
884 l2cap_send_conn_req(sk);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200885
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700886 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200887 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300888 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700889 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
890 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100893 if (bt_sk(sk)->defer_setup) {
894 struct sock *parent = bt_sk(sk)->parent;
895 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
896 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Ilia Kolomisnky05e9a2f2011-07-15 18:30:21 +0000897 if (parent)
898 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100899
900 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700901 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100902 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
903 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
904 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200905 } else {
906 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
907 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
908 }
909
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700910 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
911 l2cap_pi(sk)->amp_id) {
912 amp_accept_physical(conn,
913 l2cap_pi(sk)->amp_id, sk);
914 bh_unlock_sock(sk);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300915 continue;
916 }
917
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700918 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
919 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
920
921 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300922 rsp.result != L2CAP_CR_SUCCESS) {
923 bh_unlock_sock(sk);
924 continue;
925 }
926
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700927 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300928 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700929 l2cap_build_conf_req(sk, buf), buf);
930 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200931 }
932
933 bh_unlock_sock(sk);
934 }
935
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700936 read_unlock(&l->lock);
937
938 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
939 bh_lock_sock(tmp1->sk);
940 __l2cap_sock_close(tmp1->sk, ECONNRESET);
941 bh_unlock_sock(tmp1->sk);
942 list_del(&tmp1->list);
943 kfree(tmp1);
944 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200945}
946
/* Find socket with fixed cid with given source and destination bdaddrs.
 * Direction of the req/rsp must match.
 *
 * Scans the global l2cap socket list under its read lock.  A socket whose
 * source address is BDADDR_ANY is remembered as a fallback; an exact
 * source match terminates the scan immediately.  Returns the exact match
 * if found (sk, with node still non-NULL after the break), otherwise the
 * closest match or NULL.  The returned socket is not locked and no
 * reference is taken.
 */
struct sock *l2cap_find_sock_by_fixed_cid_and_dir(__le16 cid, bdaddr_t *src,
						bdaddr_t *dst, int incoming)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	BT_DBG(" %d", incoming);

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {

		/* Only consider sockets whose direction matches. */
		if (incoming && !l2cap_pi(sk)->incoming)
			continue;

		if (!incoming && l2cap_pi(sk)->incoming)
			continue;

		if (l2cap_pi(sk)->scid == cid && !bacmp(&bt_sk(sk)->dst, dst)) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node != NULL means the loop broke on an exact match. */
	return node ? sk : sk1;
}
983
Ville Tervob62f3282011-02-10 22:38:50 -0300984/* Find socket with cid and source bdaddr.
985 * Returns closest match, locked.
986 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700987static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
Ville Tervob62f3282011-02-10 22:38:50 -0300988{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700989 struct sock *sk = NULL, *sk1 = NULL;
990 struct hlist_node *node;
Ville Tervob62f3282011-02-10 22:38:50 -0300991
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700992 read_lock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300993
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700994 sk_for_each(sk, node, &l2cap_sk_list.head) {
995 if (state && sk->sk_state != state)
Ville Tervob62f3282011-02-10 22:38:50 -0300996 continue;
997
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700998 if (l2cap_pi(sk)->scid == cid) {
Ville Tervob62f3282011-02-10 22:38:50 -0300999 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001000 if (!bacmp(&bt_sk(sk)->src, src))
1001 break;
Ville Tervob62f3282011-02-10 22:38:50 -03001002
1003 /* Closest match */
1004 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001005 sk1 = sk;
Ville Tervob62f3282011-02-10 22:38:50 -03001006 }
1007 }
Gustavo F. Padovan280f2942011-04-13 19:01:22 -03001008
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001009 read_unlock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -03001010
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001011 return node ? sk : sk1;
Ville Tervob62f3282011-02-10 22:38:50 -03001012}
1013
/* Handle an LE link becoming ready: if a socket is listening on the LE
 * data fixed channel for this adapter, allocate a child socket, attach it
 * to the connection and place it on the parent's accept queue in the
 * BT_CONNECTED state.
 *
 * The parent is locked for the whole operation; channel-list insertion
 * happens under the list write lock.  Silently returns if there is no
 * listener, the accept backlog is full, or allocation fails (GFP_ATOMIC).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Hold the ACL while the child channel exists. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->incoming = 1;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

clean:
	/* Success path falls through here too; only the parent lock is
	 * released. */
	bh_unlock_sock(parent);
}
1060
/* Drive all channels on a link that has just become ready.
 *
 * For an incoming LE link, first offer the connection to any LE-data
 * listener (l2cap_le_conn_ready).  Then, under the channel-list read
 * lock, per channel: on LE links elevate the requested security level to
 * at least the link's pending level and mark the channel ready once SMP
 * security is satisfied; raw (non-SEQPACKET/STREAM) sockets go straight
 * to BT_CONNECTED; BT_CONNECT channels start the setup sequence.  A link
 * with no channels yet but of LE type just requests high security.
 * Outgoing LE links are offered to the LE listener last.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	if (l->head) {
		for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
			bh_lock_sock(sk);

			if (conn->hcon->type == LE_LINK) {
				u8 sec_level = l2cap_pi(sk)->sec_level;
				u8 pending_sec = conn->hcon->pending_sec_level;

				/* Never request less than what is already
				 * pending on the link. */
				if (pending_sec > sec_level)
					sec_level = pending_sec;

				if (smp_conn_security(conn, sec_level))
					l2cap_chan_ready(sk);

				/* NOTE(review): drops one hcon reference per
				 * LE channel each time the link becomes
				 * ready — presumably balancing a hold taken
				 * on the connect path; verify refcounting. */
				hci_conn_put(conn->hcon);

			} else if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				l2cap_sock_clear_timer(sk);
				sk->sk_state = BT_CONNECTED;
				sk->sk_state_change(sk);
			} else if (sk->sk_state == BT_CONNECT)
				l2cap_do_start(sk);

			bh_unlock_sock(sk);
		}
	} else if (conn->hcon->type == LE_LINK) {
		smp_conn_security(conn, BT_SECURITY_HIGH);
	}

	read_unlock(&l->lock);

	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);
}
1108
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001109/* Notify sockets that we cannot guaranty reliability anymore */
1110static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1111{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001112 struct l2cap_chan_list *l = &conn->chan_list;
1113 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001114
1115 BT_DBG("conn %p", conn);
1116
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001117 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001119 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1120 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001121 sk->sk_err = err;
1122 }
1123
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001125}
1126
/* Information Request timer callback: the peer never answered the
 * feature-mask query, so mark the exchange done (with whatever defaults
 * apply), clear the outstanding identifier, and let channel setup
 * proceed anyway via l2cap_conn_start().  Runs in timer (softirq)
 * context; arg is the l2cap_conn pointer stashed at setup_timer() time.
 */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1136
/* Allocate and attach an l2cap_conn to an HCI connection.
 *
 * Returns the existing conn if one is already attached; returns it (i.e.
 * NULL) unchanged when status is non-zero, since a failed HCI connection
 * gets no L2CAP state.  Allocation is GFP_ATOMIC and may return NULL.
 *
 * The MTU is taken from the controller's LE buffer size for LE links
 * (when advertised), otherwise from the ACL buffer size.  LE links arm
 * the SMP timeout timer on the hcon; BR/EDR links arm the Information
 * Request timer on the conn.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	if (hcon->type == LE_LINK)
		setup_timer(&hcon->smp_timer, smp_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: default disconnect reason (remote user terminated);
	 * overwritten when a more specific reason is learned. */
	conn->disc_reason = 0x13;

	return conn;
}
1177
/* Tear down L2CAP state tied to an HCI connection going away.
 *
 * hcon may be either the BR/EDR link owning the conn or an AMP link some
 * channels were moved to; only channels attached to this hcon are killed.
 * is_process selects the locking flavor: lock_sock()/release_sock() when
 * called from process context, bh_lock_sock()/bh_unlock_sock() from
 * softirq context.
 *
 * The conn itself (pending rx_skb, info timer, the struct) is freed only
 * when hcon is the owning link.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err, u8 is_process)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* A partially reassembled frame can never complete now. */
	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			/* Capture the successor before l2cap_chan_del()
			 * unlinks sk from the list. */
			next = l2cap_pi(sk)->next_c;
			if (is_process)
				lock_sock(sk);
			else
				bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			if (is_process)
				release_sock(sk);
			else
				bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	if (conn->hcon == hcon) {
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		hcon->l2cap_data = NULL;

		kfree(conn);
	}
	/* NOTE(review): cleared unconditionally, even when this hcon is an
	 * AMP link and the conn survives — verify this global really should
	 * be reset on every teardown path. */
	att_chn_params.conn = NULL;
	BT_DBG("att_chn_params.conn set to NULL");
}
1225
/* Add a channel to the connection's channel list, taking the list write
 * lock (bottom-half safe) around the unlocked helper __l2cap_chan_add().
 */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk);
	write_unlock_bh(&l->lock);
}
1233
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235
1236/* Find socket with psm and source bdaddr.
1237 * Returns closest match.
1238 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001239static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001241 struct sock *sk = NULL, *sk1 = NULL;
1242 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001244 read_lock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001245
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001246 sk_for_each(sk, node, &l2cap_sk_list.head) {
1247 if (state && sk->sk_state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 continue;
1249
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001250 if (l2cap_pi(sk)->psm == psm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001252 if (!bacmp(&bt_sk(sk)->src, src))
1253 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
1255 /* Closest match */
1256 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001257 sk1 = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 }
1259 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001261 read_unlock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001262
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001263 return node ? sk : sk1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264}
1265
/* Initiate an outgoing L2CAP connection for the socket.
 *
 * Resolves the route from the socket's source to destination address,
 * then either reuses an existing ACL (fixed channels) or creates a new
 * LE or BR/EDR HCI connection with the socket's security level and auth
 * type, attaches an l2cap_conn, and links the channel in.
 *
 * If the underlying link is already up (fixed channel, or LE data with a
 * connected hcon), the socket goes straight to BT_CONNECTED; otherwise it
 * enters BT_CONNECT with the connect timer armed (LE connection-timeout
 * parameter, when valid and non-zero, overrides sk_sndtimeo) and, for a
 * connected hcon, setup continues via l2cap_do_start().
 *
 * Returns 0 on success; -EHOSTUNREACH if no adapter routes to dst,
 * -ENOTCONN for a fixed channel without an established ACL, -ENOMEM if
 * conn allocation fails, or the hci_connect()/hci_le_connect() error.
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_le_connect(hdev, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type,
					&bt_sk(sk)->le_params);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	if ((l2cap_pi(sk)->fixed_channel) ||
			(l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
			 hcon->state == BT_CONNECTED)) {
		/* Link already usable: no channel signaling needed. */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		/* If we have valid LE Params, let timeout override default */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
			l2cap_sock_le_params_valid(&bt_sk(sk)->le_params)) {
			u16 timeout = bt_sk(sk)->le_params.conn_timeout;

			if (timeout)
				l2cap_sock_set_timer(sk,
						msecs_to_jiffies(timeout*1000));
		} else
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

		sk->sk_state_change(sk);

		if (hcon->state == BT_CONNECTED) {
			if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
				/* Raw sockets need no channel setup. */
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1363
/* Block (interruptibly) until all ERTM frames have been acknowledged and
 * the transmit queue has drained, i.e. until unacked_frames reaches 0,
 * the connection goes away, or ertm_queued drops to 0.
 *
 * Standard wait-queue pattern: state is set to TASK_INTERRUPTIBLE before
 * re-checking the condition, and the socket lock is dropped across
 * schedule_timeout() so the receive path can make progress.  Returns 0 on
 * success, a signal errno if interrupted, or the socket error if one is
 * raised while waiting.  Caller must hold the socket lock.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after each wakeup. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1395
/* Workqueue handler that resumes ERTM transmission for a channel.
 *
 * The work item is embedded in l2cap_pinfo; the cast to struct sock
 * relies on l2cap_pinfo starting with the sock (bt_sock) structure.
 * The queuer holds a socket reference on our behalf (see
 * l2cap_skb_destructor); it is released here with sock_put() after
 * sending under the socket lock.
 */
static void l2cap_ertm_tx_worker(struct work_struct *work)
{
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, tx_work);
	struct sock *sk = (struct sock *)pi;
	BT_DBG("%p", pi);

	lock_sock(sk);
	l2cap_ertm_send(sk);
	release_sock(sk);
	sock_put(sk);
}
1408
/* skb destructor for ERTM frames handed to the HCI layer.
 *
 * Decrements the channel's in-flight counter and, when it falls below
 * the low-water mark, schedules the TX worker to refill the pipeline.
 * Reference handling is subtle: each queued skb holds a socket
 * reference.  queue_work() returns non-zero when it actually queued the
 * work, in which case the reference is transferred to the worker (which
 * does the sock_put, see l2cap_ertm_tx_worker); otherwise we release it
 * here.
 */
static void l2cap_skb_destructor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queued;
	int keep_sk = 0;

	queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
	if (queued < L2CAP_MIN_ERTM_QUEUED)
		keep_sk = queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);

	if (!keep_sk)
		sock_put(sk);
}
1422
/* Transmit one L2CAP frame on whichever link currently carries the
 * channel.
 *
 * If the channel has been moved to an AMP controller and the move state
 * is stable (or preparing), the frame goes out on the AMP logical link;
 * without a logical channel the skb is dropped.  Otherwise it goes out
 * on the BR/EDR ACL, choosing non-flushable packet boundary flags when
 * the controller supports non-flushable packets and the channel is not
 * marked flushable.  A channel with no conn (already detached) drops the
 * skb rather than dereferencing NULL.  Ownership of skb always passes to
 * this function.
 */
void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
			pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
		BT_DBG("Sending on AMP connection %p %p",
			pi->ampcon, pi->ampchan);
		if (pi->ampchan)
			hci_send_acl(pi->ampcon, pi->ampchan, skb,
					ACL_COMPLETE);
		else
			kfree_skb(skb);
	} else {
		u16 flags;

		if (!(pi->conn)) {
			kfree_skb(skb);
			return;
		}

		bt_cb(skb)->force_active = pi->force_active;
		BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);

		if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
				!l2cap_pi(sk)->flushable)
			flags = ACL_START_NO_FLUSH;
		else
			flags = ACL_START;

		hci_send_acl(pi->conn->hcon, NULL, skb, flags);
	}
}
1458
/* Transmit queued I-frames for an ERTM channel, up to the remote's tx
 * window and the local L2CAP_MAX_ERTM_QUEUED limit.
 *
 * Each frame's control field is stamped with the current reqseq/txseq,
 * the FCS is (re)computed if enabled, and a clone of the skb is handed
 * to the HCI layer so the original remains queued for retransmission.
 *
 * Returns the number of frames handed to HCI, or -ENOTCONN if the
 * socket is not connected.
 */
int l2cap_ertm_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p", sk);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer has signalled RNR; hold all I-frames until it recovers. */
	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return 0;

	/* No data may be sent while an AMP channel move is in progress. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
		atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
		(pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {

		skb = sk->sk_send_head;

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back a pending F-bit on this I-frame. */
		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
			control->final = 1;
			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		}
		/* Every outgoing I-frame also acknowledges received data. */
		control->reqseq = pi->buffer_seq;
		pi->last_acked_seq = pi->buffer_seq;
		control->txseq = pi->next_tx_seq;

		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		/* Clone after data has been modified. Data is assumed to be
			read-only (for locking purposes) on cloned sk_buffs.
			*/
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		if (!tx_skb)
			break;

		/* The destructor drops this socket reference and the
		 * ertm_queued count when HCI frees the clone.
		 */
		sock_hold(sk);
		tx_skb->sk = sk;
		tx_skb->destructor = l2cap_skb_destructor;
		atomic_inc(&pi->ertm_queued);

		l2cap_ertm_start_retrans_timer(pi);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->unacked_frames += 1;
		pi->frames_sent += 1;
		sent += 1;

		/* Advance send head; the original stays queued until acked. */
		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		l2cap_do_send(sk, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
		(int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
		atomic_read(&pi->ertm_queued));

	return sent;
}
1541
/* Transmit a batch of frames in streaming mode.
 *
 * Streaming mode has no retransmission: every frame is stamped with the
 * next tx sequence number and sent immediately; nothing stays queued.
 *
 * Returns 0 on success or -ENOTCONN if the socket is not connected.
 * NOTE(review): a local `sent` counter is maintained and logged but the
 * function always returns 0 on success — presumably callers only check
 * for errors; confirm before changing the return value.
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* No data may be sent while an AMP channel move is in progress. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* reqseq is unused in streaming mode. */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1599
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001600static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Szymon Jancb17e73b2012-01-11 10:59:47 +01001601{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001602 while (len > 0) {
1603 if (iv->iov_len) {
1604 int copy = min_t(unsigned int, len, iv->iov_len);
1605 memcpy(kdata, iv->iov_base, copy);
1606 len -= copy;
1607 kdata += copy;
1608 iv->iov_base += copy;
1609 iv->iov_len -= copy;
1610 }
1611 iv++;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001612 }
1613
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001614 return 0;
Szymon Jancb17e73b2012-01-11 10:59:47 +01001615}
1616
/* Fill skb (and MTU-sized continuation fragments chained on its
 * frag_list) with len bytes of payload from msg's iovec.
 *
 * @count: bytes that fit in the first skb (already sized by the caller).
 * @reseg: nonzero when resegmenting in atomic context; data is then read
 *         from kernel space (kvec) and allocations use GFP_ATOMIC.
 *
 * If the channel uses CRC16 FCS, tailroom for the FCS is reserved on the
 * last fragment (or a dedicated fragment is appended) so apply_fcs() can
 * write it later.
 *
 * Returns bytes copied, -ENOTCONN without a connection, or -EFAULT on
 * copy/allocation failure.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
					int len, int count, struct sk_buff *skb,
					int reseg)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	struct sk_buff *final;	/* last fragment; receives FCS tailroom */
	int err, sent = 0;

	BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
		msg, (int)len, (int)count, skb);

	if (!conn)
		return -ENOTCONN;

	/* When resegmenting, data is copied from kernel space */
	if (reseg) {
		err = memcpy_fromkvec(skb_put(skb, count),
				(struct kvec *) msg->msg_iov, count);
	} else {
		err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
					count);
	}

	if (err)
		return -EFAULT;

	sent += count;
	len -= count;
	final = skb;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		int skblen;
		count = min_t(unsigned int, conn->mtu, len);

		/* Add room for the FCS if it fits */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
			len + L2CAP_FCS_SIZE <= conn->mtu)
			skblen = count + L2CAP_FCS_SIZE;
		else
			skblen = count;

		/* Don't use bt_skb_send_alloc() while resegmenting, since
		 * it is not ok to block.
		 */
		if (reseg) {
			*frag = bt_skb_alloc(skblen, GFP_ATOMIC);
			if (*frag)
				skb_set_owner_w(*frag, sk);
		} else {
			*frag = bt_skb_send_alloc(sk, skblen,
					msg->msg_flags & MSG_DONTWAIT, &err);
		}

		if (!*frag)
			return -EFAULT;

		/* When resegmenting, data is copied from kernel space */
		if (reseg) {
			err = memcpy_fromkvec(skb_put(*frag, count),
					(struct kvec *) msg->msg_iov,
					count);
		} else {
			err = memcpy_fromiovec(skb_put(*frag, count),
					msg->msg_iov, count);
		}

		if (err)
			return -EFAULT;

		sent += count;
		len -= count;

		final = *frag;

		frag = &(*frag)->next;
	}

	/* Guarantee tailroom for the 2-byte FCS on the last fragment. */
	if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
		if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
			if (reseg) {
				*frag = bt_skb_alloc(L2CAP_FCS_SIZE,
						GFP_ATOMIC);
				if (*frag)
					skb_set_owner_w(*frag, sk);
			} else {
				*frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
						msg->msg_flags & MSG_DONTWAIT,
						&err);
			}

			if (!*frag)
				return -EFAULT;

			final = *frag;
		}

		skb_put(final, L2CAP_FCS_SIZE);
	}

	return sent;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the payload copied from the caller's iovec.
 *
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	/* hlen: basic header plus the 2-byte PSM field written below. */
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	/* Length covers PSM + payload (everything after the basic header). */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1750
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001751struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001752{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001753 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001754 struct sk_buff *skb;
1755 int err, count, hlen = L2CAP_HDR_SIZE;
1756 struct l2cap_hdr *lh;
1757
1758 BT_DBG("sk %p len %d", sk, (int)len);
1759
1760 count = min_t(unsigned int, (conn->mtu - hlen), len);
1761 skb = bt_skb_send_alloc(sk, count + hlen,
1762 msg->msg_flags & MSG_DONTWAIT, &err);
1763 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001764 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001765
1766 /* Create L2CAP header */
1767 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001768 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001769 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001772 if (unlikely(err < 0)) {
1773 kfree_skb(skb);
1774 return ERR_PTR(err);
1775 }
1776 return skb;
1777}
1778
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001779struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1780 struct msghdr *msg, size_t len,
1781 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001782{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001783 struct sk_buff *skb;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03001784 int err, count, hlen;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001785 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001786 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001787 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001788
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001789 if (l2cap_pi(sk)->extended_control)
1790 hlen = L2CAP_EXTENDED_HDR_SIZE;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03001791 else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001792 hlen = L2CAP_ENHANCED_HDR_SIZE;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03001793
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001794 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03001795 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001796
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001797 if (fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03001798 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001799
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001800 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1801 sk, msg, (int)len, (int)sdulen, hlen);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02001802
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001803 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1804
1805 /* Allocate extra headroom for Qualcomm PAL. This is only
1806 * necessary in two places (here and when creating sframes)
1807 * because only unfragmented iframes and sframes are sent
1808 * using AMP controllers.
1809 */
1810 if (l2cap_pi(sk)->ampcon &&
1811 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1812 reserve = BT_SKB_RESERVE_80211;
1813
1814 /* Don't use bt_skb_send_alloc() while resegmenting, since
1815 * it is not ok to block.
1816 */
1817 if (reseg) {
1818 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1819 if (skb)
1820 skb_set_owner_w(skb, sk);
1821 } else {
1822 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02001823 msg->msg_flags & MSG_DONTWAIT, &err);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001824 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001825 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001826 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001827
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001828 if (reserve)
1829 skb_reserve(skb, reserve);
1830
1831 bt_cb(skb)->control.fcs = fcs;
1832
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001833 /* Create L2CAP header */
1834 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001835 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1836 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03001837
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001838 /* Control header is populated later */
1839 if (l2cap_pi(sk)->extended_control)
1840 put_unaligned_le32(0, skb_put(skb, 4));
1841 else
1842 put_unaligned_le16(0, skb_put(skb, 2));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03001843
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001844 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03001845 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001846
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001847 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001848 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001849 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001850 kfree_skb(skb);
1851 return ERR_PTR(err);
1852 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001853
1854 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001855 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856}
1857
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001858static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001859{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001860 struct l2cap_pinfo *pi;
1861 struct sk_buff *acked_skb;
1862 u16 ackseq;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001863
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001864 BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001865
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001866 pi = l2cap_pi(sk);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001867
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001868 if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
1869 return;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001870
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001871 BT_DBG("expected_ack_seq %d, unacked_frames %d",
1872 (int) pi->expected_ack_seq, (int) pi->unacked_frames);
1873
1874 for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
1875 ackseq = __next_seq(ackseq, pi)) {
1876
1877 acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
1878 if (acked_skb) {
1879 skb_unlink(acked_skb, TX_QUEUE(sk));
1880 kfree_skb(acked_skb);
1881 pi->unacked_frames--;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001882 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001883 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001884
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001885 pi->expected_ack_seq = reqseq;
1886
1887 if (pi->unacked_frames == 0)
1888 l2cap_ertm_stop_retrans_timer(pi);
1889
1890 BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001891}
1892
/* Build a complete S-frame PDU (header, control field, optional FCS)
 * ready to transmit. Unlike I-frames, the control field and FCS are
 * final here since S-frames carry no payload.
 *
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
{
	struct sk_buff *skb;
	int len;
	int reserve = 0;
	struct l2cap_hdr *lh;

	if (l2cap_pi(sk)->extended_control)
		len = L2CAP_EXTENDED_HDR_SIZE;
	else
		len = L2CAP_ENHANCED_HDR_SIZE;

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		len += L2CAP_FCS_SIZE;

	/* Allocate extra headroom for Qualcomm PAL */
	if (l2cap_pi(sk)->ampcon &&
			l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
		reserve = BT_SKB_RESERVE_80211;

	skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (reserve)
		skb_reserve(skb, reserve);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);

	/* 4-byte extended or 2-byte enhanced control field. */
	if (l2cap_pi(sk)->extended_control)
		put_unaligned_le32(control, skb_put(skb, 4));
	else
		put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers everything written so far (header + control). */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	return skb;
}
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03001937
/* Transmit an S-frame described by @control, updating connection state
 * flags (SEND_FBIT, SENT_RNR) and the acknowledgement bookkeeping as a
 * side effect. Silently ignores non-S-frame control blocks and refuses
 * to send during most AMP channel-move states.
 */
static void l2cap_ertm_send_sframe(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("sk %p, control %p", sk, control);

	if (control->frame_type != 's')
		return;

	pi = l2cap_pi(sk);

	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
		pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		BT_DBG("AMP error - attempted S-Frame send during AMP move");
		return;
	}

	/* Piggy-back a pending F-bit, but never on a poll frame. */
	if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
		control->final = 1;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Track whether the peer currently believes we are busy. */
	if (control->super == L2CAP_SFRAME_RR)
		pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
	else if (control->super == L2CAP_SFRAME_RNR)
		pi->conn_state |= L2CAP_CONN_SENT_RNR;

	/* RR/RNR/REJ acknowledge up to reqseq; SREJ does not. */
	if (control->super != L2CAP_SFRAME_SREJ) {
		pi->last_acked_seq = control->reqseq;
		l2cap_ertm_stop_ack_timer(pi);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
		(int) control->final, (int) control->poll,
		(int) control->super);

	if (pi->extended_control)
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(sk, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(sk, skb);
}
1987
1988static void l2cap_ertm_send_ack(struct sock *sk)
1989{
1990 struct l2cap_pinfo *pi = l2cap_pi(sk);
1991 struct bt_l2cap_control control;
1992 u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
1993 int threshold;
1994
1995 BT_DBG("sk %p", sk);
1996 BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
1997 (int)pi->buffer_seq);
1998
1999 memset(&control, 0, sizeof(control));
2000 control.frame_type = 's';
2001
2002 if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2003 pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
2004 l2cap_ertm_stop_ack_timer(pi);
2005 control.super = L2CAP_SFRAME_RNR;
2006 control.reqseq = pi->buffer_seq;
2007 l2cap_ertm_send_sframe(sk, &control);
2008 } else {
2009 if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
2010 l2cap_ertm_send(sk);
2011 /* If any i-frames were sent, they included an ack */
2012 if (pi->buffer_seq == pi->last_acked_seq)
2013 frames_to_ack = 0;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002014 }
2015
Mat Martineauda318242012-06-22 11:36:18 -07002016 /* Ack now if the window is 3/4ths full.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002017 * Calculate without mul or div
2018 */
Mat Martineauda318242012-06-22 11:36:18 -07002019 threshold = pi->ack_win;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002020 threshold += threshold << 1;
2021 threshold >>= 2;
2022
2023 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
2024 threshold);
2025
2026 if (frames_to_ack >= threshold) {
2027 l2cap_ertm_stop_ack_timer(pi);
2028 control.super = L2CAP_SFRAME_RR;
2029 control.reqseq = pi->buffer_seq;
2030 l2cap_ertm_send_sframe(sk, &control);
2031 frames_to_ack = 0;
2032 }
2033
2034 if (frames_to_ack)
2035 l2cap_ertm_start_ack_timer(pi);
2036 }
2037}
2038
2039static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
2040{
2041 struct l2cap_pinfo *pi;
2042 struct bt_l2cap_control control;
2043
2044 BT_DBG("sk %p, poll %d", sk, (int) poll);
2045
2046 pi = l2cap_pi(sk);
2047
2048 memset(&control, 0, sizeof(control));
2049 control.frame_type = 's';
2050 control.poll = poll;
2051
2052 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
2053 control.super = L2CAP_SFRAME_RNR;
2054 else
2055 control.super = L2CAP_SFRAME_RR;
2056
2057 control.reqseq = pi->buffer_seq;
2058 l2cap_ertm_send_sframe(sk, &control);
2059}
2060
2061static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
2062{
2063 struct l2cap_pinfo *pi;
2064 struct bt_l2cap_control control;
2065
2066 BT_DBG("sk %p", sk);
2067
2068 pi = l2cap_pi(sk);
2069
2070 memset(&control, 0, sizeof(control));
2071 control.frame_type = 's';
2072 control.final = 1;
2073 control.reqseq = pi->buffer_seq;
2074 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
2075
2076 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2077 control.super = L2CAP_SFRAME_RNR;
2078 l2cap_ertm_send_sframe(sk, &control);
2079 }
2080
2081 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
2082 (pi->unacked_frames > 0))
2083 l2cap_ertm_start_retrans_timer(pi);
2084
2085 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
2086
2087 /* Send pending iframes */
2088 l2cap_ertm_send(sk);
2089
2090 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
2091 /* F-bit wasn't sent in an s-frame or i-frame yet, so
2092 * send it now.
2093 */
2094 control.super = L2CAP_SFRAME_RR;
2095 l2cap_ertm_send_sframe(sk, &control);
2096 }
2097}
2098
2099static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
2100{
2101 struct bt_l2cap_control control;
2102 struct l2cap_pinfo *pi;
2103 u16 seq;
2104
2105 BT_DBG("sk %p, txseq %d", sk, (int)txseq);
2106
2107 pi = l2cap_pi(sk);
2108 memset(&control, 0, sizeof(control));
2109 control.frame_type = 's';
2110 control.super = L2CAP_SFRAME_SREJ;
2111
2112 for (seq = pi->expected_tx_seq; seq != txseq;
2113 seq = __next_seq(seq, pi)) {
2114 if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
2115 control.reqseq = seq;
2116 l2cap_ertm_send_sframe(sk, &control);
2117 l2cap_seq_list_append(&pi->srej_list, seq);
2118 }
2119 }
2120
2121 pi->expected_tx_seq = __next_seq(txseq, pi);
2122}
2123
2124static void l2cap_ertm_send_srej_tail(struct sock *sk)
2125{
2126 struct bt_l2cap_control control;
2127 struct l2cap_pinfo *pi;
2128
2129 BT_DBG("sk %p", sk);
2130
2131 pi = l2cap_pi(sk);
2132
2133 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2134 return;
2135
2136 memset(&control, 0, sizeof(control));
2137 control.frame_type = 's';
2138 control.super = L2CAP_SFRAME_SREJ;
2139 control.reqseq = pi->srej_list.tail;
2140 l2cap_ertm_send_sframe(sk, &control);
2141}
2142
2143static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
2144{
2145 struct bt_l2cap_control control;
2146 struct l2cap_pinfo *pi;
2147 u16 initial_head;
2148 u16 seq;
2149
2150 BT_DBG("sk %p, txseq %d", sk, (int) txseq);
2151
2152 pi = l2cap_pi(sk);
2153 memset(&control, 0, sizeof(control));
2154 control.frame_type = 's';
2155 control.super = L2CAP_SFRAME_SREJ;
2156
2157 /* Capture initial list head to allow only one pass through the list. */
2158 initial_head = pi->srej_list.head;
2159
2160 do {
2161 seq = l2cap_seq_list_pop(&pi->srej_list);
2162 if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
2163 break;
2164
2165 control.reqseq = seq;
2166 l2cap_ertm_send_sframe(sk, &control);
2167 l2cap_seq_list_append(&pi->srej_list, seq);
2168 } while (pi->srej_list.head != initial_head);
2169}
2170
2171static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2172{
2173 struct l2cap_pinfo *pi = l2cap_pi(sk);
2174 BT_DBG("sk %p", sk);
2175
2176 pi->expected_tx_seq = pi->buffer_seq;
2177 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2178 skb_queue_purge(SREJ_QUEUE(sk));
2179 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2180}
2181
2182static int l2cap_ertm_tx_state_xmit(struct sock *sk,
2183 struct bt_l2cap_control *control,
2184 struct sk_buff_head *skbs, u8 event)
2185{
2186 struct l2cap_pinfo *pi;
2187 int err = 0;
2188
2189 BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
2190 (int)event);
2191 pi = l2cap_pi(sk);
2192
2193 switch (event) {
2194 case L2CAP_ERTM_EVENT_DATA_REQUEST:
2195 if (sk->sk_send_head == NULL)
2196 sk->sk_send_head = skb_peek(skbs);
2197
2198 skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
2199 l2cap_ertm_send(sk);
2200 break;
2201 case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
2202 BT_DBG("Enter LOCAL_BUSY");
2203 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2204
2205 if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
2206 /* The SREJ_SENT state must be aborted if we are to
2207 * enter the LOCAL_BUSY state.
2208 */
2209 l2cap_ertm_abort_rx_srej_sent(sk);
2210 }
2211
2212 l2cap_ertm_send_ack(sk);
2213
2214 break;
2215 case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
2216 BT_DBG("Exit LOCAL_BUSY");
2217 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2218
2219 if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
2220 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
2221 pi->amp_move_state =
2222 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
2223 l2cap_send_move_chan_cfm(pi->conn, pi,
2224 pi->scid,
2225 L2CAP_MOVE_CHAN_CONFIRMED);
2226 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2227 } else if (pi->amp_move_role ==
2228 L2CAP_AMP_MOVE_RESPONDER) {
2229 pi->amp_move_state =
2230 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
2231 l2cap_send_move_chan_rsp(pi->conn,
2232 pi->amp_move_cmd_ident,
2233 pi->dcid,
2234 L2CAP_MOVE_CHAN_SUCCESS);
2235 }
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002236 break;
2237 }
2238
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002239 if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
2240 (pi->conn_state & L2CAP_CONN_SENT_RNR)) {
2241 struct bt_l2cap_control local_control;
2242
2243 memset(&local_control, 0, sizeof(local_control));
2244 local_control.frame_type = 's';
2245 local_control.super = L2CAP_SFRAME_RR;
2246 local_control.poll = 1;
2247 local_control.reqseq = pi->buffer_seq;
2248 l2cap_ertm_send_sframe(sk, &local_control);
2249
2250 pi->retry_count = 1;
2251 l2cap_ertm_start_monitor_timer(pi);
2252 pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002253 }
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002254 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002255 case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
2256 l2cap_ertm_process_reqseq(sk, control->reqseq);
2257 break;
2258 case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
2259 l2cap_ertm_send_rr_or_rnr(sk, 1);
2260 pi->retry_count = 1;
2261 l2cap_ertm_start_monitor_timer(pi);
2262 l2cap_ertm_stop_ack_timer(pi);
2263 pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
2264 break;
2265 case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
2266 l2cap_ertm_send_rr_or_rnr(sk, 1);
2267 pi->retry_count = 1;
2268 l2cap_ertm_start_monitor_timer(pi);
2269 pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
2270 break;
2271 case L2CAP_ERTM_EVENT_RECV_FBIT:
2272 /* Nothing to process */
2273 break;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002274 default:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002275 break;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002276 }
2277
2278 return err;
2279}
2280
/* ERTM transmit state machine handler for the WAIT_F state.
 *
 * In WAIT_F the local side has sent a poll (P=1) S-frame and is waiting
 * for a frame with the Final (F) bit set before resuming transmission.
 * Returns err, which is never set non-zero on these paths.
 */
static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Acknowledge; with LOCAL_BUSY set this informs the peer */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
			/* Peer was told we were busy; poll it with an
			 * RR(P=1) to restart the exchange.
			 */
			struct bt_l2cap_control local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);

		/* Fall through */

	case L2CAP_ERTM_EVENT_RECV_FBIT:
		if (control && control->final) {
			/* The F bit answers our poll: stop the monitor
			 * timer, restart retransmission timing if frames
			 * remain unacked, and return to XMIT.
			 */
			l2cap_ertm_stop_monitor_timer(pi);
			if (pi->unacked_frames > 0)
				l2cap_ertm_start_retrans_timer(pi);
			pi->retry_count = 0;
			pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
		}
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
		/* max_tx == 0 means retry forever; otherwise disconnect
		 * once the retry budget is exhausted.
		 */
		if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
			l2cap_ertm_send_rr_or_rnr(sk, 1);
			l2cap_ertm_start_monitor_timer(pi);
			pi->retry_count += 1;
		} else
			l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
		break;
	default:
		break;
	}

	return err;
}
2363
2364int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2365 struct sk_buff_head *skbs, u8 event)
2366{
2367 struct l2cap_pinfo *pi;
2368 int err = 0;
2369
2370 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2371 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2372
2373 pi = l2cap_pi(sk);
2374
2375 switch (pi->tx_state) {
2376 case L2CAP_ERTM_TX_STATE_XMIT:
2377 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2378 break;
2379 case L2CAP_ERTM_TX_STATE_WAIT_F:
2380 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2381 break;
2382 default:
2383 /* Ignore event */
2384 break;
2385 }
2386
2387 return err;
2388}
2389
/* Split the user data in 'msg' into one or more ERTM/streaming-mode
 * I-frame PDUs appended to 'seg_queue'.
 *
 * The PDU size is bounded by the HCI MTU (ERTM PDUs must fit in one
 * HCI fragment), by the BR/EDR radio packet size when no AMP
 * controller is in use, and by the remote's MPS.  'reseg' is passed
 * through to l2cap_create_iframe_pdu() to select its allocation
 * behavior.
 *
 * Returns 0 on success; on skb creation failure the partially built
 * queue is purged and the error is returned.
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU of several carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the start PDU carries the SDU length;
			 * later PDUs reclaim that space for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2458
2459static inline int is_initial_frame(u8 sar)
2460{
2461 return (sar == L2CAP_SAR_UNSEGMENTED ||
2462 sar == L2CAP_SAR_START);
2463}
2464
2465static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2466 size_t veclen)
2467{
2468 struct sk_buff *frag_iter;
2469
2470 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2471
2472 if (iv->iov_len + skb->len > veclen)
2473 return -ENOMEM;
2474
2475 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2476 iv->iov_len += skb->len;
2477
2478 skb_walk_frags(skb, frag_iter) {
2479 if (iv->iov_len + skb->len > veclen)
2480 return -ENOMEM;
2481
2482 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2483 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2484 frag_iter->len);
2485 iv->iov_len += frag_iter->len;
2486 }
2487
2488 return 0;
2489}
2490
/* Rebuild every PDU on 'queue' to match the channel's current MPS and
 * control-field format after an AMP channel move.
 *
 * Each SDU is reassembled from its old PDUs into a flat buffer, then
 * re-segmented with l2cap_segment_sdu().  If the first old PDU of an
 * SDU was a continuation frame, the SAR bits of the first new PDU are
 * patched so the peer's reassembly state stays consistent.
 *
 * Returns 0 on success; on error the queue is purged and a negative
 * errno is returned.  Called with the socket locked.
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Flat buffer large enough for a maximum-size SDU plus FCS */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU, stripping the L2CAP/control header */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		/* Start frames also carry a 2-byte SDU length field */
		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* A trailing FCS is not part of the SDU payload */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Pull in continuation/end PDUs of the same SDU */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU.  The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
					msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
				err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
				skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
					skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
						L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2663
/* Deferred work that resegments a channel's transmit queue after an
 * AMP move completes.  Consumes the sock reference taken by
 * l2cap_setup_resegment() and frees the work item.
 */
static void l2cap_resegment_worker(struct work_struct *work)
{
	int err = 0;
	struct l2cap_resegment_work *seg_work =
		container_of(work, struct l2cap_resegment_work, work);
	struct sock *sk = seg_work->sk;

	/* The work item is only needed to find the socket */
	kfree(seg_work);

	BT_DBG("sk %p", sk);
	lock_sock(sk);

	/* The move may have been aborted while this work was queued */
	if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		release_sock(sk);
		sock_put(sk);
		return;
	}

	err = l2cap_resegment_queue(sk, TX_QUEUE(sk));

	l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;

	/* Resegmentation replaced the queued skbs, so sk_send_head
	 * must be re-established before transmission resumes.
	 */
	if (skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));

	if (err)
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else
		l2cap_ertm_send(sk);

	release_sock(sk);
	sock_put(sk);
}
2699
2700static int l2cap_setup_resegment(struct sock *sk)
2701{
2702 struct l2cap_resegment_work *seg_work;
2703
2704 BT_DBG("sk %p", sk);
2705
2706 if (skb_queue_empty(TX_QUEUE(sk)))
2707 return 0;
2708
2709 seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
2710 if (!seg_work)
2711 return -ENOMEM;
2712
2713 INIT_WORK(&seg_work->work, l2cap_resegment_worker);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002714 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002715 seg_work->sk = sk;
2716
2717 if (!queue_work(_l2cap_wq, &seg_work->work)) {
2718 kfree(seg_work);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002719 sock_put(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002720 return -ENOMEM;
2721 }
2722
2723 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
2724
2725 return 0;
2726}
2727
2728static inline int l2cap_rmem_available(struct sock *sk)
2729{
2730 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2731 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2732 return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
2733}
2734
2735static inline int l2cap_rmem_full(struct sock *sk)
2736{
2737 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2738 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2739 return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
2740}
2741
/* Start moving a channel to its preferred controller (BR/EDR <-> AMP).
 *
 * Requires an existing connection with A2MP support and high-speed
 * enabled.  A channel currently on BR/EDR (amp_id == 0) is moved only
 * when policy prefers AMP, beginning with physical link creation; a
 * channel already on an AMP initiates a move back to BR/EDR (id 0)
 * directly with a move-channel request.
 */
void l2cap_amp_move_init(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	if (!l2cap_pi(sk)->conn)
		return;

	/* Moves need A2MP fixed-channel support and high speed enabled */
	if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP) || !enable_hs)
		return;

	if (l2cap_pi(sk)->amp_id == 0) {
		/* On BR/EDR: only move if policy prefers AMP */
		if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
			return;
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_create_physical(l2cap_pi(sk)->conn, sk);
	} else {
		/* On an AMP: initiate a move back to BR/EDR (id 0) */
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
		l2cap_pi(sk)->amp_move_state =
			L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		l2cap_pi(sk)->amp_move_id = 0;
		l2cap_amp_move_setup(sk);
		l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
				l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	}
}
2769
2770static void l2cap_chan_ready(struct sock *sk)
2771{
2772 struct sock *parent = bt_sk(sk)->parent;
2773
2774 BT_DBG("sk %p, parent %p", sk, parent);
2775
2776 l2cap_pi(sk)->conf_state = 0;
2777 l2cap_sock_clear_timer(sk);
2778
2779 if (!parent) {
2780 /* Outgoing channel.
2781 * Wake up socket sleeping on connect.
2782 */
2783 sk->sk_state = BT_CONNECTED;
2784 sk->sk_state_change(sk);
2785 } else {
2786 /* Incoming channel.
2787 * Wake up socket sleeping on accept.
2788 */
2789 parent->sk_data_ready(parent, 0);
2790 }
2791}
2792
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793/* Copy frame to all raw sockets on that connection */
2794static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2795{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002796 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002798 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799
2800 BT_DBG("conn %p", conn);
2801
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002802 read_lock(&l->lock);
2803 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2804 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 continue;
2806
2807 /* Don't send frame to the socket it came from */
2808 if (skb->sk == sk)
2809 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002810 nskb = skb_clone(skb, GFP_ATOMIC);
2811 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 continue;
2813
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002814 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 kfree_skb(nskb);
2816 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002817 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818}
2819
2820/* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header +
 * payload, with the payload split into ACL-MTU-sized continuation
 * fragments chained on frag_list.
 *
 * The signalling CID is selected by link type (LE vs BR/EDR).
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First skb carries whatever payload fits after headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole frag chain as well */
	kfree_skb(skb);
	return NULL;
}
2884
/* Decode one option from a configuration request/response buffer.
 *
 * Advances *ptr past the option and returns its total encoded length.
 * The option type and length are stored in *type / *olen; 1-, 2- and
 * 4-byte values are returned in *val by value (little-endian on the
 * wire), any other length returns a pointer to the option data.
 *
 * NOTE(review): opt->len comes from the wire unvalidated here; the
 * caller must ensure the buffer actually contains that many more
 * bytes before trusting *val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2917
/* Append one option to a configuration request/response buffer and
 * advance *ptr past it.
 *
 * 1-, 2- and 4-byte values are encoded little-endian; any other
 * length treats 'val' as a pointer to 'len' bytes to copy.
 *
 * NOTE(review): there is no output-buffer bound here; callers must
 * guarantee at least L2CAP_CONF_OPT_SIZE + len bytes of space remain.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: 'val' is a source pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2947
/* Delayed work: ERTM ack timer expiry.
 *
 * Sends an RR/RNR acknowledging received I-frames if any are still
 * unacknowledged (buffer_seq has advanced past last_acked_seq).
 * Bails out quietly if the connection is already gone.
 */
static void l2cap_ertm_ack_timeout(struct work_struct *work)
{
	struct delayed_work *delayed =
		container_of(work, struct delayed_work, work);
	/* The delayed work is embedded in l2cap_pinfo, which overlays
	 * the start of the struct sock.
	 */
	struct l2cap_pinfo *pi =
		container_of(delayed, struct l2cap_pinfo, ack_work);
	struct sock *sk = (struct sock *)pi;
	u16 frames_to_ack;

	BT_DBG("sk %p", sk);

	if (!sk)
		return;

	lock_sock(sk);

	if (!l2cap_pi(sk)->conn) {
		release_sock(sk);
		return;
	}

	/* Frames received but not yet acknowledged */
	frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
					l2cap_pi(sk)->last_acked_seq,
					l2cap_pi(sk));

	if (frames_to_ack)
		l2cap_ertm_send_rr_or_rnr(sk, 0);

	release_sock(sk);
}
2978
/* Delayed work: ERTM retransmission timer expiry.
 *
 * Injects RETRANS_TIMER_EXPIRES into the tx state machine, which
 * polls the peer with an RR/RNR (P=1) and enters the WAIT_F state.
 * Bails out quietly if the connection is already gone.
 */
static void l2cap_ertm_retrans_timeout(struct work_struct *work)
{
	struct delayed_work *delayed =
		container_of(work, struct delayed_work, work);
	/* The delayed work is embedded in l2cap_pinfo, which overlays
	 * the start of the struct sock.
	 */
	struct l2cap_pinfo *pi =
		container_of(delayed, struct l2cap_pinfo, retrans_work);
	struct sock *sk = (struct sock *)pi;

	BT_DBG("sk %p", sk);

	if (!sk)
		return;

	lock_sock(sk);

	if (!l2cap_pi(sk)->conn) {
		release_sock(sk);
		return;
	}

	l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
	release_sock(sk);
}
3002
/* Delayed work: ERTM monitor timer expiry.
 *
 * Injects MONITOR_TIMER_EXPIRES into the tx state machine; in WAIT_F
 * this re-polls the peer or disconnects once max_tx is exceeded.
 * Bails out quietly if the connection is already gone.
 */
static void l2cap_ertm_monitor_timeout(struct work_struct *work)
{
	struct delayed_work *delayed =
		container_of(work, struct delayed_work, work);
	/* The delayed work is embedded in l2cap_pinfo, which overlays
	 * the start of the struct sock.
	 */
	struct l2cap_pinfo *pi =
		container_of(delayed, struct l2cap_pinfo, monitor_work);
	struct sock *sk = (struct sock *)pi;

	BT_DBG("sk %p", sk);

	if (!sk)
		return;

	lock_sock(sk);

	if (!l2cap_pi(sk)->conn) {
		release_sock(sk);
		return;
	}

	l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);

	release_sock(sk);
}
3027
/* Initialize all ERTM state for a channel: sequence numbers, SDU
 * reassembly state, the rx/tx state machines, AMP move state, the
 * timer work items, the transmit/SREJ queues, and the SREJ and
 * retransmission sequence lists (sized by the transmit windows).
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->next_tx_seq = 0;
	l2cap_pi(sk)->expected_tx_seq = 0;
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->frames_sent = 0;
	l2cap_pi(sk)->last_acked_seq = 0;
	/* SDU reassembly state */
	l2cap_pi(sk)->sdu = NULL;
	l2cap_pi(sk)->sdu_last_frag = NULL;
	l2cap_pi(sk)->sdu_len = 0;
	atomic_set(&l2cap_pi(sk)->ertm_queued, 0);

	l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;

	BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
		l2cap_pi(sk)->rx_state);

	/* Channel starts on BR/EDR with no AMP move in progress */
	l2cap_pi(sk)->amp_id = 0;
	l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
	l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	l2cap_pi(sk)->amp_move_reqseq = 0;
	l2cap_pi(sk)->amp_move_event = 0;

	INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
	INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
			l2cap_ertm_retrans_timeout);
	INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
			l2cap_ertm_monitor_timeout);
	INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(TX_QUEUE(sk));

	l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
	l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
			l2cap_pi(sk)->remote_tx_win);
}
3067
3068void l2cap_ertm_destruct(struct sock *sk)
3069{
3070 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
3071 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
3072}
3073
/* Cancel all ERTM timers when the channel is shut down. */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
3080
/* Called after received data has been consumed by the application.
 *
 * For connected ERTM channels: drains frames queued while in the
 * SREJ_SENT state (disconnecting on failure), and clears the
 * local-busy condition once enough receive buffer has been freed.
 */
void l2cap_ertm_recv_done(struct sock *sk)
{
	lock_sock(sk);

	if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM ||
		sk->sk_state != BT_CONNECTED) {
		release_sock(sk);
		return;
	}

	/* Consume any queued incoming frames and update local busy status */
	if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
			l2cap_ertm_rx_queued_iframes(sk))
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

	release_sock(sk);
}
3101
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003102static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3103{
3104 switch (mode) {
3105 case L2CAP_MODE_STREAMING:
3106 case L2CAP_MODE_ERTM:
3107 if (l2cap_mode_supported(mode, remote_feat_mask))
3108 return mode;
3109 /* fall through */
3110 default:
3111 return L2CAP_MODE_BASIC;
3112 }
3113}
3114
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003115static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003116{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003117 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3118 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3119 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3120 pi->extended_control = 1;
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003121 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003122 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3123 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3124
3125 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3126 pi->extended_control = 0;
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003127 }
Mat Martineauda318242012-06-22 11:36:18 -07003128 pi->ack_win = pi->tx_win;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003129}
3130
/* Combine the existing aggregate flow spec 'cur' with a newly added
 * channel's flow spec 'new', writing the result to 'agg'.
 *
 * max_sdu == 0xFFFF or sdu_arr_time == 0xFFFFFFFF denotes an unknown
 * rate.  If the current spec is known but the new one is not, the
 * aggregate becomes unknown; if both are known, the SDU inter-arrival
 * time is recomputed so the aggregate carries the sum of both data
 * rates.  If 'cur' is already unknown, it is copied through as-is.
 *
 * NOTE(review): in the known-rate path agg->max_sdu keeps cur's value
 * (only the arrival time is recalculated) — presumably max_sdu is a
 * per-SDU bound rather than additive; confirm against the AMP flow
 * spec usage.
 */
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *new,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		/* current flow spec has known rate */
		if ((new->max_sdu == 0xFFFF) ||
				(new->sdu_arr_time == 0xFFFFFFFF)) {
			/* new fs has unknown rate, so aggregate is unknown */
			agg->max_sdu = 0xFFFF;
			agg->sdu_arr_time = 0xFFFFFFFF;
		} else {
			/* new fs has known rate, so aggregate is known */
			u64 cur_rate;
			u64 new_rate;
			/* Rates in bytes/second: max_sdu / arrival time
			 * (arrival times are in microseconds)
			 */
			cur_rate = cur->max_sdu * 1000000ULL;
			if (cur->sdu_arr_time)
				cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
			new_rate = new->max_sdu * 1000000ULL;
			if (new->sdu_arr_time)
				new_rate = div_u64(new_rate, new->sdu_arr_time);
			cur_rate = cur_rate + new_rate;
			if (cur_rate)
				agg->sdu_arr_time = div64_u64(
					agg->max_sdu * 1000000ULL, cur_rate);
		}
	}
}
3160
3161static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3162{
3163 struct hci_ext_fs tx_fs;
3164 struct hci_ext_fs rx_fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003166 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003168 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3169 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3170 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3171 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3172 return 0;
3173
3174 l2cap_aggregate_fs(&chan->tx_fs,
3175 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3176 l2cap_aggregate_fs(&chan->rx_fs,
3177 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3178 hci_chan_modify(chan, &tx_fs, &rx_fs);
3179 return 1;
3180}
3181
/* Remove a departing channel's flow spec 'old' from the aggregate
 * 'cur', writing the result to 'agg' — the inverse of
 * l2cap_aggregate_fs().
 *
 * If the current aggregate rate is known, the removed channel's rate
 * is subtracted and the SDU inter-arrival time recomputed; an unknown
 * aggregate is copied through unchanged.
 *
 * NOTE(review): the subtraction assumes old's rate never exceeds the
 * aggregate's (u64 underflow otherwise) — presumably guaranteed
 * because 'old' was previously aggregated in; confirm.
 */
static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *old,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		u64 cur_rate;
		u64 old_rate;
		/* Rates in bytes/second (arrival times in microseconds) */
		cur_rate = cur->max_sdu * 1000000ULL;
		if (cur->sdu_arr_time)
			cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
		old_rate = old->max_sdu * 1000000ULL;
		if (old->sdu_arr_time)
			old_rate = div_u64(old_rate, old->sdu_arr_time);
		cur_rate = cur_rate - old_rate;
		if (cur_rate)
			agg->sdu_arr_time = div64_u64(
				agg->max_sdu * 1000000ULL, cur_rate);
	}
}
3202
3203static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3204{
3205 struct hci_ext_fs tx_fs;
3206 struct hci_ext_fs rx_fs;
3207
3208 BT_DBG("chan %p", chan);
3209
3210 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3211 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3212 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3213 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3214 return 0;
3215
3216 l2cap_deaggregate_fs(&chan->tx_fs,
3217 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3218 l2cap_deaggregate_fs(&chan->rx_fs,
3219 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3220 hci_chan_modify(chan, &tx_fs, &rx_fs);
3221 return 1;
3222}
3223
/* Find or create the HCI logical channel on AMP controller 'amp_id'
 * for the ACL connection backing 'sk'.
 *
 * If a logical channel to the peer already exists it is shared: its
 * flow spec is re-aggregated with this channel's and a reference is
 * taken.  Otherwise a new channel is added and creation is started
 * with this socket's flow specs.  In both cases a sock reference is
 * held for the channel's l2cap_sk back-pointer and pi->ampchan is
 * set.
 *
 * Returns the hci_chan, or NULL when the controller or ACL link is
 * unavailable or channel allocation fails.
 */
static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct hci_dev *hdev;
	struct hci_conn *hcon;
	struct hci_chan *chan;

	/* Takes a reference on the AMP controller; dropped at 'done' */
	hdev = hci_dev_get(amp_id);
	if (!hdev)
		return NULL;

	BT_DBG("hdev %s", hdev->name);

	hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
	if (!hcon) {
		chan = NULL;
		goto done;
	}

	chan = hci_chan_list_lookup_id(hdev, hcon->handle);
	if (chan) {
		/* Share the existing logical channel */
		l2cap_aggregate(chan, pi);
		sock_hold(sk);
		chan->l2cap_sk = sk;
		hci_chan_hold(chan);
		pi->ampchan = chan;
		goto done;
	}

	chan = hci_chan_add(hdev);
	if (chan) {
		chan->conn = hcon;
		sock_hold(sk);
		chan->l2cap_sk = sk;
		hci_chan_hold(chan);
		pi->ampchan = chan;
		hci_chan_create(chan,
			(struct hci_ext_fs *) &pi->local_fs,
			(struct hci_ext_fs *) &pi->remote_fs);
	}
done:
	hci_dev_put(hdev);
	return chan;
}
3268
Mat Martineau966dcfa2011-12-12 10:45:31 -08003269static void l2cap_get_ertm_timeouts(struct l2cap_conf_rfc *rfc,
3270 struct l2cap_pinfo *pi)
3271{
3272 if (pi->amp_id && pi->ampcon) {
3273 u64 ertm_to = pi->ampcon->hdev->amp_be_flush_to;
3274
3275 /* Class 1 devices have must have ERTM timeouts
3276 * exceeding the Link Supervision Timeout. The
3277 * default Link Supervision Timeout for AMP
3278 * controllers is 10 seconds.
3279 *
3280 * Class 1 devices use 0xffffffff for their
3281 * best-effort flush timeout, so the clamping logic
3282 * will result in a timeout that meets the above
3283 * requirement. ERTM timeouts are 16-bit values, so
3284 * the maximum timeout is 65.535 seconds.
3285 */
3286
3287 /* Convert timeout to milliseconds and round */
3288 ertm_to = div_u64(ertm_to + 999, 1000);
3289
3290 /* This is the recommended formula for class 2 devices
3291 * that start ERTM timers when packets are sent to the
3292 * controller.
3293 */
3294 ertm_to = 3 * ertm_to + 500;
3295
3296 if (ertm_to > 0xffff)
3297 ertm_to = 0xffff;
3298
3299 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3300 rfc->monitor_timeout = rfc->retrans_timeout;
3301 } else {
3302 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3303 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3304 }
3305}
3306
/* Build an L2CAP Configure Request for this channel in @data.
 *
 * On the first exchange the channel mode may be renegotiated via
 * l2cap_select_mode() (unless the owner pinned it with
 * L2CAP_CONF_STATE2_DEVICE).  The MTU, RFC, extended window, extended
 * flow spec and FCS options are then appended sequentially through
 * @ptr by l2cap_add_conf_opt(), as applicable for the selected mode.
 *
 * Returns the total length of the request payload.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p mode %d", sk, pi->mode);

	/* Only (re)select the mode on the very first exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode pinned by the socket owner; keep it */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		/* Fall back to the best mode the peer supports */
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when it differs from the default */
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* A peer without ERTM/streaming support assumes basic
		 * mode, so no RFC option is needed in that case */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		l2cap_setup_txwin(pi);
		/* The RFC option carries at most the enhanced (8-bit)
		 * window; larger windows go in the EXT_WINDOW option */
		if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
			rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
		else
			rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		l2cap_get_ertm_timeouts(&rfc, pi);

		/* PDU size never exceeds our incoming MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
			pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->tx_win);
		}

		if (pi->amp_id) {
			/* default best effort extended flow spec */
			struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
				sizeof(fs), (unsigned long) &fs);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to drop the FCS when we don't want it and the
		 * peer has signalled the same */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_setup_txwin(pi);
		/* Streaming mode uses no window or retransmissions */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
			pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3424
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003425
/* Build the Configure Request used to reconfigure a channel after an
 * AMP move.  Only ERTM channels can be reconfigured; any other mode is
 * refused.
 *
 * Emits an RFC option with timeouts appropriate for the new controller
 * (see l2cap_get_ertm_timeouts) and, when the peer supports FCS, an
 * FCS option: FCS is disabled on AMP because the lower layers already
 * checksum the data, and enabled (CRC16) when moving back to BR/EDR.
 *
 * Returns the length of the request payload, or -ECONNREFUSED for
 * non-ERTM modes.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	switch (pi->mode) {
	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		l2cap_get_ertm_timeouts(&rfc, pi);
		/* PDU size never exceeds our incoming MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		break;

	default:
		return -ECONNREFUSED;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {
		/* TODO assign fcs for br/edr based on socket config option */
		/* FCS is not used with AMP because it is redundant - lower
		 * layers already include a checksum. */
		if (pi->amp_id)
			pi->local_conf.fcs = L2CAP_FCS_NONE;
		else
			pi->local_conf.fcs = L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->local_conf.fcs);
		/* Either side requesting FCS enables it */
		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3472
/* Parse the peer's Configure Request (cached in pi->conf_req) and
 * build our Configure Response in @data.
 *
 * The first loop walks the option list, recording MTU, flush timeout,
 * RFC, FCS, extended flow spec and extended window proposals; unknown
 * non-hint options are echoed back and the result becomes
 * L2CAP_CONF_UNKNOWN.  On the first exchange the channel mode may be
 * renegotiated via l2cap_select_mode().  The remainder validates the
 * proposals, appends our response options, and commits accepted values
 * to the channel state.
 *
 * In lockstep (AMP) configuration the result may be
 * L2CAP_CONF_PENDING, in which case logical link creation is triggered
 * through l2cap_chan_admit().
 *
 * Returns the length of the response payload, or -ECONNREFUSED when
 * negotiation cannot proceed.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* Never negotiate below an outgoing MTU already in effect */
	if (pi->omtu > mtu)
		mtu = pi->omtu;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored without rejecting */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			/* Lockstep (AMP) config uses the extended flow
			 * spec instead of a plain flush timeout */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			if (olen == sizeof(fs)) {
				pi->conf_state |= L2CAP_CONF_EFS_RECV;
				/* Flow specs only valid in lockstep mode */
				if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
					result = L2CAP_CONF_UNACCEPT;
					break;
				}
				memcpy(&fs, (void *) val, olen);
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
					break;
				}
				/* Convert little-endian wire fields */
				pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				pi->remote_fs.id = fs.id;
				pi->remote_fs.type = fs.type;
				pi->remote_fs.max_sdu =
						le16_to_cpu(fs.max_sdu);
				pi->remote_fs.sdu_arr_time =
						le32_to_cpu(fs.sdu_arr_time);
				pi->remote_fs.acc_latency =
						le32_to_cpu(fs.acc_latency);
				pi->remote_fs.flush_to =
						le32_to_cpu(fs.flush_to);
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			pi->extended_control = 1;
			pi->remote_tx_win = val;
			pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
			pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown option types back to the peer */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode may only be renegotiated on the first exchange */
	if (pi->num_conf_rsp || pi->num_conf_req > 1)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* Mode was pinned by the socket owner */
		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;
		if (mtu > L2CAP_DEFAULT_MTU)
			pi->omtu = mtu;

		/* Give up after one failed renegotiation attempt */
		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	/* Lockstep configuration requires an extended flow spec */
	if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
			!(pi->conf_state & L2CAP_CONF_EFS_RECV))
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU) {
			result = L2CAP_CONF_UNACCEPT;
			pi->omtu = L2CAP_DEFAULT_MIN_MTU;
		} else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			/* EXT_WINDOW option overrides the RFC window */
			if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
				pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
			l2cap_get_ertm_timeouts(&rfc, pi);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
			pi->conf_state |= L2CAP_CONF_PEND_SENT;
			result = L2CAP_CONF_PENDING;

			if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
					pi->amp_id) {
				struct hci_chan *chan;
				/* Trigger logical link creation only on AMP */

				chan = l2cap_chan_admit(pi->amp_id, sk);
				if (!chan)
					return -ECONNREFUSED;

				if (chan->state == BT_CONNECTED)
					l2cap_create_cfm(chan, 0);
			}
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3690
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003691static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003692{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003693 struct l2cap_pinfo *pi = l2cap_pi(sk);
3694 struct l2cap_conf_rsp *rsp = data;
3695 void *ptr = rsp->data;
3696 void *req = pi->conf_req;
3697 int len = pi->conf_len;
3698 int type, hint, olen;
3699 unsigned long val;
3700 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3701 struct l2cap_conf_ext_fs fs;
3702 u16 mtu = pi->omtu;
3703 u16 tx_win = pi->remote_tx_win;
3704 u16 result = L2CAP_CONF_SUCCESS;
3705
3706 BT_DBG("sk %p", sk);
3707
3708 while (len >= L2CAP_CONF_OPT_SIZE) {
3709 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3710
3711 hint = type & L2CAP_CONF_HINT;
3712 type &= L2CAP_CONF_MASK;
3713
3714 switch (type) {
3715 case L2CAP_CONF_MTU:
3716 mtu = val;
3717 break;
3718
3719 case L2CAP_CONF_FLUSH_TO:
3720 if (pi->amp_move_id)
3721 result = L2CAP_CONF_UNACCEPT;
3722 else
3723 pi->remote_conf.flush_to = val;
3724 break;
3725
3726 case L2CAP_CONF_QOS:
3727 if (pi->amp_move_id)
3728 result = L2CAP_CONF_UNACCEPT;
3729 break;
3730
3731 case L2CAP_CONF_RFC:
3732 if (olen == sizeof(rfc))
3733 memcpy(&rfc, (void *) val, olen);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003734 break;
3735
3736 case L2CAP_CONF_FCS:
3737 pi->remote_conf.fcs = val;
3738 break;
3739
3740 case L2CAP_CONF_EXT_FS:
3741 if (olen == sizeof(fs)) {
3742 memcpy(&fs, (void *) val, olen);
3743 if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
3744 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3745 else {
3746 pi->remote_conf.flush_to =
3747 le32_to_cpu(fs.flush_to);
3748 }
3749 }
3750 break;
3751
3752 case L2CAP_CONF_EXT_WINDOW:
3753 tx_win = val;
3754 break;
3755
3756 default:
3757 if (hint)
3758 break;
3759
3760 result = L2CAP_CONF_UNKNOWN;
3761 *((u8 *) ptr++) = type;
3762 break;
3763 }
3764 }
3765
3766 BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
3767 result, pi->mode, rfc.mode);
3768
Mat Martineau966dcfa2011-12-12 10:45:31 -08003769 if (pi->mode != rfc.mode || rfc.mode == L2CAP_MODE_BASIC)
3770 result = L2CAP_CONF_UNACCEPT;
3771
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003772 if (result == L2CAP_CONF_SUCCESS) {
3773 /* Configure output options and let the other side know
3774 * which ones we don't like. */
3775
3776 /* Don't allow mtu to decrease. */
3777 if (mtu < pi->omtu)
3778 result = L2CAP_CONF_UNACCEPT;
3779
3780 BT_DBG("mtu %d omtu %d", mtu, pi->omtu);
3781
3782 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
3783
3784 /* Don't allow extended transmit window to change. */
3785 if (tx_win != pi->remote_tx_win) {
3786 result = L2CAP_CONF_UNACCEPT;
3787 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
3788 pi->remote_tx_win);
3789 }
3790
Mat Martineau966dcfa2011-12-12 10:45:31 -08003791 pi->remote_mps = rfc.max_pdu_size;
3792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003793 if (rfc.mode == L2CAP_MODE_ERTM) {
Mat Martineau966dcfa2011-12-12 10:45:31 -08003794 l2cap_get_ertm_timeouts(&rfc, pi);
3795 } else {
3796 rfc.retrans_timeout = 0;
3797 rfc.monitor_timeout = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003798 }
3799
Mat Martineau966dcfa2011-12-12 10:45:31 -08003800 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3801 sizeof(rfc), (unsigned long) &rfc);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003802 }
3803
3804 if (result != L2CAP_CONF_SUCCESS)
3805 goto done;
3806
Mat Martineau966dcfa2011-12-12 10:45:31 -08003807 pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003808
Mat Martineau966dcfa2011-12-12 10:45:31 -08003809 if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003810 pi->flush_to = pi->remote_conf.flush_to;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003811
3812done:
3813 rsp->scid = cpu_to_le16(pi->dcid);
3814 rsp->result = cpu_to_le16(result);
3815 rsp->flags = cpu_to_le16(0x0000);
3816
3817 return ptr - data;
3818}
3819
/* Parse the peer's Configure Response @rsp and build the follow-up
 * Configure Request in @data (used when the peer answered with
 * counter-proposals).
 *
 * Accepted or adjusted values (MTU, flush timeout, RFC parameters,
 * extended window) are committed to the channel state and echoed back
 * in the new request.  Returns the length of the request payload, or
 * -ECONNREFUSED when the peer insists on a mode this channel cannot
 * use.  @result carries the response's result code and may be
 * downgraded to L2CAP_CONF_UNACCEPT on an undersized MTU.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	/* Initialize rfc in case no rfc option is received */
	rfc.mode = pi->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Clamp to the minimum MTU and reject anything
			 * smaller proposed by the peer */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A mode pinned by the socket owner cannot be
			 * renegotiated by the peer */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EXT_WINDOW:
			/* Never raise the ack window above our offer */
			pi->ack_win = min_t(u16, val, pi->ack_win);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
					2, pi->tx_win);
			break;

		default:
			break;
		}
	}

	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
		return -ECONNREFUSED;

	pi->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		/* Commit negotiated parameters; RFC fields are
		 * little-endian on the wire */
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!pi->extended_control) {
				pi->ack_win = min_t(u16, pi->ack_win,
							rfc.txwin_size);
			}
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3908
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003909static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910{
3911 struct l2cap_conf_rsp *rsp = data;
3912 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003914 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003916 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003917 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003918 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003919
3920 return ptr - data;
3921}
3922
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003923static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003924{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003925 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003926 int type, olen;
3927 unsigned long val;
3928 struct l2cap_conf_rfc rfc;
Mat Martineauda318242012-06-22 11:36:18 -07003929 u16 txwin_ext = pi->ack_win;
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003930
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003931 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003932
Mat Martineau6b0bcce2011-11-29 16:17:02 -08003933 /* Initialize rfc in case no rfc option is received */
3934 rfc.mode = pi->mode;
Mat Martineauab043552011-12-05 15:54:44 -08003935 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3936 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3937 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
Mat Martineauda318242012-06-22 11:36:18 -07003938 rfc.txwin_size = min_t(u16, pi->ack_win, L2CAP_DEFAULT_TX_WINDOW);
Mat Martineau6b0bcce2011-11-29 16:17:02 -08003939
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003940 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003941 return;
3942
3943 while (len >= L2CAP_CONF_OPT_SIZE) {
3944 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3945
3946 switch (type) {
3947 case L2CAP_CONF_RFC:
3948 if (olen == sizeof(rfc))
3949 memcpy(&rfc, (void *)val, olen);
Mat Martineauda318242012-06-22 11:36:18 -07003950 break;
3951 case L2CAP_CONF_EXT_WINDOW:
3952 txwin_ext = val;
3953 break;
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003954 }
3955 }
3956
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003957 switch (rfc.mode) {
3958 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003959 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3960 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3961 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Mat Martineauda318242012-06-22 11:36:18 -07003962 if (pi->extended_control)
3963 pi->ack_win = min_t(u16, pi->ack_win, txwin_ext);
3964 else
3965 pi->ack_win = min_t(u16, pi->ack_win, rfc.txwin_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003966 break;
3967 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003968 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003969 }
3970}
3971
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003972static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3973{
3974 struct l2cap_pinfo *pi = l2cap_pi(sk);
3975 int type, olen;
3976 unsigned long val;
3977 struct l2cap_conf_ext_fs fs;
3978
3979 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3980
3981 while (len >= L2CAP_CONF_OPT_SIZE) {
3982 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3983 if ((type == L2CAP_CONF_EXT_FS) &&
3984 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3985 memcpy(&fs, (void *)val, olen);
3986 pi->local_fs.id = fs.id;
3987 pi->local_fs.type = fs.type;
3988 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3989 pi->local_fs.sdu_arr_time =
3990 le32_to_cpu(fs.sdu_arr_time);
3991 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3992 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3993 break;
3994 }
3995 }
3996
3997}
3998
3999static int l2cap_finish_amp_move(struct sock *sk)
4000{
4001 struct l2cap_pinfo *pi;
4002 int err;
4003
4004 BT_DBG("sk %p", sk);
4005
4006 pi = l2cap_pi(sk);
4007
4008 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
4009 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
4010
4011 if (pi->ampcon)
4012 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
4013 else
4014 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
4015
4016 err = l2cap_setup_resegment(sk);
4017
4018 return err;
4019}
4020
4021static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
4022 u16 result)
4023{
4024 int err = 0;
4025 struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
4026 struct l2cap_pinfo *pi = l2cap_pi(sk);
4027
4028 BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);
4029
4030 if (pi->reconf_state == L2CAP_RECONF_NONE)
4031 return -ECONNREFUSED;
4032
4033 if (result == L2CAP_CONF_SUCCESS) {
4034 while (len >= L2CAP_CONF_OPT_SIZE) {
4035 int type, olen;
4036 unsigned long val;
4037
4038 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4039
4040 if (type == L2CAP_CONF_RFC) {
4041 if (olen == sizeof(rfc))
4042 memcpy(&rfc, (void *)val, olen);
Mat Martineau966dcfa2011-12-12 10:45:31 -08004043
4044 if (rfc.mode != pi->mode) {
4045 l2cap_send_disconn_req(pi->conn, sk,
4046 ECONNRESET);
4047 return -ECONNRESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004048 }
Mat Martineau966dcfa2011-12-12 10:45:31 -08004049
4050 goto done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004051 }
4052 }
4053 }
4054
Mat Martineau966dcfa2011-12-12 10:45:31 -08004055 BT_ERR("Expected RFC option was missing, using existing values");
4056
4057 rfc.mode = pi->mode;
4058 rfc.retrans_timeout = cpu_to_le16(pi->retrans_timeout);
4059 rfc.monitor_timeout = cpu_to_le16(pi->monitor_timeout);
4060
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004061done:
4062 l2cap_ertm_stop_ack_timer(pi);
4063 l2cap_ertm_stop_retrans_timer(pi);
4064 l2cap_ertm_stop_monitor_timer(pi);
4065
Mat Martineau966dcfa2011-12-12 10:45:31 -08004066 pi->mps = le16_to_cpu(rfc.max_pdu_size);
4067 if (pi->mode == L2CAP_MODE_ERTM) {
4068 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4069 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4070 }
4071
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004072 if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
4073 l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;
4074
4075 /* Respond to poll */
4076 err = l2cap_answer_move_poll(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004077 } else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004078 if (pi->mode == L2CAP_MODE_ERTM) {
4079 l2cap_ertm_tx(sk, NULL, NULL,
4080 L2CAP_ERTM_EVENT_EXPLICIT_POLL);
4081 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
4082 }
4083 }
4084
4085 return err;
4086}
4087
4088
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004089static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4090{
4091 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
4092
4093 if (rej->reason != 0x0000)
4094 return 0;
4095
4096 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4097 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004098 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01004099
4100 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004101 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004102
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004103 l2cap_conn_start(conn);
4104 }
4105
4106 return 0;
4107}
4108
/* Handle an incoming L2CAP Connection Request and create the server-side
 * channel for it.
 *
 * @conn:     the L2CAP connection the request arrived on
 * @cmd:      signalling command header (supplies the response ident)
 * @data:     request payload (struct l2cap_conn_req: psm, scid)
 * @rsp_code: response opcode to send (e.g. L2CAP_CONN_RSP)
 * @amp_id:   AMP controller id; non-zero forces a "pending" result until
 *            the physical link comes up
 *
 * Looks up a listening socket for the PSM, performs security and backlog
 * checks, allocates and enqueues a child socket, and sends the connect
 * response (plus a feature-mask Information Request if that exchange has
 * not happened yet).  Returns the new child sock, or NULL on any failure
 * (a response with the failure result is still sent).
 *
 * Locking: takes the parent sock lock (bh) and the chan_list write lock;
 * both are released before the response is transmitted.
 */
static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code,
					u8 amp_id)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP; PSM 0x0001 is
	 * always allowed so service discovery works before pairing)
	 */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		sk = NULL;
		goto response;
	}

	/* Keep the ACL alive while the channel exists */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	/* Adding to chan_list assigns our local scid, reported as dcid
	 * in the response
	 */
	__l2cap_chan_add(conn, sk);
	dcid = l2cap_pi(sk)->scid;
	l2cap_pi(sk)->amp_id = amp_id;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up. */
				if (amp_id) {
					sk->sk_state = BT_CONNECT2;
					result = L2CAP_CR_PEND;
				} else {
					sk->sk_state = BT_CONFIG;
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet; answer "pending" and start
		 * the information exchange below
		 */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, kick off configuration right away */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
			result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return sk;
}
4251
4252static inline int l2cap_connect_req(struct l2cap_conn *conn,
4253 struct l2cap_cmd_hdr *cmd, u8 *data)
4254{
4255 l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 return 0;
4257}
4258
/* Handle an L2CAP Connection Response from the peer.
 *
 * Locates our channel by the peer-assigned scid (or, while still
 * pending, by the command ident), then advances the channel state:
 * success moves to BT_CONFIG and sends our first Configure Request,
 * pending just records the flag, and any other result tears the
 * channel down.  Returns 0, or -EFAULT if no matching channel exists.
 *
 * Note: l2cap_get_chan_by_scid/_by_ident lock the returned sock;
 * every exit path below the lookups goes through bh_unlock_sock().
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* scid 0 means the peer has not assigned a channel yet
		 * (pending response); match on the request ident instead
		 */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Avoid sending a duplicate Configure Request */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* defer the teardown via a short timer instead */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4320
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004321static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004322{
4323 /* FCS is enabled only in ERTM or streaming mode, if one or both
4324 * sides request it.
4325 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004326 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4327 pi->fcs = L2CAP_FCS_NONE;
4328 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4329 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004330}
4331
/* Handle an L2CAP Configure Request from the peer.
 *
 * Accumulates (possibly fragmented) option data into conf_req, and once
 * the final fragment arrives parses it — either as a normal config
 * request or, if a channel move between BR/EDR and AMP is in progress,
 * as a move reconfiguration — then sends the Configure Response.  When
 * both directions are configured the channel becomes BT_CONNECTED.
 *
 * Returns 0, or -ENOENT if no channel matches the requested dcid.
 *
 * Note: l2cap_get_chan_by_scid locks the sock; all exits go through
 * the unlock label.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rspbuf[64];
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
	struct sock *sk;
	int len;
	u8 amp_move_reconf = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
		"reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
		sk->sk_state, l2cap_pi(sk)->rx_state,
		l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
		l2cap_pi(sk)->amp_move_id);

	/* Detect a reconfig request due to channel move between
	 * BR/EDR and AMP
	 */
	if (sk->sk_state == BT_CONNECTED &&
		l2cap_pi(sk)->rx_state ==
		L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;

	if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
		amp_move_reconf = 1;

	/* Config is only valid in BT_CONFIG state, or as a move reconfig;
	 * otherwise reject with "invalid CID"
	 */
	if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_REJECT, flags), rspbuf);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
		goto unlock;
	}

	/* Complete config. */
	if (!amp_move_reconf)
		len = l2cap_parse_conf_req(sk, rspbuf);
	else
		len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);

	/* Negative length means an unrecoverable parse failure */
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_pi(sk)->conf_ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
			rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
			!l2cap_pi(sk)->amp_id) {
		/* Send success response right after pending if using
		 * lockstep config on BR/EDR
		 */
		rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
	}

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* Move reconfig does not change the connection state machine */
	if (amp_move_reconf)
		goto unlock;

	l2cap_pi(sk)->num_conf_rsp++;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is up */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
			l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our outgoing config has not started yet; send it now */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
4459
/* Handle an L2CAP Configure Response from the peer.
 *
 * Dispatches on the response result: SUCCESS absorbs the RFC option,
 * PENDING (lockstep/AMP only) additionally absorbs the extended flow
 * spec and may bring up the AMP logical link, UNACCEPT renegotiates
 * with a fresh Configure Request (bounded by L2CAP_CONF_MAX_CONF_RSP),
 * and anything else disconnects.  When both directions are done the
 * channel becomes BT_CONNECTED.  Returns 0 (also when no channel
 * matches the scid).
 *
 * Note: l2cap_get_chan_by_scid locks the sock; all exits go through
 * the done label.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	int len = cmd->len - sizeof(*rsp);

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	pi = l2cap_pi(sk);

	/* A response received during an AMP move reconfiguration is
	 * handled by the dedicated path
	 */
	if (pi->reconf_state != L2CAP_RECONF_NONE) {
		l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
		goto done;
	}

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
			/* Lockstep procedure requires a pending response
			 * before success.
			 */
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_PENDING:
		/* A pending result is only valid in lockstep config */
		if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);

		pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;

		l2cap_conf_ext_fs_get(sk, rsp->data, len);

		if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
			struct hci_chan *chan;

			/* Already sent a 'pending' response, so set up
			 * the logical link now
			 */
			chan = l2cap_chan_admit(pi->amp_id, sk);
			if (!chan) {
				l2cap_send_disconn_req(pi->conn, sk,
							ECONNRESET);
				goto done;
			}

			if (chan->state == BT_CONNECTED)
				l2cap_create_cfm(chan, 0);
		}

		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			pi->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many rejections, give up and
		 * disconnect like any other failure result
		 */

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	/* More response fragments to come; wait for the final one */
	if (flags & 0x01)
		goto done;

	pi->conf_state |= L2CAP_CONF_INPUT_DONE;

	/* Both directions configured: the channel is up */
	if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(pi);

		sk->sk_state = BT_CONNECTED;

		if (pi->mode == L2CAP_MODE_ERTM ||
			pi->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
4585
/* Handle an L2CAP Disconnection Request from the peer.
 *
 * Acknowledges with a Disconnection Response, purges pending transmit
 * state (and ERTM work items) unless we already initiated disconnect
 * ourselves, then deletes and kills the channel.  If the socket is
 * currently locked by a user-space caller the teardown is deferred via
 * a short timer instead.  Always returns 0.
 *
 * Note: l2cap_get_chan_by_scid locks the sock; l2cap_sock_kill must
 * run after bh_unlock_sock.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		/* Drop queued outgoing data and stop ERTM machinery */
		sk->sk_send_head = NULL;
		skb_queue_purge(TX_QUEUE(sk));

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			skb_queue_purge(SREJ_QUEUE(sk));

			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
4638
/* Handle an L2CAP Disconnection Response — the peer confirmed the
 * disconnect we requested, so delete and kill the channel (no error is
 * propagated to the socket).  If the socket is locked by a user-space
 * caller, defer the teardown via a short timer.  Always returns 0.
 *
 * Note: l2cap_get_chan_by_scid locks the sock; l2cap_sock_kill must
 * run after bh_unlock_sock.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
4669
4670static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4671{
4672 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4673 u16 type;
4674
4675 type = __le16_to_cpu(req->type);
4676
4677 BT_DBG("type 0x%4.4x", type);
4678
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004679 if (type == L2CAP_IT_FEAT_MASK) {
4680 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004681 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004682 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4683 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4684 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004685 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004686 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004687 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004688 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004689 l2cap_send_cmd(conn, cmd->ident,
4690 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004691 } else if (type == L2CAP_IT_FIXED_CHAN) {
4692 u8 buf[12];
Peter Krystad1505bfa2012-06-08 10:47:27 -07004693 u8 fc_mask = l2cap_fc_mask;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004694 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4695 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4696 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Peter Krystad1505bfa2012-06-08 10:47:27 -07004697 if (enable_hs)
4698 fc_mask |= L2CAP_FC_A2MP;
4699 memset(rsp->data, 0, 8);
4700 rsp->data[0] = fc_mask;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004701 l2cap_send_cmd(conn, cmd->ident,
4702 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004703 } else {
4704 struct l2cap_info_rsp rsp;
4705 rsp.type = cpu_to_le16(type);
4706 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4707 l2cap_send_cmd(conn, cmd->ident,
4708 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4709 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004710
4711 return 0;
4712}
4713
4714static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4715{
4716 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4717 u16 type, result;
4718
4719 type = __le16_to_cpu(rsp->type);
4720 result = __le16_to_cpu(rsp->result);
4721
4722 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4723
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004724 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4725 if (cmd->ident != conn->info_ident ||
4726 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4727 return 0;
4728
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004729 del_timer(&conn->info_timer);
4730
Ville Tervoadb08ed2010-08-04 09:43:33 +03004731 if (result != L2CAP_IR_SUCCESS) {
4732 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4733 conn->info_ident = 0;
4734
4735 l2cap_conn_start(conn);
4736
4737 return 0;
4738 }
4739
Marcel Holtmann984947d2009-02-06 23:35:19 +01004740 if (type == L2CAP_IT_FEAT_MASK) {
Harvey Harrison83985312008-05-02 16:25:46 -07004741 conn->feat_mask = get_unaligned_le32(rsp->data);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004742
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07004743 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004744 struct l2cap_info_req req;
4745 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4746
4747 conn->info_ident = l2cap_get_ident(conn);
4748
4749 l2cap_send_cmd(conn, conn->info_ident,
4750 L2CAP_INFO_REQ, sizeof(req), &req);
4751 } else {
4752 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4753 conn->info_ident = 0;
4754
4755 l2cap_conn_start(conn);
4756 }
4757 } else if (type == L2CAP_IT_FIXED_CHAN) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004758 conn->fc_mask = rsp->data[0];
Marcel Holtmann984947d2009-02-06 23:35:19 +01004759 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004760 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004761
4762 l2cap_conn_start(conn);
4763 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004764
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765 return 0;
4766}
4767
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004768static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4769 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004770{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004771 struct l2cap_move_chan_req req;
4772 u8 ident;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004773
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004774 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4775 (int) dest_amp_id);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004777 ident = l2cap_get_ident(conn);
4778 if (pi)
4779 pi->ident = ident;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004780
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004781 req.icid = cpu_to_le16(icid);
4782 req.dest_amp_id = dest_amp_id;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004783
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004784 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004785}
4786
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004787static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004788 u16 icid, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004789{
4790 struct l2cap_move_chan_rsp rsp;
4791
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004792 BT_DBG("icid %d, result %d", (int) icid, (int) result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004793
4794 rsp.icid = cpu_to_le16(icid);
4795 rsp.result = cpu_to_le16(result);
4796
4797 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4798}
4799
4800static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004801 struct l2cap_pinfo *pi, u16 icid, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004802{
4803 struct l2cap_move_chan_cfm cfm;
4804 u8 ident;
4805
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004806 BT_DBG("icid %d, result %d", (int) icid, (int) result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004807
4808 ident = l2cap_get_ident(conn);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004809 if (pi)
4810 pi->ident = ident;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004811
4812 cfm.icid = cpu_to_le16(icid);
4813 cfm.result = cpu_to_le16(result);
4814
4815 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4816}
4817
4818static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004819 u16 icid)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004820{
4821 struct l2cap_move_chan_cfm_rsp rsp;
4822
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004823 BT_DBG("icid %d", (int) icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004824
4825 rsp.icid = cpu_to_le16(icid);
4826 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4827}
4828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004829static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4830 struct l2cap_cmd_hdr *cmd, u8 *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004831{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004832 struct l2cap_create_chan_req *req =
4833 (struct l2cap_create_chan_req *) data;
4834 struct sock *sk;
4835 u16 psm, scid;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004836
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004837 psm = le16_to_cpu(req->psm);
4838 scid = le16_to_cpu(req->scid);
4839
4840 BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
4841 (int) req->amp_id);
4842
4843 if (req->amp_id) {
4844 struct hci_dev *hdev;
4845
4846 /* Validate AMP controller id */
Peter Krystad4e1c9fa2011-11-10 12:28:45 -08004847 hdev = hci_dev_get(req->amp_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004848 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4849 struct l2cap_create_chan_rsp rsp;
4850
4851 rsp.dcid = 0;
4852 rsp.scid = cpu_to_le16(scid);
4853 rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
4854 rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
4855
4856 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4857 sizeof(rsp), &rsp);
4858
4859 if (hdev)
4860 hci_dev_put(hdev);
4861
4862 return 0;
4863 }
4864
4865 hci_dev_put(hdev);
4866 }
4867
4868 sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4869 req->amp_id);
4870
Mat Martineau55f2a622011-09-19 13:20:17 -07004871 if (sk)
4872 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004873
Mat Martineau55f2a622011-09-19 13:20:17 -07004874 if (sk && req->amp_id &&
4875 (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004876 amp_accept_physical(conn, req->amp_id, sk);
4877
4878 return 0;
4879}
4880
4881static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4882 struct l2cap_cmd_hdr *cmd, u8 *data)
4883{
4884 BT_DBG("conn %p", conn);
4885
4886 return l2cap_connect_rsp(conn, cmd, data);
4887}
4888
4889static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4890 struct l2cap_cmd_hdr *cmd, u8 *data)
4891{
4892 struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
4893 struct sock *sk;
4894 struct l2cap_pinfo *pi;
4895 u16 icid = 0;
4896 u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004897
4898 icid = le16_to_cpu(req->icid);
4899
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004900 BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004901
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004902 read_lock(&conn->chan_list.lock);
4903 sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
4904 read_unlock(&conn->chan_list.lock);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004905
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004906 if (!sk)
4907 goto send_move_response;
4908
4909 lock_sock(sk);
4910 pi = l2cap_pi(sk);
4911
4912 if (pi->scid < L2CAP_CID_DYN_START ||
4913 (pi->mode != L2CAP_MODE_ERTM &&
4914 pi->mode != L2CAP_MODE_STREAMING)) {
4915 goto send_move_response;
4916 }
4917
4918 if (pi->amp_id == req->dest_amp_id) {
4919 result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
4920 goto send_move_response;
4921 }
4922
4923 if (req->dest_amp_id) {
4924 struct hci_dev *hdev;
Peter Krystad4e1c9fa2011-11-10 12:28:45 -08004925 hdev = hci_dev_get(req->dest_amp_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004926 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4927 if (hdev)
4928 hci_dev_put(hdev);
4929
4930 result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
4931 goto send_move_response;
4932 }
Peter Krystadf7dcc792011-11-14 15:11:58 -08004933 hci_dev_put(hdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004934 }
4935
4936 if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
4937 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
4938 pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
4939 bacmp(conn->src, conn->dst) > 0) {
4940 result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
4941 goto send_move_response;
4942 }
4943
4944 if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
4945 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
4946 goto send_move_response;
4947 }
4948
4949 pi->amp_move_cmd_ident = cmd->ident;
4950 pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
4951 l2cap_amp_move_setup(sk);
4952 pi->amp_move_id = req->dest_amp_id;
4953 icid = pi->dcid;
4954
4955 if (req->dest_amp_id == 0) {
4956 /* Moving to BR/EDR */
4957 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
4958 pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
4959 result = L2CAP_MOVE_CHAN_PENDING;
4960 } else {
4961 pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
4962 result = L2CAP_MOVE_CHAN_SUCCESS;
4963 }
4964 } else {
4965 pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
4966 amp_accept_physical(pi->conn, req->dest_amp_id, sk);
4967 result = L2CAP_MOVE_CHAN_PENDING;
4968 }
4969
4970send_move_response:
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004971 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4972
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004973 if (sk)
4974 release_sock(sk);
4975
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004976 return 0;
4977}
4978
4979static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004980 struct l2cap_cmd_hdr *cmd, u8 *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004981{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004982 struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
4983 struct sock *sk;
4984 struct l2cap_pinfo *pi;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004985 u16 icid, result;
4986
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004987 icid = le16_to_cpu(rsp->icid);
4988 result = le16_to_cpu(rsp->result);
4989
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004990 BT_DBG("icid %d, result %d", (int) icid, (int) result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004991
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004992 switch (result) {
4993 case L2CAP_MOVE_CHAN_SUCCESS:
4994 case L2CAP_MOVE_CHAN_PENDING:
4995 read_lock(&conn->chan_list.lock);
4996 sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
4997 read_unlock(&conn->chan_list.lock);
4998
4999 if (!sk) {
5000 l2cap_send_move_chan_cfm(conn, NULL, icid,
5001 L2CAP_MOVE_CHAN_UNCONFIRMED);
5002 break;
5003 }
5004
5005 lock_sock(sk);
5006 pi = l2cap_pi(sk);
5007
5008 l2cap_sock_clear_timer(sk);
5009 if (result == L2CAP_MOVE_CHAN_PENDING)
5010 l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);
5011
5012 if (pi->amp_move_state ==
5013 L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
5014 /* Move confirm will be sent when logical link
5015 * is complete.
5016 */
5017 pi->amp_move_state =
5018 L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
Peter Krystad0750f602012-03-19 15:58:20 -07005019 } else if (pi->amp_move_state ==
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005020 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
Peter Krystad0750f602012-03-19 15:58:20 -07005021 if (result == L2CAP_MOVE_CHAN_PENDING) {
5022 break;
5023 } else if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005024 pi->amp_move_state =
5025 L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
5026 } else {
Peter Krystad0750f602012-03-19 15:58:20 -07005027 /* Logical link is up or moving to BR/EDR,
5028 * proceed with move */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005029 pi->amp_move_state =
5030 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
5031 l2cap_send_move_chan_cfm(conn, pi, pi->scid,
5032 L2CAP_MOVE_CHAN_CONFIRMED);
5033 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
5034 }
5035 } else if (pi->amp_move_state ==
5036 L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
5037 struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
5038 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
5039 struct hci_chan *chan;
5040 /* Moving to AMP */
5041 if (result == L2CAP_MOVE_CHAN_SUCCESS) {
5042 /* Remote is ready, send confirm immediately
5043 * after logical link is ready
5044 */
5045 pi->amp_move_state =
5046 L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
5047 } else {
5048 /* Both logical link and move success
5049 * are required to confirm
5050 */
5051 pi->amp_move_state =
5052 L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
5053 }
5054 pi->remote_fs = default_fs;
5055 pi->local_fs = default_fs;
Peter Krystada8417e62012-03-21 16:58:17 -07005056 chan = l2cap_chan_admit(pi->amp_move_id, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005057 if (!chan) {
5058 /* Logical link not available */
5059 l2cap_send_move_chan_cfm(conn, pi, pi->scid,
5060 L2CAP_MOVE_CHAN_UNCONFIRMED);
5061 break;
5062 }
Mat Martineau9f8d4672011-12-14 12:10:46 -08005063
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005064 if (chan->state == BT_CONNECTED) {
5065 /* Logical link is already ready to go */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005066 pi->ampcon = chan->conn;
5067 pi->ampcon->l2cap_data = pi->conn;
5068 if (result == L2CAP_MOVE_CHAN_SUCCESS) {
5069 /* Can confirm now */
5070 l2cap_send_move_chan_cfm(conn, pi,
5071 pi->scid,
5072 L2CAP_MOVE_CHAN_CONFIRMED);
5073 } else {
5074 /* Now only need move success
5075 * required to confirm
5076 */
5077 pi->amp_move_state =
5078 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
5079 }
Mat Martineau9f8d4672011-12-14 12:10:46 -08005080
5081 l2cap_create_cfm(chan, 0);
5082 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005083 } else {
5084 /* Any other amp move state means the move failed. */
Peter Krystad42778422012-02-28 15:20:59 -08005085 pi->amp_move_id = pi->amp_id;
5086 pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
5087 l2cap_amp_move_revert(sk);
5088 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005089 l2cap_send_move_chan_cfm(conn, pi, pi->scid,
5090 L2CAP_MOVE_CHAN_UNCONFIRMED);
5091 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
5092 }
5093 break;
5094 default:
5095 /* Failed (including collision case) */
5096 read_lock(&conn->chan_list.lock);
5097 sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
5098 read_unlock(&conn->chan_list.lock);
5099
5100 if (!sk) {
5101 /* Could not locate channel, icid is best guess */
5102 l2cap_send_move_chan_cfm(conn, NULL, icid,
5103 L2CAP_MOVE_CHAN_UNCONFIRMED);
5104 break;
5105 }
5106
5107 lock_sock(sk);
5108 pi = l2cap_pi(sk);
5109
5110 l2cap_sock_clear_timer(sk);
5111
5112 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
5113 if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
5114 pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
5115 else {
5116 /* Cleanup - cancel move */
5117 pi->amp_move_id = pi->amp_id;
5118 pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
5119 l2cap_amp_move_revert(sk);
5120 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
5121 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005122 }
5123
5124 l2cap_send_move_chan_cfm(conn, pi, pi->scid,
5125 L2CAP_MOVE_CHAN_UNCONFIRMED);
5126 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
5127 break;
5128 }
5129
5130 if (sk)
5131 release_sock(sk);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005132
5133 return 0;
5134}
5135
5136static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005137 struct l2cap_cmd_hdr *cmd, u8 *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005138{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005139 struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
5140 struct sock *sk;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005141 struct l2cap_pinfo *pi;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005142 u16 icid, result;
5143
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005144 icid = le16_to_cpu(cfm->icid);
5145 result = le16_to_cpu(cfm->result);
5146
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005147 BT_DBG("icid %d, result %d", (int) icid, (int) result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005148
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005149 read_lock(&conn->chan_list.lock);
5150 sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
5151 read_unlock(&conn->chan_list.lock);
5152
5153 if (!sk) {
5154 BT_DBG("Bad channel (%d)", (int) icid);
5155 goto send_move_confirm_response;
5156 }
5157
5158 lock_sock(sk);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005159 pi = l2cap_pi(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005160
Mat Martineau9f8d4672011-12-14 12:10:46 -08005161 if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
5162 pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005163 if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
Mat Martineau9f8d4672011-12-14 12:10:46 -08005164 pi->amp_id = pi->amp_move_id;
5165 if (!pi->amp_id && pi->ampchan) {
5166 struct hci_chan *ampchan = pi->ampchan;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005167 struct hci_conn *ampcon = pi->ampcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005168 /* Have moved off of AMP, free the channel */
Mat Martineau9f8d4672011-12-14 12:10:46 -08005169 pi->ampchan = NULL;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005170 pi->ampcon = NULL;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005171 if (hci_chan_put(ampchan))
5172 ampcon->l2cap_data = NULL;
5173 else
5174 l2cap_deaggregate(ampchan, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005175 }
5176 l2cap_amp_move_success(sk);
5177 } else {
Mat Martineau9f8d4672011-12-14 12:10:46 -08005178 pi->amp_move_id = pi->amp_id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005179 l2cap_amp_move_revert(sk);
5180 }
Mat Martineau9f8d4672011-12-14 12:10:46 -08005181 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
5182 } else if (pi->amp_move_state ==
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005183 L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
Mat Martineau9f8d4672011-12-14 12:10:46 -08005184 BT_DBG("Bad AMP_MOVE_STATE (%d)", pi->amp_move_state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005185 }
5186
5187send_move_confirm_response:
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005188 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5189
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005190 if (sk)
5191 release_sock(sk);
5192
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005193 return 0;
5194}
5195
5196static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005197 struct l2cap_cmd_hdr *cmd, u8 *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005198{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005199 struct l2cap_move_chan_cfm_rsp *rsp =
5200 (struct l2cap_move_chan_cfm_rsp *) data;
5201 struct sock *sk;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005202 struct l2cap_pinfo *pi;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005203
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005204 u16 icid;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005205
5206 icid = le16_to_cpu(rsp->icid);
5207
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005208 BT_DBG("icid %d", (int) icid);
5209
5210 read_lock(&conn->chan_list.lock);
5211 sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
5212 read_unlock(&conn->chan_list.lock);
5213
5214 if (!sk)
5215 return 0;
5216
5217 lock_sock(sk);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005218 pi = l2cap_pi(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005219
5220 l2cap_sock_clear_timer(sk);
5221
Mat Martineau9f8d4672011-12-14 12:10:46 -08005222 if (pi->amp_move_state ==
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005223 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
Mat Martineau9f8d4672011-12-14 12:10:46 -08005224 pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
5225 pi->amp_id = pi->amp_move_id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005226
Peter Krystaddcfeee22012-03-07 12:51:18 -08005227 if (!pi->amp_id && pi->ampchan) {
Mat Martineau9f8d4672011-12-14 12:10:46 -08005228 struct hci_chan *ampchan = pi->ampchan;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005229 struct hci_conn *ampcon = pi->ampcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005230 /* Have moved off of AMP, free the channel */
Mat Martineau9f8d4672011-12-14 12:10:46 -08005231 pi->ampchan = NULL;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005232 pi->ampcon = NULL;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005233 if (hci_chan_put(ampchan))
5234 ampcon->l2cap_data = NULL;
5235 else
Mat Martineau9f8d4672011-12-14 12:10:46 -08005236 l2cap_deaggregate(ampchan, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005237 }
5238
5239 l2cap_amp_move_success(sk);
5240
Mat Martineau9f8d4672011-12-14 12:10:46 -08005241 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005242 }
5243
5244 release_sock(sk);
5245
5246 return 0;
5247}
5248
5249static void l2cap_amp_signal_worker(struct work_struct *work)
5250{
5251 int err = 0;
5252 struct l2cap_amp_signal_work *ampwork =
5253 container_of(work, struct l2cap_amp_signal_work, work);
5254
5255 switch (ampwork->cmd.code) {
5256 case L2CAP_MOVE_CHAN_REQ:
5257 err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
5258 ampwork->data);
5259 break;
5260
5261 case L2CAP_MOVE_CHAN_RSP:
5262 err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
5263 ampwork->data);
5264 break;
5265
5266 case L2CAP_MOVE_CHAN_CFM:
5267 err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
5268 ampwork->data);
5269 break;
5270
5271 case L2CAP_MOVE_CHAN_CFM_RSP:
5272 err = l2cap_move_channel_confirm_rsp(ampwork->conn,
5273 &ampwork->cmd, ampwork->data);
5274 break;
5275
5276 default:
5277 BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
5278 err = -EINVAL;
5279 break;
5280 }
5281
5282 if (err) {
5283 struct l2cap_cmd_rej rej;
5284 BT_DBG("error %d", err);
5285
5286 /* In this context, commands are only rejected with
5287 * "command not understood", code 0.
5288 */
5289 rej.reason = cpu_to_le16(0);
5290 l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
5291 L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5292 }
5293
5294 kfree_skb(ampwork->skb);
5295 kfree(ampwork);
5296}
5297
5298void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
5299 struct sock *sk)
5300{
5301 struct l2cap_pinfo *pi;
5302
5303 BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
5304 (int) local_id, (int) remote_id, sk);
5305
5306 lock_sock(sk);
5307
5308 if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
5309 release_sock(sk);
5310 return;
5311 }
5312
5313 pi = l2cap_pi(sk);
5314
5315 if (sk->sk_state != BT_CONNECTED) {
5316 if (bt_sk(sk)->parent) {
5317 struct l2cap_conn_rsp rsp;
5318 char buf[128];
5319 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
5320 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
5321
5322 /* Incoming channel on AMP */
5323 if (result == L2CAP_CREATE_CHAN_SUCCESS) {
5324 /* Send successful response */
5325 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5326 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5327 } else {
5328 /* Send negative response */
5329 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5330 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5331 }
5332
5333 l2cap_send_cmd(pi->conn, pi->ident,
5334 L2CAP_CREATE_CHAN_RSP,
5335 sizeof(rsp), &rsp);
5336
5337 if (result == L2CAP_CREATE_CHAN_SUCCESS) {
5338 sk->sk_state = BT_CONFIG;
5339 pi->conf_state |= L2CAP_CONF_REQ_SENT;
5340 l2cap_send_cmd(pi->conn,
5341 l2cap_get_ident(pi->conn),
5342 L2CAP_CONF_REQ,
5343 l2cap_build_conf_req(sk, buf), buf);
5344 l2cap_pi(sk)->num_conf_req++;
5345 }
5346 } else {
5347 /* Outgoing channel on AMP */
5348 if (result != L2CAP_CREATE_CHAN_SUCCESS) {
5349 /* Revert to BR/EDR connect */
5350 l2cap_send_conn_req(sk);
5351 } else {
5352 pi->amp_id = local_id;
5353 l2cap_send_create_chan_req(sk, remote_id);
5354 }
5355 }
5356 } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
5357 pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
5358 l2cap_amp_move_setup(sk);
5359 pi->amp_move_id = local_id;
5360 pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;
5361
5362 l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
5363 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
5364 } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
5365 pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
5366 struct hci_chan *chan;
5367 struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
5368 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
5369 pi->remote_fs = default_fs;
5370 pi->local_fs = default_fs;
Peter Krystada8417e62012-03-21 16:58:17 -07005371 chan = l2cap_chan_admit(local_id, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005372 if (chan) {
5373 if (chan->state == BT_CONNECTED) {
5374 /* Logical link is ready to go */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005375 pi->ampcon = chan->conn;
5376 pi->ampcon->l2cap_data = pi->conn;
5377 pi->amp_move_state =
5378 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
5379 l2cap_send_move_chan_rsp(pi->conn,
5380 pi->amp_move_cmd_ident, pi->dcid,
5381 L2CAP_MOVE_CHAN_SUCCESS);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005382
5383 l2cap_create_cfm(chan, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005384 } else {
5385 /* Wait for logical link to be ready */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005386 pi->amp_move_state =
5387 L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
5388 }
5389 } else {
5390 /* Logical link not available */
5391 l2cap_send_move_chan_rsp(pi->conn,
5392 pi->amp_move_cmd_ident, pi->dcid,
5393 L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
5394 }
5395 } else {
5396 BT_DBG("result %d, role %d, local_busy %d", result,
5397 (int) pi->amp_move_role,
5398 (int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));
5399
5400 if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
5401 if (result == -EINVAL)
5402 l2cap_send_move_chan_rsp(pi->conn,
5403 pi->amp_move_cmd_ident, pi->dcid,
5404 L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
5405 else
5406 l2cap_send_move_chan_rsp(pi->conn,
5407 pi->amp_move_cmd_ident, pi->dcid,
5408 L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
5409 }
5410
5411 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
5412 pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
5413
5414 if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
5415 l2cap_rmem_available(sk))
5416 l2cap_ertm_tx(sk, 0, 0,
5417 L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
5418
5419 /* Restart data transmission */
5420 l2cap_ertm_send(sk);
5421 }
5422
5423 release_sock(sk);
5424}
5425
Peter Krystadff9718f2012-04-26 16:17:50 -07005426static void l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005427{
5428 struct l2cap_pinfo *pi;
5429 struct sock *sk;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005430 struct hci_chan *ampchan;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005431 struct hci_conn *ampcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005432
5433 BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);
5434
5435 sk = chan->l2cap_sk;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005436 chan->l2cap_sk = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005437
5438 BT_DBG("sk %p", sk);
5439
5440 lock_sock(sk);
5441
5442 if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
5443 release_sock(sk);
Peter Krystadff9718f2012-04-26 16:17:50 -07005444 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005445 }
5446
5447 pi = l2cap_pi(sk);
5448
5449 if ((!status) && (chan != NULL)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005450 pi->ampcon = chan->conn;
5451 pi->ampcon->l2cap_data = pi->conn;
5452
Peter Krystad42778422012-02-28 15:20:59 -08005453 BT_DBG("amp_move_state %d", pi->amp_move_state);
5454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005455 if (sk->sk_state != BT_CONNECTED) {
5456 struct l2cap_conf_rsp rsp;
5457
5458 /* Must use spinlock to prevent concurrent
5459 * execution of l2cap_config_rsp()
5460 */
5461 bh_lock_sock(sk);
5462 l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
5463 l2cap_build_conf_rsp(sk, &rsp,
5464 L2CAP_CONF_SUCCESS, 0), &rsp);
5465 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
5466
5467 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
5468 set_default_fcs(l2cap_pi(sk));
5469
5470 sk->sk_state = BT_CONNECTED;
5471
5472 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
5473 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
5474 l2cap_ertm_init(sk);
5475
5476 l2cap_chan_ready(sk);
5477 }
5478 bh_unlock_sock(sk);
5479 } else if (pi->amp_move_state ==
5480 L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
5481 /* Move confirm will be sent after a success
5482 * response is received
5483 */
5484 pi->amp_move_state =
5485 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
5486 } else if (pi->amp_move_state ==
5487 L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
5488 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
5489 pi->amp_move_state =
5490 L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
5491 else if (pi->amp_move_role ==
5492 L2CAP_AMP_MOVE_INITIATOR) {
5493 pi->amp_move_state =
5494 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
5495 l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
5496 L2CAP_MOVE_CHAN_SUCCESS);
5497 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
5498 } else if (pi->amp_move_role ==
5499 L2CAP_AMP_MOVE_RESPONDER) {
5500 pi->amp_move_state =
5501 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
5502 l2cap_send_move_chan_rsp(pi->conn,
5503 pi->amp_move_cmd_ident, pi->dcid,
5504 L2CAP_MOVE_CHAN_SUCCESS);
5505 }
Peter Krystad42778422012-02-28 15:20:59 -08005506 } else if ((pi->amp_move_state !=
5507 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) &&
5508 (pi->amp_move_state !=
Peter Krystada609f1d2012-04-03 16:06:17 -07005509 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) &&
5510 (pi->amp_move_state !=
5511 L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP)) {
Peter Krystaddcfeee22012-03-07 12:51:18 -08005512 /* Move was not in expected state, free the channel */
Peter Krystad42778422012-02-28 15:20:59 -08005513 ampchan = pi->ampchan;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005514 ampcon = pi->ampcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005515 pi->ampchan = NULL;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005516 pi->ampcon = NULL;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005517 if (ampchan) {
5518 if (hci_chan_put(ampchan))
5519 ampcon->l2cap_data = NULL;
5520 else
5521 l2cap_deaggregate(ampchan, pi);
5522 }
Peter Krystad42778422012-02-28 15:20:59 -08005523 pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005524 }
5525 } else {
5526 /* Logical link setup failed. */
5527
5528 if (sk->sk_state != BT_CONNECTED)
5529 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5530 else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
5531 l2cap_amp_move_revert(sk);
5532 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
5533 pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
5534 l2cap_send_move_chan_rsp(pi->conn,
5535 pi->amp_move_cmd_ident, pi->dcid,
5536 L2CAP_MOVE_CHAN_REFUSED_CONFIG);
5537 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
5538 if ((pi->amp_move_state ==
5539 L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
5540 (pi->amp_move_state ==
5541 L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
5542 /* Remote has only sent pending or
5543 * success responses, clean up
5544 */
5545 l2cap_amp_move_revert(sk);
5546 l2cap_pi(sk)->amp_move_role =
5547 L2CAP_AMP_MOVE_NONE;
5548 pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
5549 }
5550
5551 /* Other amp move states imply that the move
5552 * has already aborted
5553 */
5554 l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
5555 L2CAP_MOVE_CHAN_UNCONFIRMED);
5556 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
5557 }
Mat Martineau9f8d4672011-12-14 12:10:46 -08005558 ampchan = pi->ampchan;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005559 ampcon = pi->ampcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005560 pi->ampchan = NULL;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005561 pi->ampcon = NULL;
Peter Krystaddcfeee22012-03-07 12:51:18 -08005562 if (ampchan) {
5563 if (hci_chan_put(ampchan))
5564 ampcon->l2cap_data = NULL;
5565 else
5566 l2cap_deaggregate(ampchan, pi);
5567 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005568 }
5569
5570 release_sock(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005571}
5572
5573static void l2cap_logical_link_worker(struct work_struct *work)
5574{
5575 struct l2cap_logical_link_work *log_link_work =
5576 container_of(work, struct l2cap_logical_link_work, work);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005577 struct sock *sk = log_link_work->chan->l2cap_sk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005578
Peter Krystad6bba0082012-05-23 17:08:16 -07005579 if (sk) {
5580 l2cap_logical_link_complete(log_link_work->chan,
5581 log_link_work->status);
5582 sock_put(sk);
5583 }
Mat Martineau9f8d4672011-12-14 12:10:46 -08005584 hci_chan_put(log_link_work->chan);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005585 kfree(log_link_work);
5586}
5587
5588static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5589{
5590 struct l2cap_logical_link_work *amp_work;
5591
Peter Krystada8417e62012-03-21 16:58:17 -07005592 if (!chan->l2cap_sk) {
Mat Martineau9f8d4672011-12-14 12:10:46 -08005593 BT_ERR("Expected l2cap_sk to point to connecting socket");
5594 return -EFAULT;
5595 }
5596
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005597 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005598 if (!amp_work) {
5599 sock_put(chan->l2cap_sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005600 return -ENOMEM;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005601 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005602
5603 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5604 amp_work->chan = chan;
5605 amp_work->status = status;
Mat Martineau9f8d4672011-12-14 12:10:46 -08005606
5607 hci_chan_hold(chan);
5608
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005609 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5610 kfree(amp_work);
Mat Martineau9f8d4672011-12-14 12:10:46 -08005611 sock_put(chan->l2cap_sk);
5612 hci_chan_put(chan);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005613 return -ENOMEM;
5614 }
5615
5616 return 0;
5617}
5618
/*
 * HCI callback: an AMP logical-link modify operation has completed.
 *
 * Currently a stub that only logs the outcome; no state is updated.
 *
 * @chan:   modified HCI channel
 * @status: HCI status of the modify operation
 *
 * Returns 0 always.
 */
int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
{
	struct l2cap_conn *conn = chan->conn->l2cap_data;

	BT_DBG("chan %p conn %p status %d", chan, conn, status);

	/* TODO: if failed status restore previous fs */
	return 0;
}
5628
/*
 * HCI callback: the AMP channel @chan is being destroyed.
 *
 * Walks every socket on the L2CAP connection and detaches any socket
 * still bound to @chan, then restarts channel-move negotiation for it.
 *
 * @chan:   HCI channel going away
 * @reason: destroy reason (currently unused)
 *
 * Returns 0 always.
 */
int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = chan->conn->l2cap_data;
	struct sock *sk;

	BT_DBG("chan %p conn %p", chan, conn);

	if (!conn)
		return 0;

	l = &conn->chan_list;

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);
		/* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
		if (l2cap_pi(sk)->ampchan == chan) {
			/* Snapshot ampcon before clearing the socket's AMP
			 * pointers, so it can still be used after the last
			 * channel reference is dropped.
			 */
			struct hci_conn *ampcon = l2cap_pi(sk)->ampcon;
			l2cap_pi(sk)->ampchan = NULL;
			l2cap_pi(sk)->ampcon = NULL;
			/* NOTE(review): pattern used throughout this file
			 * suggests hci_chan_put() returns nonzero when the
			 * final reference was dropped — confirm.  Last ref:
			 * detach the hci_conn from L2CAP; otherwise just
			 * remove this socket from the channel aggregation.
			 */
			if (hci_chan_put(chan))
				ampcon->l2cap_data = NULL;
			else
				l2cap_deaggregate(chan, l2cap_pi(sk));

			/* Kick the socket to move back to BR/EDR */
			l2cap_amp_move_init(sk);
		}
		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;


}
5667
5668static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5669 u8 *data, struct sk_buff *skb)
5670{
5671 struct l2cap_amp_signal_work *amp_work;
5672
5673 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5674 if (!amp_work)
5675 return -ENOMEM;
5676
5677 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5678 amp_work->conn = conn;
5679 amp_work->cmd = *cmd;
5680 amp_work->data = data;
5681 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5682 if (!amp_work->skb) {
5683 kfree(amp_work);
5684 return -ENOMEM;
5685 }
5686
5687 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5688 kfree_skb(amp_work->skb);
5689 kfree(amp_work);
5690 return -ENOMEM;
5691 }
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005692
5693 return 0;
5694}
5695
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005696static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005697 u16 to_multiplier)
5698{
5699 u16 max_latency;
5700
5701 if (min > max || min < 6 || max > 3200)
5702 return -EINVAL;
5703
5704 if (to_multiplier < 10 || to_multiplier > 3200)
5705 return -EINVAL;
5706
5707 if (max >= to_multiplier * 8)
5708 return -EINVAL;
5709
5710 max_latency = (to_multiplier * 8 / max) - 1;
5711 if (latency > 499 || latency > max_latency)
5712 return -EINVAL;
5713
5714 return 0;
5715}
5716
/*
 * Handle an LE Connection Parameter Update Request from the remote.
 *
 * Only valid when the local device is master.  The update is applied
 * (and cached on the fixed-CID socket's le_params) only when a fixed
 * CID 4 (ATT) socket exists for this address pair, it does not prohibit
 * remote parameter changes, and the parameters pass validation.  A
 * response — rejected by default, accepted on success — is always sent.
 *
 * Returns 0 on success, -EINVAL if not master, -EPROTO on bad length.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	struct sock *sk;
	u16 min, max, latency, timeout, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	/* Default response: rejected */
	memset(&rsp, 0, sizeof(rsp));
	rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);

	/* NOTE(review): if this lookup takes a socket reference it is
	 * never released in this function — verify the refcounting
	 * contract of l2cap_find_sock_by_fixed_cid_and_dir().
	 */
	sk = l2cap_find_sock_by_fixed_cid_and_dir(4, conn->src, conn->dst, 0);

	if (sk && !bt_sk(sk)->le_params.prohibit_remote_chg) {
		req = (struct l2cap_conn_param_update_req *) data;
		min = __le16_to_cpu(req->min);
		max = __le16_to_cpu(req->max);
		latency = __le16_to_cpu(req->latency);
		timeout = __le16_to_cpu(req->to_multiplier);

		err = l2cap_check_conn_param(min, max, latency, timeout);
		if (!err) {
			/* Accept: update the controller and remember the
			 * negotiated parameters on the socket.
			 */
			rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
			hci_le_conn_update(hcon, min, max, latency, timeout);
			bt_sk(sk)->le_params.interval_min = min;
			bt_sk(sk)->le_params.interval_max = max;
			bt_sk(sk)->le_params.latency = latency;
			bt_sk(sk)->le_params.supervision_timeout = timeout;
		}
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		sizeof(rsp), &rsp);


	return 0;
}
5763
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005764static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005765 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
5766 struct sk_buff *skb)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005767{
5768 int err = 0;
5769
5770 switch (cmd->code) {
5771 case L2CAP_COMMAND_REJ:
5772 l2cap_command_rej(conn, cmd, data);
5773 break;
5774
5775 case L2CAP_CONN_REQ:
5776 err = l2cap_connect_req(conn, cmd, data);
5777 break;
5778
5779 case L2CAP_CONN_RSP:
5780 err = l2cap_connect_rsp(conn, cmd, data);
5781 break;
5782
5783 case L2CAP_CONF_REQ:
5784 err = l2cap_config_req(conn, cmd, cmd_len, data);
5785 break;
5786
5787 case L2CAP_CONF_RSP:
5788 err = l2cap_config_rsp(conn, cmd, data);
5789 break;
5790
5791 case L2CAP_DISCONN_REQ:
5792 err = l2cap_disconnect_req(conn, cmd, data);
5793 break;
5794
5795 case L2CAP_DISCONN_RSP:
5796 err = l2cap_disconnect_rsp(conn, cmd, data);
5797 break;
5798
5799 case L2CAP_ECHO_REQ:
5800 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5801 break;
5802
5803 case L2CAP_ECHO_RSP:
5804 break;
5805
5806 case L2CAP_INFO_REQ:
5807 err = l2cap_information_req(conn, cmd, data);
5808 break;
5809
5810 case L2CAP_INFO_RSP:
5811 err = l2cap_information_rsp(conn, cmd, data);
5812 break;
5813
Mat Martineauf94ff6f2011-11-02 16:18:32 -07005814 case L2CAP_CREATE_CHAN_REQ:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005815 err = l2cap_create_channel_req(conn, cmd, data);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07005816 break;
5817
5818 case L2CAP_CREATE_CHAN_RSP:
5819 err = l2cap_create_channel_rsp(conn, cmd, data);
5820 break;
5821
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005822 case L2CAP_MOVE_CHAN_REQ:
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005823 case L2CAP_MOVE_CHAN_RSP:
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005824 case L2CAP_MOVE_CHAN_CFM:
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005825 case L2CAP_MOVE_CHAN_CFM_RSP:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005826 err = l2cap_sig_amp(conn, cmd, data, skb);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005827 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005828 default:
5829 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5830 err = -EINVAL;
5831 break;
5832 }
5833
5834 return err;
5835}
5836
5837static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5838 struct l2cap_cmd_hdr *cmd, u8 *data)
5839{
5840 switch (cmd->code) {
5841 case L2CAP_COMMAND_REJ:
5842 return 0;
5843
5844 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005845 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005846
5847 case L2CAP_CONN_PARAM_UPDATE_RSP:
5848 return 0;
5849
5850 default:
5851 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5852 return -EINVAL;
5853 }
5854}
5855
/*
 * Process every signaling command packed into one signaling-channel skb.
 *
 * Commands are parsed back-to-back from the skb; parsing stops at the
 * first malformed header (length overrun or zero ident).  Handler
 * errors are answered with an L2CAP Command Reject.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
							data, skb);

		if (err) {
			struct l2cap_cmd_rej rej;

			/* NOTE(review): this is logged for ANY handler
			 * error, not just a link-type mismatch — the
			 * message text is misleading.
			 */
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5903
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005904static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005905{
5906 u16 our_fcs, rcv_fcs;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005907 int hdr_size;
5908
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005909 if (pi->extended_control)
5910 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005911 else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005912 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005913
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005914 if (pi->fcs == L2CAP_FCS_CRC16) {
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005915 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005916 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5917 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5918
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005919 if (our_fcs != rcv_fcs) {
5920 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005921 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005922 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005923 }
5924 return 0;
5925}
5926
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005927static void l2cap_ertm_pass_to_tx(struct sock *sk,
5928 struct bt_l2cap_control *control)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005929{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005930 BT_DBG("sk %p, control %p", sk, control);
5931 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005932}
5933
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005934static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
5935 struct bt_l2cap_control *control)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005936{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005937 BT_DBG("sk %p, control %p", sk, control);
5938 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
5939}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005940
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005941static void l2cap_ertm_resend(struct sock *sk)
5942{
5943 struct bt_l2cap_control control;
5944 struct l2cap_pinfo *pi;
5945 struct sk_buff *skb;
5946 struct sk_buff *tx_skb;
5947 u16 seq;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005948
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005949 BT_DBG("sk %p", sk);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005950
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005951 pi = l2cap_pi(sk);
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005952
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005953 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
5954 return;
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03005955
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005956 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
5957 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
5958 return;
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005959
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005960 while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
5961 seq = l2cap_seq_list_pop(&pi->retrans_list);
5962
5963 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
5964 if (!skb) {
5965 BT_DBG("Error: Can't retransmit seq %d, frame missing",
5966 (int) seq);
5967 continue;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005968 }
5969
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005970 bt_cb(skb)->retries += 1;
5971 control = bt_cb(skb)->control;
5972
5973 if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
5974 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
5975 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5976 l2cap_seq_list_clear(&pi->retrans_list);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005977 break;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005978 }
5979
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005980 control.reqseq = pi->buffer_seq;
5981 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
5982 control.final = 1;
5983 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
5984 } else {
5985 control.final = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005986 }
5987
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005988 if (skb_cloned(skb)) {
5989 /* Cloned sk_buffs are read-only, so we need a
5990 * writeable copy
5991 */
5992 tx_skb = skb_copy(skb, GFP_ATOMIC);
5993 } else {
5994 tx_skb = skb_clone(skb, GFP_ATOMIC);
5995 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005996
Mat Martineau0c04ef92011-12-07 16:41:22 -08005997 if (!tx_skb) {
5998 l2cap_seq_list_clear(&pi->retrans_list);
5999 break;
6000 }
6001
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006002 /* Update skb contents */
6003 if (pi->extended_control) {
6004 put_unaligned_le32(__pack_extended_control(&control),
6005 tx_skb->data + L2CAP_HDR_SIZE);
6006 } else {
6007 put_unaligned_le16(__pack_enhanced_control(&control),
6008 tx_skb->data + L2CAP_HDR_SIZE);
6009 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03006010
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006011 if (pi->fcs == L2CAP_FCS_CRC16)
6012 apply_fcs(tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03006013
Mat Martineau2f0cd842011-10-20 14:34:26 -07006014 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006015 tx_skb->sk = sk;
6016 tx_skb->destructor = l2cap_skb_destructor;
6017 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03006018
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006019 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03006020
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006021 BT_DBG("Resent txseq %d", (int)control.txseq);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03006022
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006023 pi->last_acked_seq = pi->buffer_seq;
Szymon Janc039d9572011-11-16 09:32:19 +01006024 }
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03006025}
6026
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006027static inline void l2cap_ertm_retransmit(struct sock *sk,
6028 struct bt_l2cap_control *control)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03006029{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006030 BT_DBG("sk %p, control %p", sk, control);
6031
6032 l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
6033 l2cap_ertm_resend(sk);
6034}
6035
/*
 * Retransmit all unacked frames starting from control->reqseq (REJ
 * recovery).  A poll request latches the F-bit for the next frame.
 * Nothing is queued while the remote side is busy.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	/* Start from a clean list; only the range below is retransmitted */
	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		/* Advance the cursor to the requested txseq (or to the
		 * first never-sent frame, whichever comes first) ...
		 */
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
				skb == sk->sk_send_head)
				break;
		}

		/* ... then queue every already-sent frame from there on */
		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
						bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
6072
/*
 * Append @new_frag to @skb's frag_list during SDU reassembly.
 *
 * @last_frag is the caller's cursor to the current tail fragment (the
 * head skb itself when the list is empty); it is advanced to
 * @new_frag.  The head skb's length accounting is updated.
 */
static inline void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments;
	 * skb->data_len reflects only data in fragments.
	 */
	BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);

	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6093
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006094static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
6095 struct bt_l2cap_control *control, struct sk_buff *skb)
Mat Martineau84084a32011-07-22 14:54:00 -07006096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006097 struct l2cap_pinfo *pi;
Mat Martineau84084a32011-07-22 14:54:00 -07006098 int err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006099
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006100 BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
6101 skb, skb->len, skb->truesize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006102
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006103 if (!control)
6104 return err;
6105
6106 pi = l2cap_pi(sk);
6107
6108 BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
6109 control->frame_type, control->sar, control->txseq,
6110 control->reqseq, control->final);
6111
6112 switch (control->sar) {
6113 case L2CAP_SAR_UNSEGMENTED:
6114 if (pi->sdu) {
6115 BT_DBG("Unexpected unsegmented PDU during reassembly");
6116 kfree_skb(pi->sdu);
6117 pi->sdu = NULL;
6118 pi->sdu_last_frag = NULL;
6119 pi->sdu_len = 0;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03006120 }
6121
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006122 BT_DBG("Unsegmented");
6123 err = sock_queue_rcv_skb(sk, skb);
Mat Martineau84084a32011-07-22 14:54:00 -07006124 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006125
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03006126 case L2CAP_SAR_START:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006127 if (pi->sdu) {
6128 BT_DBG("Unexpected start PDU during reassembly");
6129 kfree_skb(pi->sdu);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03006130 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006131
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006132 pi->sdu_len = get_unaligned_le16(skb->data);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03006133 skb_pull(skb, 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006134
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006135 if (pi->sdu_len > pi->imtu) {
Mat Martineau84084a32011-07-22 14:54:00 -07006136 err = -EMSGSIZE;
6137 break;
6138 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006139
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006140 if (skb->len >= pi->sdu_len)
Mat Martineau84084a32011-07-22 14:54:00 -07006141 break;
6142
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006143 pi->sdu = skb;
6144 pi->sdu_last_frag = skb;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03006145
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006146 BT_DBG("Start");
Mat Martineau84084a32011-07-22 14:54:00 -07006147
6148 skb = NULL;
6149 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006150 break;
6151
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03006152 case L2CAP_SAR_CONTINUE:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006153 if (!pi->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07006154 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006155
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006156 append_skb_frag(pi->sdu, skb,
6157 &pi->sdu_last_frag);
Mat Martineau84084a32011-07-22 14:54:00 -07006158 skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006159
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006160 if (pi->sdu->len >= pi->sdu_len)
Mat Martineau84084a32011-07-22 14:54:00 -07006161 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006163 BT_DBG("Continue, reassembled %d", pi->sdu->len);
6164
Mat Martineau84084a32011-07-22 14:54:00 -07006165 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006166 break;
6167
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03006168 case L2CAP_SAR_END:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006169 if (!pi->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07006170 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006171
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006172 append_skb_frag(pi->sdu, skb,
6173 &pi->sdu_last_frag);
Mat Martineau84084a32011-07-22 14:54:00 -07006174 skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006176 if (pi->sdu->len != pi->sdu_len)
Mat Martineau84084a32011-07-22 14:54:00 -07006177 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006179 BT_DBG("End, reassembled %d", pi->sdu->len);
6180 /* If the sender used tiny PDUs, the rcv queuing could fail.
6181 * Applications that have issues here should use a larger
6182 * sk_rcvbuf.
6183 */
6184 err = sock_queue_rcv_skb(sk, pi->sdu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006185
Mat Martineau84084a32011-07-22 14:54:00 -07006186 if (!err) {
6187 /* Reassembly complete */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006188 pi->sdu = NULL;
6189 pi->sdu_last_frag = NULL;
6190 pi->sdu_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006192 break;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03006193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006194 default:
6195 BT_DBG("Bad SAR value");
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03006196 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006197 }
6198
Mat Martineau84084a32011-07-22 14:54:00 -07006199 if (err) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006200 BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
6201 err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
6202 if (pi->sdu) {
6203 kfree_skb(pi->sdu);
6204 pi->sdu = NULL;
6205 }
6206 pi->sdu_last_frag = NULL;
6207 pi->sdu_len = 0;
6208 if (skb)
6209 kfree_skb(skb);
6210 }
6211
6212 /* Update local busy state */
6213 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
6214 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);
6215
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03006216 return err;
6217}
6218
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006219static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03006220{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006221 int err = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006222 /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
6223 * until a gap is encountered.
6224 */
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006225
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006226 struct l2cap_pinfo *pi;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006228 BT_DBG("sk %p", sk);
6229 pi = l2cap_pi(sk);
6230
6231 while (l2cap_rmem_available(sk)) {
6232 struct sk_buff *skb;
6233 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6234 (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
6235
6236 skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
6237
6238 if (!skb)
6239 break;
6240
6241 skb_unlink(skb, SREJ_QUEUE(sk));
6242 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6243 err = l2cap_ertm_rx_expected_iframe(sk,
6244 &bt_cb(skb)->control, skb);
6245 if (err)
6246 break;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006247 }
6248
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006249 if (skb_queue_empty(SREJ_QUEUE(sk))) {
6250 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6251 l2cap_ertm_send_ack(sk);
Mat Martineau84084a32011-07-22 14:54:00 -07006252 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006253
Mat Martineau84084a32011-07-22 14:54:00 -07006254 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006255}
6256
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006257static void l2cap_ertm_handle_srej(struct sock *sk,
6258 struct bt_l2cap_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006259{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006260 struct l2cap_pinfo *pi;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006261 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006262
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006263 BT_DBG("sk %p, control %p", sk, control);
Mat Martineaue3281402011-07-07 09:39:02 -07006264
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006265 pi = l2cap_pi(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006266
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006267 if (control->reqseq == pi->next_tx_seq) {
6268 BT_DBG("Invalid reqseq %d, disconnecting",
6269 (int) control->reqseq);
6270 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006271 return;
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006272 }
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006273
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006274 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006275
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006276 if (skb == NULL) {
6277 BT_DBG("Seq %d not available for retransmission",
6278 (int) control->reqseq);
6279 return;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006280 }
6281
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006282 if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
6283 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6284 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6285 return;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006286 }
6287
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006288 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006289
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006290 if (control->poll) {
6291 l2cap_ertm_pass_to_tx(sk, control);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006292
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006293 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
6294 l2cap_ertm_retransmit(sk, control);
6295 l2cap_ertm_send(sk);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006296
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006297 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6298 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6299 pi->srej_save_reqseq = control->reqseq;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006300 }
Andrei Emeltchenkoab784b72011-10-11 13:37:44 +03006301 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006302 l2cap_ertm_pass_to_tx_fbit(sk, control);
6303
6304 if (control->final) {
6305 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
6306 (pi->srej_save_reqseq == control->reqseq)) {
6307 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
6308 } else {
6309 l2cap_ertm_retransmit(sk, control);
6310 }
6311 } else {
6312 l2cap_ertm_retransmit(sk, control);
6313 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6314 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6315 pi->srej_save_reqseq = control->reqseq;
6316 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006317 }
Andrei Emeltchenkoab784b72011-10-11 13:37:44 +03006318 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006319}
6320
/* Handle a received REJ S-frame: the peer reports missing I-frames
 * starting at control->reqseq and asks for a go-back-N retransmission
 * of everything from that point.
 */
static void l2cap_ertm_handle_rej(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* reqseq == next_tx_seq would name a frame we never sent. */
	if (control->reqseq == pi->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting",
			(int) control->reqseq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);

	/* Disconnect if the first requested frame has already hit its
	 * retransmission limit (max_tx == 0 means unlimited).
	 */
	if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Acknowledge everything up to (but not including) reqseq. */
	l2cap_ertm_pass_to_tx(sk, control);

	if (control->final) {
		/* An F-bit REJ that answers a REJ exchange already in
		 * progress (REJ_ACT set) was handled then; only
		 * retransmit when no such exchange is outstanding.
		 */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_ertm_retransmit_all(sk, control);
	} else {
		l2cap_ertm_retransmit_all(sk, control);
		l2cap_ertm_send(sk);
		if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
6362
/* Classify the tx sequence number of a received I-frame relative to the
 * receive window: expected, duplicate, unexpected (gap), one of the
 * SREJ-recovery cases, or invalid.  Return value is one of the
 * L2CAP_ERTM_TXSEQ_* codes consumed by the rx state handlers.
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	/* While SREJ recovery is in progress, first check the
	 * SREJ-specific cases before the normal classification below.
	 */
	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		/* Head of srej_list is the retransmission we asked for. */
		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq (modulo window) means the
	 * frame was already received.
	 */
	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6452
/* ERTM receive state machine handler for the normal RECV state.
 * Processes I-frames and RR/RNR/REJ/SREJ S-frame events.  Ownership of
 * @skb passes to this function: it is either queued (skb_in_use set) or
 * freed before returning.  Returns 0 or a negative error from frame
 * reassembly.
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			/* F-bit answers an outstanding REJ poll if one is
			 * active; otherwise trigger retransmission now.
			 */
			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Already received: still process the ack info. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			/* Retransmit on F-bit unless a REJ exchange already
			 * handled it, or an AMP move is in progress.
			 */
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
				pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_PREPARE) {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			/* Peer leaving busy state with unacked frames
			 * pending: restart the retransmission timer.
			 */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	/* Frames not queued above are consumed here. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006590
/* ERTM receive state machine handler for the SREJ_SENT state, entered
 * after a sequence gap was detected and SREJ frames were issued.
 * Out-of-order frames are accumulated on SREJ_QUEUE until the gaps are
 * filled.  Ownership of @skb passes to this function (queued or freed).
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for arrived; drop it
			 * from the pending-SREJ list and try to flush the
			 * now-contiguous queued frames to the socket.
			 */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			/* Answer the poll by re-sending the most recent
			 * outstanding SREJ (with the F-bit).
			 */
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			/* Acknowledge the RNR with a plain RR S-frame. */
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	/* Frames not queued above are consumed here. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6736
/* ERTM receive state machine handler used while an AMP channel move is
 * in progress.  Deliberately minimal: only in-sequence I-frames are
 * accepted and S-frames merely update the ack state, so no recovery
 * state transitions can occur mid-move.  Ownership of @skb passes to
 * this function (consumed or freed).
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			/* Note: no retransmission here, unlike the normal
			 * RECV state - only the REJ_ACT flag is consumed.
			 */
			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6798
/* Respond to the peer's P-bit poll that completes an AMP channel move:
 * ack up to amp_move_reqseq, rewind the transmit state to what the
 * receiver expects, finish the move, answer with an F-bit frame, and
 * replay the event that carried the poll through the RECV handler.
 * Returns 0 or a negative error (-EPROTO if the poll arrived on an
 * I-frame, which is not a valid carrier for it).
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
					pi->amp_move_event);

	return err;
}
6841
6842static void l2cap_amp_move_setup(struct sock *sk)
6843{
6844 struct l2cap_pinfo *pi;
6845 struct sk_buff *skb;
6846
6847 BT_DBG("sk %p", sk);
6848
6849 pi = l2cap_pi(sk);
6850
6851 l2cap_ertm_stop_ack_timer(pi);
6852 l2cap_ertm_stop_retrans_timer(pi);
6853 l2cap_ertm_stop_monitor_timer(pi);
6854
6855 pi->retry_count = 0;
6856 skb_queue_walk(TX_QUEUE(sk), skb) {
6857 if (bt_cb(skb)->retries)
6858 bt_cb(skb)->retries = 1;
6859 else
6860 break;
6861 }
6862
6863 pi->expected_tx_seq = pi->buffer_seq;
6864
6865 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6866 l2cap_seq_list_clear(&pi->retrans_list);
6867 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6868 skb_queue_purge(SREJ_QUEUE(sk));
6869
6870 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6871 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6872
6873 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6874 pi->rx_state);
6875
6876 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6877}
6878
6879static void l2cap_amp_move_revert(struct sock *sk)
6880{
6881 struct l2cap_pinfo *pi;
6882
6883 BT_DBG("sk %p", sk);
6884
6885 pi = l2cap_pi(sk);
6886
6887 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6888 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6889 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6890 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6891 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6892}
6893
6894static int l2cap_amp_move_reconf(struct sock *sk)
6895{
6896 struct l2cap_pinfo *pi;
6897 u8 buf[64];
6898 int err = 0;
6899
6900 BT_DBG("sk %p", sk);
6901
6902 pi = l2cap_pi(sk);
6903
6904 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6905 l2cap_build_amp_reconf_req(sk, buf), buf);
6906 return err;
6907}
6908
6909static void l2cap_amp_move_success(struct sock *sk)
6910{
6911 struct l2cap_pinfo *pi;
6912
6913 BT_DBG("sk %p", sk);
6914
6915 pi = l2cap_pi(sk);
6916
6917 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6918 int err = 0;
6919 /* Send reconfigure request */
6920 if (pi->mode == L2CAP_MODE_ERTM) {
6921 pi->reconf_state = L2CAP_RECONF_INT;
6922 if (enable_reconfig)
6923 err = l2cap_amp_move_reconf(sk);
6924
6925 if (err || !enable_reconfig) {
6926 pi->reconf_state = L2CAP_RECONF_NONE;
6927 l2cap_ertm_tx(sk, NULL, NULL,
6928 L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6929 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6930 }
6931 } else
6932 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6933 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
6934 if (pi->mode == L2CAP_MODE_ERTM)
6935 pi->rx_state =
6936 L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
6937 else
6938 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6939 }
6940}
6941
6942static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6943{
6944 /* Make sure reqseq is for a packet that has been sent but not acked */
6945 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6946 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6947}
6948
/* Receive path for streaming mode: in-sequence frames are passed to
 * reassembly; on any sequence discontinuity the partially assembled SDU
 * is discarded (no retransmission in streaming mode) and the receive
 * state resynchronizes to the received txseq.  Ownership of @skb passes
 * to this function.  Always returns 0.
 */
static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, state %d",
		sk, control, skb, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
		L2CAP_ERTM_TXSEQ_EXPECTED) {
		l2cap_ertm_pass_to_tx(sk, control);

		BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
			__next_seq(pi->buffer_seq, pi));

		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);

		l2cap_ertm_rx_expected_iframe(sk, control, skb);
	} else {
		/* Sequence gap: drop any partial SDU being reassembled. */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame just seen, regardless of outcome. */
	pi->last_acked_seq = control->txseq;
	pi->expected_tx_seq = __next_seq(control->txseq, pi);

	return err;
}
6989
/* Top-level ERTM receive dispatcher: validates the frame's reqseq
 * against the in-flight window, then routes the event to the handler
 * for the current rx state (normal, SREJ recovery, or one of the AMP
 * move transition states).  An invalid reqseq disconnects the channel.
 * Ownership of @skb passes to the invoked state handler.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* Waiting for the peer to answer our poll after an
			 * AMP move; the F-bit frame completes the move and
			 * resynchronizes the transmit side.
			 */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				/* Pick up the MTU of whichever controller
				 * (AMP or BR/EDR) now carries the channel.
				 */
				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
							event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
7084
7085void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
7086{
7087 lock_sock(sk);
7088
7089 l2cap_pi(sk)->fixed_channel = 1;
7090
7091 l2cap_pi(sk)->imtu = opt->imtu;
7092 l2cap_pi(sk)->omtu = opt->omtu;
7093 l2cap_pi(sk)->remote_mps = opt->omtu;
7094 l2cap_pi(sk)->mps = opt->omtu;
7095 l2cap_pi(sk)->flush_to = opt->flush_to;
7096 l2cap_pi(sk)->mode = opt->mode;
7097 l2cap_pi(sk)->fcs = opt->fcs;
7098 l2cap_pi(sk)->max_tx = opt->max_tx;
7099 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
7100 l2cap_pi(sk)->tx_win = opt->txwin_size;
7101 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
7102 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
7103 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
7104
7105 if (opt->mode == L2CAP_MODE_ERTM ||
7106 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
7107 l2cap_ertm_init(sk);
7108
7109 release_sock(sk);
7110
7111 return;
7112}
7113
/* Maps the S-frame "super" function value (index 0..3: RR, REJ, RNR,
 * SREJ) to the corresponding ERTM receive event.
 */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
7118
/* Entry point for data received on a connection-oriented channel.
 * Demultiplexes by channel mode: basic mode frames go straight to the
 * socket queue; ERTM/streaming frames have their control field parsed,
 * FCS and length validated, and are fed into the ERTM or streaming
 * receive machinery.  Consumes @skb on every path; always returns 0.
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Parse the 2- or 4-byte control field off the front. */
		control = &bt_cb(skb)->control;
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
					control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
					control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Payload length excludes the SDU-length field of a
		 * start fragment and the trailing FCS, if present.
		 */
		if ((control->frame_type == 'i') &&
			(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload. */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		/* skb ownership was handed to the rx machinery above. */
		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7244
/* Process a frame whose handling was deferred, taking the socket lock
 * around the normal data-channel receive path.
 */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7251
/* Deliver a connectionless (G-frame) payload to the socket bound to the
 * given PSM on the connection's source address.  The frame is dropped
 * when no suitable socket exists, the socket state is wrong, or the
 * payload exceeds the socket's incoming MTU.  Consumes @skb on every
 * path; always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	/* sk may be NULL when the PSM lookup failed above. */
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
7281
Brian Gix20de7cf2012-02-02 14:56:51 -08007282static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid,
7283 struct sk_buff *skb)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007284{
Brian Gixbcdb7e72012-06-05 13:35:39 -07007285 struct sock *sk = NULL;
Brian Gix7eaa64d2011-10-19 13:17:42 -07007286 struct sk_buff *skb_rsp;
7287 struct l2cap_hdr *lh;
Brian Gix20de7cf2012-02-02 14:56:51 -08007288 int dir;
Subramanian Srinivasan3e7c75d2012-10-08 17:22:43 -07007289 struct work_struct *open_worker;
Brian Gix7eaa64d2011-10-19 13:17:42 -07007290 u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
7291 L2CAP_ATT_NOT_SUPPORTED};
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007292
Brian Gixbcdb7e72012-06-05 13:35:39 -07007293 if (skb->data[0] == L2CAP_ATT_MTU_REQ) {
7294 u8 mtu_rsp[] = {L2CAP_ATT_MTU_RSP, 23, 0};
7295
7296 skb_rsp = bt_skb_alloc(sizeof(mtu_rsp) + L2CAP_HDR_SIZE,
7297 GFP_ATOMIC);
7298 if (!skb_rsp)
7299 goto drop;
7300
7301 lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
7302 lh->len = cpu_to_le16(sizeof(mtu_rsp));
7303 lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
7304 memcpy(skb_put(skb_rsp, sizeof(mtu_rsp)), mtu_rsp,
7305 sizeof(mtu_rsp));
7306 hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
7307
7308 goto free_skb;
7309 }
7310
Brian Gix20de7cf2012-02-02 14:56:51 -08007311 dir = (skb->data[0] & L2CAP_ATT_RESPONSE_BIT) ? 0 : 1;
7312
7313 sk = l2cap_find_sock_by_fixed_cid_and_dir(cid, conn->src,
7314 conn->dst, dir);
7315
7316 BT_DBG("sk %p, dir:%d", sk, dir);
7317
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007318 if (!sk)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007319 goto drop;
7320
7321 bh_lock_sock(sk);
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007322
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007323 BT_DBG("sk %p, len %d", sk, skb->len);
7324
Subramanian Srinivasan3e7c75d2012-10-08 17:22:43 -07007325 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) {
7326 att_chn_params.cid = cid;
7327 att_chn_params.conn = conn;
7328 att_chn_params.dir = dir;
7329 att_chn_params.skb = skb;
7330 open_worker = kzalloc(sizeof(*open_worker), GFP_ATOMIC);
7331 if (!open_worker)
7332 BT_ERR("Out of memory");
7333 INIT_WORK(open_worker, l2cap_queue_acl_data);
7334 schedule_work(open_worker);
7335 goto done;
7336 }
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007337
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007338 if (l2cap_pi(sk)->imtu < skb->len)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007339 goto drop;
7340
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007341 if (!sock_queue_rcv_skb(sk, skb))
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007342 goto done;
7343
7344drop:
Brian Gixbcdb7e72012-06-05 13:35:39 -07007345 if (skb->data[0] != L2CAP_ATT_INDICATE)
7346 goto not_indicate;
7347
7348 /* If this is an incoming Indication, we are required to confirm */
7349
7350 skb_rsp = bt_skb_alloc(sizeof(u8) + L2CAP_HDR_SIZE, GFP_ATOMIC);
7351 if (!skb_rsp)
7352 goto free_skb;
7353
7354 lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
7355 lh->len = cpu_to_le16(sizeof(u8));
7356 lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
7357 err_rsp[0] = L2CAP_ATT_CONFIRM;
7358 memcpy(skb_put(skb_rsp, sizeof(u8)), err_rsp, sizeof(u8));
7359 hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
7360 goto free_skb;
7361
7362not_indicate:
7363 if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT ||
7364 skb->data[0] == L2CAP_ATT_CONFIRM)
Brian Gix7eaa64d2011-10-19 13:17:42 -07007365 goto free_skb;
7366
7367 /* If this is an incoming PDU that requires a response, respond with
7368 * a generic error so remote device doesn't hang */
7369
7370 skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
7371 if (!skb_rsp)
7372 goto free_skb;
7373
7374 lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
7375 lh->len = cpu_to_le16(sizeof(err_rsp));
7376 lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
7377 err_rsp[1] = skb->data[0];
7378 memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
7379 hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
7380
7381free_skb:
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007382 kfree_skb(skb);
7383
7384done:
7385 if (sk)
7386 bh_unlock_sock(sk);
7387 return 0;
7388}
7389
/* Demultiplex a complete, reassembled L2CAP frame by destination CID and
 * hand it to the appropriate channel handler.  The skb is consumed on
 * every path (queued, processed, backlogged or freed).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct sock *sk;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length field must match the payload actually received. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM in the first two
		 * payload bytes. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* A failed SMP exchange tears down the whole link. */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES, 0);
		break;

	default:
		/* NOTE(review): l2cap_get_chan_by_scid appears to return
		 * with the socket bh-locked — the unlock below has no
		 * matching visible lock; confirm against its definition. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
		if (sk) {
			if (sock_owned_by_user(sk)) {
				/* Owned by user context: defer via backlog. */
				BT_DBG("backlog sk %p", sk);
				if (sk_add_backlog(sk, skb))
					kfree_skb(skb);
			} else
				l2cap_data_channel(sk, skb);

			bh_unlock_sock(sk);
		} else if ((cid == L2CAP_CID_A2MP) && enable_hs) {
			BT_DBG("A2MP");
			amp_conn_ind(conn->hcon, skb);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			kfree_skb(skb);
		}

		break;
	}
}
7451
7452/* ---- L2CAP interface with lower layer (HCI) ---- */
7453
7454static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7455{
7456 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007457 register struct sock *sk;
7458 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459
7460 if (type != ACL_LINK)
Mat Martineau8b51dd42012-02-13 10:38:24 -08007461 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007462
Linus Torvalds1da177e2005-04-16 15:20:36 -07007463 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7464
7465 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007466 read_lock(&l2cap_sk_list.lock);
7467 sk_for_each(sk, node, &l2cap_sk_list.head) {
7468 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007469 continue;
7470
7471 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
7472 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007473 if (l2cap_pi(sk)->role_switch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007474 lm1 |= HCI_LM_MASTER;
7475 exact++;
7476 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7477 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007478 if (l2cap_pi(sk)->role_switch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007479 lm2 |= HCI_LM_MASTER;
7480 }
7481 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007482 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007483
7484 return exact ? lm1 : lm2;
7485}
7486
7487static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7488{
7489 struct l2cap_conn *conn;
7490
7491 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7492
Ville Tervoacd7d372011-02-10 22:38:49 -03007493 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007494 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007495
7496 if (!status) {
7497 conn = l2cap_conn_add(hcon, status);
7498 if (conn)
7499 l2cap_conn_ready(conn);
7500 } else
Mat Martineau3b9239a2012-02-16 11:54:30 -08007501 l2cap_conn_del(hcon, bt_err(status), 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007502
7503 return 0;
7504}
7505
7506static int l2cap_disconn_ind(struct hci_conn *hcon)
7507{
7508 struct l2cap_conn *conn = hcon->l2cap_data;
7509
7510 BT_DBG("hcon %p", hcon);
7511
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007512 if (hcon->type != ACL_LINK || !conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513 return 0x13;
7514
7515 return conn->disc_reason;
7516}
7517
Mat Martineau3b9239a2012-02-16 11:54:30 -08007518static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason, u8 is_process)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519{
7520 BT_DBG("hcon %p reason %d", hcon, reason);
7521
Ville Tervoacd7d372011-02-10 22:38:49 -03007522 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007523 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524
Mat Martineau3b9239a2012-02-16 11:54:30 -08007525 l2cap_conn_del(hcon, bt_err(reason), is_process);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007526
7527 return 0;
7528}
7529
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007530static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007531{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007532 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007533 return;
7534
7535 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007536 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7537 l2cap_sock_clear_timer(sk);
7538 l2cap_sock_set_timer(sk, HZ * 5);
Bhakthavatsala Raghavendrab03b5702013-02-12 19:44:47 +05307539 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH ||
7540 l2cap_pi(sk)->sec_level == BT_SECURITY_VERY_HIGH)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007541 __l2cap_sock_close(sk, ECONNREFUSED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007543 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7544 l2cap_sock_clear_timer(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545 }
7546}
7547
/* HCI callback: authentication/encryption status changed on @hcon.
 * Walk every channel on the connection (under the channel-list read
 * lock, bh-locking each socket in turn) and advance its state machine:
 *  - LE data channels hand off to SMP completion handling;
 *  - channels waiting to connect either proceed (possibly via AMP) or
 *    get a short retry timer;
 *  - channels in CONNECT2 send the pending connect response.
 * Returns 0 always.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	int smp = 0;	/* set when an LE (SMP-managed) channel was seen */

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);

		/* LE data channel: security is handled by SMP, not by the
		 * BR/EDR state machine below. */
		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				l2cap_pi(sk)->sec_level = hcon->sec_level;
				l2cap_chan_ready(sk);
			}

			smp = 1;
			bh_unlock_sock(sk);
			continue;
		}

		/* Connection already pending elsewhere; skip. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need their encryption timers
		 * adjusted. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: issue the L2CAP connect,
				 * via AMP if this channel prefers it. */
				l2cap_pi(sk)->conf_state |=
						L2CAP_CONF_CONNECT_PEND;
				if ((l2cap_pi(sk)->amp_pref ==
						BT_AMP_POLICY_PREFER_AMP) &&
						enable_hs) {
					amp_create_physical(l2cap_pi(sk)->conn,
								sk);
				} else
					l2cap_send_conn_req(sk);
			} else {
				/* Security failed: short timer before the
				 * channel is torn down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				/* Incoming channel accepted; AMP channels
				 * take a different acceptance path. */
				if (l2cap_pi(sk)->amp_id) {
					amp_accept_physical(conn,
						l2cap_pi(sk)->amp_id, sk);
					bh_unlock_sock(sk);
					continue;
				}

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

			/* Kick off configuration if we accepted and have
			 * not sent a config request yet. */
			if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
					result == L2CAP_CR_SUCCESS) {
				char buf[128];
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ,
						l2cap_build_conf_req(sk, buf),
						buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	/* Complete the SMP encryption handshake outside the list lock. */
	if (smp) {
		del_timer(&hcon->smp_timer);
		smp_link_encrypt_cmplt(conn, status, encrypt);
	}

	return 0;
}
7658
/* HCI callback: a fragment of ACL data arrived for @hcon.
 * Reassembles L2CAP PDUs from ACL fragments using conn->rx_skb /
 * conn->rx_len as the in-progress state, and passes each complete PDU
 * to l2cap_recv_frame().  Any protocol violation (short/long/unexpected
 * fragment) marks the connection unreliable and drops the fragment.
 * Returns 0 always; @skb is consumed on every path.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Only auto-create a connection object for BR/EDR controllers. */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous PDU was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		/* ACL_START|ACL_CONT would claim a complete frame but the
		 * lengths above say otherwise. */
		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation fragment with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
7761
Srinivas Krovvidi10734192011-12-29 07:29:11 +05307762static void l2cap_set_acl_flushto(struct hci_conn *hcon, u16 flush_to)
7763{
7764 struct hci_cp_write_automatic_flush_timeout flush_tm;
7765 if (hcon && hcon->hdev) {
7766 flush_tm.handle = hcon->handle;
7767 if (flush_to == L2CAP_DEFAULT_FLUSH_TO)
7768 flush_to = 0;
7769 flush_tm.timeout = (flush_to < L2CAP_MAX_FLUSH_TO) ?
7770 flush_to : L2CAP_MAX_FLUSH_TO;
7771 hci_send_cmd(hcon->hdev,
7772 HCI_OP_WRITE_AUTOMATIC_FLUSH_TIMEOUT,
7773 4, &(flush_tm));
7774 }
7775}
7776
7777static u16 l2cap_get_smallest_flushto(struct l2cap_chan_list *l)
7778{
7779 int ret_flush_to = L2CAP_DEFAULT_FLUSH_TO;
7780 struct sock *s;
7781 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
7782 if (l2cap_pi(s)->flush_to > 0 &&
7783 l2cap_pi(s)->flush_to < ret_flush_to)
7784 ret_flush_to = l2cap_pi(s)->flush_to;
7785 }
7786 return ret_flush_to;
7787}
7788
/* debugfs seq_file dump: one line per L2CAP socket with addresses,
 * state, PSM, channel IDs, MTUs, security level and mode.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	/* _bh variant: the list is also touched from softirq context. */
	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level,
					pi->mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
7812
Subramanian Srinivasan3e7c75d2012-10-08 17:22:43 -07007813static void l2cap_queue_acl_data(struct work_struct *worker)
7814{
7815 struct sock *sk = NULL;
7816 int attempts = 0;
7817 struct sk_buff *skb_rsp;
7818 struct l2cap_hdr *lh;
7819 u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
7820 L2CAP_ATT_NOT_SUPPORTED};
7821
7822 for (attempts = 0; attempts < 40; attempts++) {
7823 msleep(50);
7824 if (!att_chn_params.conn) {
7825 BT_DBG("att_chn_params.conn is NULL");
7826 return;
7827 }
7828 sk = l2cap_find_sock_by_fixed_cid_and_dir
7829 (att_chn_params.cid,
7830 att_chn_params.conn->src,
7831 att_chn_params.conn->dst,
7832 att_chn_params.dir);
7833 bh_lock_sock(sk);
7834 if (sk->sk_state == BT_CONNECTED) {
7835 sock_queue_rcv_skb(sk, att_chn_params.skb);
7836 if (sk)
7837 bh_unlock_sock(sk);
7838 return;
7839 }
7840 bh_unlock_sock(sk);
7841 }
7842 bh_lock_sock(sk);
7843
7844 if (att_chn_params.skb->data[0] != L2CAP_ATT_INDICATE)
7845 goto not_indicate;
7846
7847 /* If this is an incoming Indication, we are required to confirm */
7848 skb_rsp = bt_skb_alloc(sizeof(u8) + L2CAP_HDR_SIZE, GFP_ATOMIC);
7849 if (!skb_rsp)
7850 goto free_skb;
7851
7852 lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
7853 lh->len = cpu_to_le16(sizeof(u8));
7854 lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
7855 err_rsp[0] = L2CAP_ATT_CONFIRM;
7856 memcpy(skb_put(skb_rsp, sizeof(u8)), err_rsp, sizeof(u8));
7857 hci_send_acl(att_chn_params.conn->hcon, NULL, skb_rsp, 0);
7858 goto free_skb;
7859
7860not_indicate:
7861 if (att_chn_params.skb->data[0] & L2CAP_ATT_RESPONSE_BIT ||
7862 att_chn_params.skb->data[0] == L2CAP_ATT_CONFIRM)
7863 goto free_skb;
7864
7865 /* If this is an incoming PDU that requires a response, respond with
7866 * a generic error so remote device doesn't hang */
7867
7868 skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
7869 if (!skb_rsp)
7870 goto free_skb;
7871
7872 lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
7873 lh->len = cpu_to_le16(sizeof(err_rsp));
7874 lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
7875 err_rsp[1] = att_chn_params.skb->data[0];
7876 memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
7877 hci_send_acl(att_chn_params.conn->hcon, NULL, skb_rsp, 0);
7878
7879free_skb:
7880 kfree_skb(att_chn_params.skb);
7881
7882 if (sk)
7883 bh_unlock_sock(sk);
7884
7885}
7886
/* debugfs open hook: expose l2cap_debugfs_show() as a single_open
 * seq_file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7891
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7898
/* Dentry of the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007900
/* Callbacks registered with the HCI core: L2CAP's view of link-level
 * events (connect/disconnect/security) and incoming ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata,
	.create_cfm	= l2cap_create_cfm,
	.modify_cfm	= l2cap_modify_cfm,
	.destroy_cfm	= l2cap_destroy_cfm,
};
7914
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007915int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007916{
7917 int err;
7918
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007919 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007920 if (err < 0)
7921 return err;
7922
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007923 _l2cap_wq = create_singlethread_workqueue("l2cap");
7924 if (!_l2cap_wq) {
7925 err = -ENOMEM;
7926 goto error;
7927 }
7928
Linus Torvalds1da177e2005-04-16 15:20:36 -07007929 err = hci_register_proto(&l2cap_hci_proto);
7930 if (err < 0) {
7931 BT_ERR("L2CAP protocol registration failed");
7932 bt_sock_unregister(BTPROTO_L2CAP);
7933 goto error;
7934 }
7935
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007936 if (bt_debugfs) {
7937 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7938 bt_debugfs, NULL, &l2cap_debugfs_fops);
7939 if (!l2cap_debugfs)
7940 BT_ERR("Failed to create L2CAP debug file");
7941 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007942
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007943 if (amp_init() < 0) {
7944 BT_ERR("AMP Manager initialization failed");
7945 goto error;
7946 }
7947
Linus Torvalds1da177e2005-04-16 15:20:36 -07007948 return 0;
7949
7950error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007951 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007952 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953 return err;
7954}
7955
/* Module exit: tear down in reverse order of l2cap_init() — AMP manager
 * first, then debugfs, the work queue (flushed before destruction), the
 * HCI protocol registration, and finally the socket layer. */
void l2cap_exit(void)
{
	amp_exit();

	debugfs_remove(l2cap_debugfs);

	/* Drain any pending deferred work before destroying the queue. */
	flush_workqueue(_l2cap_wq);
	destroy_workqueue(_l2cap_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
7970
/* Module parameters, tunable at load time and via sysfs (mode 0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable A2MP protocol");

module_param(enable_reconfig, bool, 0644);
MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");