/*
   Copyright (c) 2010-2012 The Linux Foundation. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
*/
Steve Mucklef132c6c2012-06-06 18:30:57 -070014#include <linux/interrupt.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/kernel.h>
19
20#include <linux/skbuff.h>
21#include <linux/list.h>
22#include <linux/workqueue.h>
23#include <linux/timer.h>
24
25#include <linux/crypto.h>
26#include <linux/scatterlist.h>
27#include <linux/err.h>
28#include <crypto/hash.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/l2cap.h>
33#include <net/bluetooth/amp.h>
34
35static struct workqueue_struct *amp_workqueue;
36
37LIST_HEAD(amp_mgr_list);
38DEFINE_RWLOCK(amp_mgr_list_lock);
39
40static int send_a2mp(struct socket *sock, u8 *data, int len);
41
42static void ctx_timeout(unsigned long data);
43
44static void launch_ctx(struct amp_mgr *mgr);
45static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
46static int kill_ctx(struct amp_ctx *ctx);
47static int cancel_ctx(struct amp_ctx *ctx);
48
49static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
50
/* Unlink @mgr from the global manager list, kill every context still
 * attached to it, then free the manager and its cached controller.
 * The ctx list read lock is dropped around each kill_ctx() call because
 * destroying a context re-acquires the list lock for writing.
 */
static void remove_amp_mgr(struct amp_mgr *mgr)
{
	BT_DBG("mgr %p", mgr);

	write_lock(&amp_mgr_list_lock);
	list_del(&mgr->list);
	write_unlock(&amp_mgr_list_lock);

	read_lock(&mgr->ctx_list_lock);
	while (!list_empty(&mgr->ctx_list)) {
		struct amp_ctx *ctx;
		/* kill_ctx() removes ctx from the list, so the loop makes
		 * progress even though we always take the first entry.
		 */
		ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
		read_unlock(&mgr->ctx_list_lock);
		BT_DBG("kill ctx %p", ctx);
		kill_ctx(ctx);
		read_lock(&mgr->ctx_list_lock);
	}
	read_unlock(&mgr->ctx_list_lock);

	kfree(mgr->ctrls);

	kfree(mgr);
}
74
75static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
76{
77 struct amp_mgr *mgr;
78 struct amp_mgr *found = NULL;
79
Peter Krystadf5289202011-11-14 15:11:22 -080080 read_lock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 list_for_each_entry(mgr, &amp_mgr_list, list) {
82 if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
83 found = mgr;
84 break;
85 }
86 }
Peter Krystadf5289202011-11-14 15:11:22 -080087 read_unlock(&amp_mgr_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088 return found;
89}
90
/* Look up the manager bound to @hcon's L2CAP connection, creating and
 * registering a new one if none exists.  Returns the manager, or NULL
 * on allocation or fixed-channel-socket failure.
 *
 * NOTE(review): the list lock is released between the failed lookup and
 * the list_add() below (the socket open may sleep), so two concurrent
 * callers for the same hcon could each create a manager — confirm that
 * callers serialize on the connection.
 *
 * @skb is stashed in mgr->skb; presumably ownership passes to the
 * manager — TODO confirm against callers (freed in remove path?).
 */
static struct amp_mgr *get_create_amp_mgr(struct hci_conn *hcon,
						struct sk_buff *skb)
{
	struct amp_mgr *mgr;

	write_lock(&amp_mgr_list_lock);
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		if (mgr->l2cap_conn == hcon->l2cap_data) {
			BT_DBG("found %p", mgr);
			write_unlock(&amp_mgr_list_lock);
			goto gc_finished;
		}
	}
	write_unlock(&amp_mgr_list_lock);

	mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
	if (!mgr)
		return NULL;

	mgr->l2cap_conn = hcon->l2cap_data;
	mgr->next_ident = 1;	/* A2MP idents start at 1; 0 is reserved */
	INIT_LIST_HEAD(&mgr->ctx_list);
	rwlock_init(&mgr->ctx_list_lock);
	mgr->skb = skb;
	BT_DBG("hcon %p mgr %p", hcon, mgr);
	mgr->a2mp_sock = open_fixed_channel(&hcon->hdev->bdaddr, &hcon->dst);
	if (!mgr->a2mp_sock) {
		kfree(mgr);
		return NULL;
	}
	write_lock(&amp_mgr_list_lock);
	list_add(&(mgr->list), &amp_mgr_list);
	write_unlock(&amp_mgr_list_lock);

gc_finished:
	return mgr;
}
128
129static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
130{
131 if ((mgr->ctrls) && (mgr->ctrls->id == remote_id))
132 return mgr->ctrls;
133 else
134 return NULL;
135}
136
137static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
138{
139 struct amp_ctrl *ctrl;
140
141 BT_DBG("mgr %p, id %d", mgr, id);
142 if ((mgr->ctrls) && (mgr->ctrls->id == id))
143 ctrl = mgr->ctrls;
144 else {
145 kfree(mgr->ctrls);
146 ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
147 if (ctrl) {
148 ctrl->mgr = mgr;
149 ctrl->id = id;
150 }
151 mgr->ctrls = ctrl;
152 }
153
154 return ctrl;
155}
156
157static struct amp_ctx *create_ctx(u8 type, u8 state)
158{
159 struct amp_ctx *ctx = NULL;
160
161 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
162 if (ctx) {
163 ctx->type = type;
164 ctx->state = state;
165 init_timer(&(ctx->timer));
166 ctx->timer.function = ctx_timeout;
167 ctx->timer.data = (unsigned long) ctx;
168 }
169 BT_DBG("ctx %p, type %d", ctx, type);
170 return ctx;
171}
172
173static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
174{
175 BT_DBG("ctx %p", ctx);
Peter Krystadf5289202011-11-14 15:11:22 -0800176 write_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700177 list_add(&ctx->list, &mgr->ctx_list);
Peter Krystadf5289202011-11-14 15:11:22 -0800178 write_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179 ctx->mgr = mgr;
180 execute_ctx(ctx, AMP_INIT, 0);
181}
182
/* Detach @ctx from its manager, kick off any context that was deferred
 * behind it, and free it.  The deferred context is started only after
 * this one is off the list so it does not find its predecessor via the
 * get_ctx_* lookups.
 */
static void destroy_ctx(struct amp_ctx *ctx)
{
	struct amp_mgr *mgr = ctx->mgr;

	BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
	del_timer(&ctx->timer);
	write_lock(&mgr->ctx_list_lock);
	list_del(&ctx->list);
	write_unlock(&mgr->ctx_list_lock);
	if (ctx->deferred)
		execute_ctx(ctx->deferred, AMP_INIT, 0);
	kfree(ctx);
}
196
197static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
198{
199 struct amp_ctx *fnd = NULL;
200 struct amp_ctx *ctx;
201
Peter Krystadf5289202011-11-14 15:11:22 -0800202 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700203 list_for_each_entry(ctx, &mgr->ctx_list, list) {
204 if (ctx->type == type) {
205 fnd = ctx;
206 break;
207 }
208 }
Peter Krystadf5289202011-11-14 15:11:22 -0800209 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700210 return fnd;
211}
212
213static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
214{
215 struct amp_mgr *mgr = cur->mgr;
216 struct amp_ctx *fnd = NULL;
217 struct amp_ctx *ctx;
218
Peter Krystadf5289202011-11-14 15:11:22 -0800219 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700220 list_for_each_entry(ctx, &mgr->ctx_list, list) {
221 if ((ctx->type == type) && (ctx != cur)) {
222 fnd = ctx;
223 break;
224 }
225 }
Peter Krystadf5289202011-11-14 15:11:22 -0800226 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227 return fnd;
228}
229
230static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
231{
232 struct amp_ctx *fnd = NULL;
233 struct amp_ctx *ctx;
234
Peter Krystadf5289202011-11-14 15:11:22 -0800235 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236 list_for_each_entry(ctx, &mgr->ctx_list, list) {
237 if ((ctx->evt_type & AMP_A2MP_RSP) &&
238 (ctx->rsp_ident == ident)) {
239 fnd = ctx;
240 break;
241 }
242 }
Peter Krystadf5289202011-11-14 15:11:22 -0800243 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700244 return fnd;
245}
246
/* Search every manager's context list for a context bound to @hdev that
 * is waiting for the given HCI event class.  For command status and
 * command complete events @evt_value is the opcode; for plain HCI
 * events it is the event code.  Returns the first match, or NULL.
 */
static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
					u16 evt_value)
{
	struct amp_mgr *mgr;
	struct amp_ctx *fnd = NULL;

	read_lock(&amp_mgr_list_lock);
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		struct amp_ctx *ctx;
		read_lock(&mgr->ctx_list_lock);
		list_for_each_entry(ctx, &mgr->ctx_list, list) {
			struct hci_dev *ctx_hdev;
			/* takes a device reference; dropped below */
			ctx_hdev = hci_dev_get(ctx->id);
			if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
				switch (evt_type) {
				case AMP_HCI_CMD_STATUS:
				case AMP_HCI_CMD_CMPLT:
					if (ctx->opcode == evt_value)
						fnd = ctx;
					break;
				case AMP_HCI_EVENT:
					if (ctx->evt_code == (u8) evt_value)
						fnd = ctx;
					break;
				}
			}
			if (ctx_hdev)
				hci_dev_put(ctx_hdev);

			if (fnd)
				break;
		}
		read_unlock(&mgr->ctx_list_lock);
		/* NOTE(review): outer loop keeps walking remaining managers
		 * even after a match is found — harmless but wasted work.
		 */
	}
	read_unlock(&amp_mgr_list_lock);
	return fnd;
}
284
285static inline u8 next_ident(struct amp_mgr *mgr)
286{
287 if (++mgr->next_ident == 0)
288 mgr->next_ident = 1;
289 return mgr->next_ident;
290}
291
292static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
293 u16 len, void *data, u16 len2, void *data2)
294{
295 struct a2mp_cmd_hdr *hdr;
296 int plen;
297 u8 *p, *cmd;
298
299 BT_DBG("ident %d code 0x%02x", ident, code);
300 if (!mgr->a2mp_sock)
301 return;
302 plen = sizeof(*hdr) + len + len2;
303 cmd = kzalloc(plen, GFP_ATOMIC);
304 if (!cmd)
305 return;
306 hdr = (struct a2mp_cmd_hdr *) cmd;
307 hdr->code = code;
308 hdr->ident = ident;
309 hdr->len = cpu_to_le16(len+len2);
310 p = cmd + sizeof(*hdr);
311 memcpy(p, data, len);
312 p += len;
313 memcpy(p, data2, len2);
314 send_a2mp(mgr->a2mp_sock, cmd, plen);
315 kfree(cmd);
316}
317
/* Convenience wrapper: send an A2MP PDU with a single payload piece. */
static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
					u8 code, u16 len, void *data)
{
	send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
}
323
/* Handle an incoming A2MP Command Reject: kill whichever context was
 * waiting for a response with the rejected ident.  Consumes the header
 * and reject payload from @skb.
 */
static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_cmd_rej *rej;
	struct amp_ctx *ctx;

	BT_DBG("ident %d code %d", hdr->ident, hdr->code);
	rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*rej))
		return -EINVAL;
	BT_DBG("reason %d", le16_to_cpu(rej->reason));
	ctx = get_ctx_a2mp(mgr, hdr->ident);
	if (ctx)
		kill_ctx(ctx);
	skb_pull(skb, sizeof(*rej));
	return 0;
}
341
342static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
343 void *msg)
344{
345 struct a2mp_cl clist[16];
346 struct a2mp_cl *cl;
347 struct hci_dev *hdev;
348 int num_ctrls = 1, id;
349
350 cl = clist;
351 cl->id = 0;
352 cl->type = 0;
353 cl->status = 1;
354
355 for (id = 0; id < 16; ++id) {
356 hdev = hci_dev_get(id);
357 if (hdev) {
358 if ((hdev->amp_type != HCI_BREDR) &&
359 test_bit(HCI_UP, &hdev->flags)) {
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800360 (cl + num_ctrls)->id = hdev->id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361 (cl + num_ctrls)->type = hdev->amp_type;
362 (cl + num_ctrls)->status = hdev->amp_status;
363 ++num_ctrls;
364 }
365 hci_dev_put(hdev);
366 }
367 }
368 send_a2mp_cmd2(mgr, ident, code, len, msg,
369 num_ctrls*sizeof(*cl), clist);
370
371 return 0;
372}
373
/* Broadcast an A2MP Change Notify to every manager whose peer has
 * already completed discovery.
 *
 * NOTE(review): amp_mgr_list is walked here without amp_mgr_list_lock,
 * unlike every other walk in this file — presumably because
 * send_a2mp_cl() may sleep while sending.  Confirm callers cannot race
 * with manager add/remove.
 */
static void send_a2mp_change_notify(void)
{
	struct amp_mgr *mgr;

	list_for_each_entry(mgr, &amp_mgr_list, list) {
		if (mgr->discovered)
			send_a2mp_cl(mgr, next_ident(mgr),
					A2MP_CHANGE_NOTIFY, 0, NULL);
	}
}
384
/* Handle an incoming A2MP Discover Request: consume any extended
 * feature mask words, mark the peer as discovered, and reply with our
 * MTU plus the controller list.
 */
static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_discover_req *req;
	u16 *efm;
	struct a2mp_discover_rsp rsp;

	req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*req))
		return -EINVAL;
	efm = (u16 *) skb_pull(skb, sizeof(*req));

	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
		le16_to_cpu(req->ext_feat));

	/* bit 15 set means another 16-bit extended feature word follows */
	while (le16_to_cpu(req->ext_feat) & 0x8000) {
		if (skb->len < sizeof(*efm))
			return -EINVAL;
		req->ext_feat = *efm;
		BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
		efm = (u16 *) skb_pull(skb, sizeof(*efm));
	}

	rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
	rsp.ext_feat = 0;

	mgr->discovered = 1;

	return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
				sizeof(rsp), &rsp);
}
416
/* Handle an A2MP Change Notify from the peer: refresh our cached view
 * of each remote controller listed, then acknowledge.
 */
static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_cl *cl;

	cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
	while (skb->len >= sizeof(*cl)) {
		struct amp_ctrl *ctrl;
		/* id 0 is the peer's BR/EDR radio — not cached */
		if (cl->id != 0) {
			ctrl = get_create_ctrl(mgr, cl->id);
			if (ctrl != NULL) {
				ctrl->type = cl->type;
				ctrl->status = cl->status;
			}
		}
		cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
	}

	/* TODO find controllers in manager that were not on received */
	/* controller list and destroy them */
	send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);

	return 0;
}
441
/* Handle an A2MP Get Info Request: report bandwidth/latency/capability
 * figures for the requested controller id, or status 1 if the id is
 * absent or refers to a BR/EDR (non-AMP) radio.
 */
static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	u8 *data;
	int id;
	struct hci_dev *hdev;
	struct a2mp_getinfo_rsp rsp;

	data = (u8 *) skb_pull(skb, sizeof(*hdr));
	if (le16_to_cpu(hdr->len) < sizeof(*data))
		return -EINVAL;
	if (skb->len < sizeof(*data))
		return -EINVAL;
	id = *data;
	skb_pull(skb, sizeof(*data));
	rsp.id = id;
	rsp.status = 1;		/* invalid until the device checks out */

	BT_DBG("id %d", id);
	hdev = hci_dev_get(id);

	if (hdev && hdev->amp_type != HCI_BREDR) {
		rsp.status = 0;
		rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
		rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
		rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
		rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
		rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
	}

	send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);

	if (hdev)
		hci_dev_put(hdev);

	return 0;
}
479
/* Initiator entry point: start a Create Physical Link sequence on
 * behalf of L2CAP socket @sk.  On any setup failure the L2CAP layer is
 * notified immediately with -ENOMEM.
 */
static void create_physical(struct l2cap_conn *conn, struct sock *sk)
{
	struct amp_mgr *mgr;
	struct amp_ctx *ctx = NULL;

	BT_DBG("conn %p", conn);
	mgr = get_create_amp_mgr(conn->hcon, NULL);
	if (!mgr)
		goto cp_finished;
	BT_DBG("mgr %p", mgr);
	ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
	if (!ctx)
		goto cp_finished;
	ctx->sk = sk;
	/* reference held for the lifetime of the context; the context
	 * handler is expected to sock_put() when it completes
	 */
	sock_hold(sk);
	start_ctx(mgr, ctx);
	return;

cp_finished:
	l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
}
501
/* Responder entry point: L2CAP asks us to accept a physical link on
 * controller @id for socket @sk.  If the link already exists, complete
 * immediately; otherwise hand @sk to the pending accept context.
 */
static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
{
	struct amp_mgr *mgr;
	struct hci_dev *hdev;
	struct hci_conn *conn;
	struct amp_ctx *aplctx = NULL;
	u8 remote_id = 0;
	int result = -EINVAL;

	BT_DBG("lcon %p", lcon);
	hdev = hci_dev_get(id);
	if (!hdev)
		goto ap_finished;
	BT_DBG("hdev %p", hdev);
	mgr = get_create_amp_mgr(lcon->hcon, NULL);
	if (!mgr)
		goto ap_finished;
	BT_DBG("mgr %p", mgr);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					&mgr->l2cap_conn->hcon->dst);
	if (conn) {
		/* physical link already up: report success right away */
		BT_DBG("conn %p", hdev);
		result = 0;
		remote_id = conn->dst_id;
		goto ap_finished;
	}
	aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
	if (!aplctx)
		goto ap_finished;
	/* socket reference is released by the accept context handler */
	aplctx->sk = sk;
	sock_hold(sk);
	/* NOTE(review): this early return skips the hci_dev_put() below,
	 * apparently leaking the hci_dev_get(id) reference — confirm.
	 */
	return;

ap_finished:
	if (hdev)
		hci_dev_put(hdev);
	l2cap_amp_physical_complete(result, id, remote_id, sk);
}
540
541static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
542{
543 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
544 struct amp_ctx *ctx;
545 struct a2mp_getampassoc_req *req;
546
547 if (hdr->len < sizeof(*req))
548 return -EINVAL;
549 req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
550 skb_pull(skb, sizeof(*req));
551
552 ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
553 if (!ctx)
554 return -ENOMEM;
555 ctx->id = req->id;
556 ctx->d.gaa.req_ident = hdr->ident;
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800557 ctx->hdev = hci_dev_get(ctx->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558 if (ctx->hdev)
559 ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
560 GFP_ATOMIC);
561 start_ctx(mgr, ctx);
562 return 0;
563}
564
565static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
566{
567 struct sk_buff *skb = (struct sk_buff *) data;
568 struct hci_cp_read_local_amp_assoc cp;
569 struct hci_rp_read_local_amp_assoc *rp;
570 struct a2mp_getampassoc_rsp rsp;
571 u16 rem_len;
572 u16 frag_len;
573
574 rsp.status = 1;
575 if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
576 goto gaa_finished;
577
578 switch (ctx->state) {
579 case AMP_GAA_INIT:
580 ctx->state = AMP_GAA_RLAA_COMPLETE;
581 ctx->evt_type = AMP_HCI_CMD_CMPLT;
582 ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
583 ctx->d.gaa.len_so_far = 0;
584 cp.phy_handle = 0;
585 cp.len_so_far = 0;
586 cp.max_len = ctx->hdev->amp_assoc_size;
587 hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
588 break;
589
590 case AMP_GAA_RLAA_COMPLETE:
591 if (skb->len < 4)
592 goto gaa_finished;
593 rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
594 if (rp->status)
595 goto gaa_finished;
596 rem_len = le16_to_cpu(rp->rem_len);
597 skb_pull(skb, 4);
598 frag_len = skb->len;
599
600 if (ctx->d.gaa.len_so_far + rem_len <=
601 ctx->hdev->amp_assoc_size) {
602 struct hci_cp_read_local_amp_assoc cp;
603 u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
604 memcpy(assoc, rp->frag, frag_len);
605 ctx->d.gaa.len_so_far += rem_len;
606 rem_len -= frag_len;
607 if (rem_len == 0) {
608 rsp.status = 0;
609 goto gaa_finished;
610 }
611 /* more assoc data to read */
612 cp.phy_handle = 0;
613 cp.len_so_far = ctx->d.gaa.len_so_far;
614 cp.max_len = ctx->hdev->amp_assoc_size;
615 hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
616 }
617 break;
618
619 default:
620 goto gaa_finished;
621 break;
622 }
623 return 0;
624
625gaa_finished:
626 rsp.id = ctx->id;
627 send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
628 sizeof(rsp), &rsp,
629 ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
630 kfree(ctx->d.gaa.assoc);
631 if (ctx->hdev)
632 hci_dev_put(ctx->hdev);
633 return 1;
634}
635
/* Completion carrier for asynchronous HMAC-SHA256 requests. */
struct hmac_sha256_result {
	struct completion completion;
	int err;	/* final status reported by the crypto callback */
};
640
641static void hmac_sha256_final(struct crypto_async_request *req, int err)
642{
643 struct hmac_sha256_result *r = req->data;
644 if (err == -EINPROGRESS)
645 return;
646 r->err = err;
647 complete(&r->completion);
648}
649
650int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
651 u8 *output, u8 outlen)
652{
653 int ret = 0;
654 struct crypto_ahash *tfm;
655 struct scatterlist sg;
656 struct ahash_request *req;
657 struct hmac_sha256_result tresult;
658 void *hash_buff = NULL;
659
660 unsigned char hash_result[64];
661 int i;
662
663 memset(output, 0, outlen);
664
665 init_completion(&tresult.completion);
666
667 tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
668 CRYPTO_ALG_TYPE_AHASH_MASK);
669 if (IS_ERR(tfm)) {
670 BT_DBG("crypto_alloc_ahash failed");
671 ret = PTR_ERR(tfm);
672 goto err_tfm;
673 }
674
675 req = ahash_request_alloc(tfm, GFP_KERNEL);
676 if (!req) {
677 BT_DBG("failed to allocate request for hmac(sha256)");
678 ret = -ENOMEM;
679 goto err_req;
680 }
681
682 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
683 hmac_sha256_final, &tresult);
684
685 hash_buff = kzalloc(psize, GFP_KERNEL);
686 if (!hash_buff) {
687 BT_DBG("failed to kzalloc hash_buff");
688 ret = -ENOMEM;
689 goto err_hash_buf;
690 }
691
692 memset(hash_result, 0, 64);
693 memcpy(hash_buff, plaintext, psize);
694 sg_init_one(&sg, hash_buff, psize);
695
696 if (ksize) {
697 crypto_ahash_clear_flags(tfm, ~0);
698 ret = crypto_ahash_setkey(tfm, key, ksize);
699
700 if (ret) {
701 BT_DBG("crypto_ahash_setkey failed");
702 goto err_setkey;
703 }
704 }
705
706 ahash_request_set_crypt(req, &sg, hash_result, psize);
707 ret = crypto_ahash_digest(req);
708
709 BT_DBG("ret 0x%x", ret);
710
711 switch (ret) {
712 case 0:
713 for (i = 0; i < outlen; i++)
714 output[i] = hash_result[i];
715 break;
716 case -EINPROGRESS:
717 case -EBUSY:
718 ret = wait_for_completion_interruptible(&tresult.completion);
719 if (!ret && !tresult.err) {
720 INIT_COMPLETION(tresult.completion);
721 break;
722 } else {
723 BT_DBG("wait_for_completion_interruptible failed");
724 if (!ret)
725 ret = tresult.err;
726 goto out;
727 }
728 default:
729 goto out;
730 }
731
732out:
733err_setkey:
734 kfree(hash_buff);
735err_hash_buf:
736 ahash_request_free(req);
737err_req:
738 crypto_free_ahash(tfm);
739err_tfm:
740 return ret;
741}
742
743static void show_key(u8 *k)
744{
745 int i = 0;
746 for (i = 0; i < 32; i += 8)
747 BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
748 *(k+i+0), *(k+i+1), *(k+i+2), *(k+i+3),
749 *(k+i+4), *(k+i+5), *(k+i+6), *(k+i+7));
750}
751
/* Derive the 32-byte dedicated AMP link key for @conn from its BR/EDR
 * link key: gamp_key = HMAC-SHA256(bt2_key, "gamp"), and for non-GAMP
 * key types additionally b802_key = HMAC-SHA256(gamp_key, "802b").
 * Requires an authenticated link and key_type >= 3.  On success the
 * key is copied to @data with *len = 32 and *type = the BR/EDR key
 * type; returns 0, -EACCES on security failure, or the hmac error.
 */
static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
{
	u8 bt2_key[32];
	u8 gamp_key[32];
	u8 b802_key[32];
	int result;

	if (!hci_conn_check_link_mode(conn))
		return -EACCES;

	BT_DBG("key_type %d", conn->key_type);
	if (conn->key_type < 3)
		return -EACCES;

	*type = conn->key_type;
	*len = 32;
	/* the 16-byte BR/EDR key is doubled to form the 32-byte input */
	memcpy(&bt2_key[0], conn->link_key, 16);
	memcpy(&bt2_key[16], conn->link_key, 16);
	result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
	if (result)
		goto ps_finished;

	if (conn->key_type == 3) {
		/* debug combination key: use the generic AMP key as-is */
		BT_DBG("gamp_key");
		show_key(gamp_key);
		memcpy(data, gamp_key, 32);
		goto ps_finished;
	}

	result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
	if (result)
		goto ps_finished;

	BT_DBG("802b_key");
	show_key(b802_key);
	memcpy(data, b802_key, 32);

ps_finished:
	return result;
}
792
793static u8 amp_next_handle;
794static inline u8 physlink_handle(struct hci_dev *hdev)
795{
796 /* TODO amp_next_handle should be part of hci_dev */
797 if (amp_next_handle == 0)
798 amp_next_handle = 1;
799 return amp_next_handle++;
800}
801
802/* Start an Accept Physical Link sequence */
803static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
804{
805 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
806 struct amp_ctx *ctx = NULL;
807 struct a2mp_createphyslink_req *req;
808
809 if (hdr->len < sizeof(*req))
810 return -EINVAL;
811 req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
812 skb_pull(skb, sizeof(*req));
813 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
814
815 /* initialize the context */
816 ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
817 if (!ctx)
818 return -ENOMEM;
819 ctx->d.apl.req_ident = hdr->ident;
820 ctx->d.apl.remote_id = req->local_id;
821 ctx->id = req->remote_id;
822
823 /* add the supplied remote assoc to the context */
824 ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
825 if (ctx->d.apl.remote_assoc)
826 memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
827 ctx->d.apl.len_so_far = 0;
828 ctx->d.apl.rem_len = skb->len;
829 skb_pull(skb, skb->len);
Peter Krystad4e1c9fa2011-11-10 12:28:45 -0800830 ctx->hdev = hci_dev_get(ctx->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700831 start_ctx(mgr, ctx);
832 return 0;
833}
834
/* State machine for the responder side of physical link setup
 * (AMP_ACCEPTPHYSLINK).  Driven by execute_ctx() with A2MP, HCI
 * command status/complete, and HCI event callbacks.  Returns 0 to stay
 * alive, 1 when the context is finished and may be destroyed; on
 * failure an A2MP Create Physical Link Response with a non-zero status
 * is sent back to the initiator.
 */
static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct sk_buff *skb = data;
	struct hci_cp_accept_phys_link acp;
	struct hci_cp_write_remote_amp_assoc wcp;
	struct hci_rp_write_remote_amp_assoc *wrp;
	struct hci_ev_cmd_status *cs = data;
	struct hci_ev_phys_link_complete *ev;
	struct a2mp_createphyslink_rsp rsp;
	struct amp_ctx *cplctx;
	struct amp_ctx *aplctx;
	u16 frag_len;
	struct hci_conn *conn;
	int result;

	BT_DBG("state %d", ctx->state);
	result = -EINVAL;
	rsp.status = 1;		/* Invalid Controller ID */
	if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
		goto apl_finished;
	if (evt_type == AMP_KILLED) {
		result = -EAGAIN;
		rsp.status = 4;	/* Disconnect request received */
		goto apl_finished;
	}
	if (!ctx->d.apl.remote_assoc) {
		result = -ENOMEM;
		rsp.status = 2;	/* Unable to Start */
		goto apl_finished;
	}

	switch (ctx->state) {
	case AMP_APL_INIT:
		BT_DBG("local_id %d, remote_id %d",
			ctx->id, ctx->d.apl.remote_id);
		/* a link to the same remote controller already exists:
		 * this request is a duplicate
		 */
		conn = hci_conn_hash_lookup_id(ctx->hdev,
			&ctx->mgr->l2cap_conn->hcon->dst,
			ctx->d.apl.remote_id);
		if (conn) {
			result = -EEXIST;
			rsp.status = 5;	/* Already Exists */
			goto apl_finished;
		}

		/* another accept for the same remote id is already in
		 * flight: queue this context behind it
		 */
		aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
		if ((aplctx) &&
			(aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
			BT_DBG("deferred to %p", aplctx);
			aplctx->deferred = ctx;
			break;
		}

		/* create/accept collision: the side whose BD_ADDR compares
		 * lower abandons its own create attempt
		 */
		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
		if ((cplctx) &&
			(cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
			struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
			BT_DBG("local %s remote %s",
				batostr(&bcon->hdev->bdaddr),
				batostr(&bcon->dst));
			if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
				(bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
				BT_DBG("COLLISION LOSER");
				cplctx->deferred = ctx;
				cancel_ctx(cplctx);
				break;
			} else {
				BT_DBG("COLLISION WINNER");
				result = -EISCONN;
				rsp.status = 3;	/* Collision */
				goto apl_finished;
			}
		}

		/* derive the dedicated AMP link key from the BR/EDR key */
		result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
						&acp.key_len, &acp.type);
		if (result) {
			BT_DBG("SECURITY");
			rsp.status = 6;	/* Security Violation */
			goto apl_finished;
		}

		ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
		ctx->state = AMP_APL_APL_STATUS;
		ctx->evt_type = AMP_HCI_CMD_STATUS;
		ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
		acp.phy_handle = ctx->d.apl.phy_handle;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
		break;

	case AMP_APL_APL_STATUS:
		if (cs->status != 0)
			goto apl_finished;
		/* PAL will accept link, send a2mp response */
		rsp.local_id = ctx->id;
		rsp.remote_id = ctx->d.apl.remote_id;
		rsp.status = 0;
		send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
				A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);

		/* send the first assoc fragment */
		wcp.phy_handle = ctx->d.apl.phy_handle;
		wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
		wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
		frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
		memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
		ctx->state = AMP_APL_WRA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
		/* 5 = size of the fixed wcp fields ahead of the fragment */
		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
		break;

	case AMP_APL_WRA_COMPLETE:
		/* received write remote amp assoc command complete event */
		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
		if (wrp->status != 0)
			goto apl_finished;
		if (wrp->phy_handle != ctx->d.apl.phy_handle)
			goto apl_finished;
		/* update progress */
		frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
		ctx->d.apl.len_so_far += frag_len;
		ctx->d.apl.rem_len -= frag_len;
		if (ctx->d.apl.rem_len > 0) {
			u8 *assoc;
			/* another assoc fragment to send */
			wcp.phy_handle = ctx->d.apl.phy_handle;
			wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
			wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
			frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
			assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
			memcpy(wcp.frag, assoc, frag_len);
			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
			break;
		}
		/* wait for physical link complete event */
		ctx->state = AMP_APL_PL_COMPLETE;
		ctx->evt_type = AMP_HCI_EVENT;
		ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
		break;

	case AMP_APL_PL_COMPLETE:
		/* physical link complete event received */
		if (skb->len < sizeof(*ev))
			goto apl_finished;
		ev = (struct hci_ev_phys_link_complete *) skb->data;
		if (ev->phy_handle != ctx->d.apl.phy_handle)
			break;	/* event belongs to some other link */
		if (ev->status != 0)
			goto apl_finished;
		conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
		if (!conn)
			goto apl_finished;
		result = 0;
		BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
		conn->dst_id = ctx->d.apl.remote_id;
		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
		goto apl_finished;
		break;

	default:
		goto apl_finished;
		break;
	}
	return 0;

apl_finished:
	/* tell L2CAP, answer the initiator if no final A2MP response went
	 * out yet, and release everything the context holds
	 */
	if (ctx->sk)
		l2cap_amp_physical_complete(result, ctx->id,
						ctx->d.apl.remote_id, ctx->sk);
	if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
		rsp.local_id = ctx->id;
		rsp.remote_id = ctx->d.apl.remote_id;
		send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
				A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
	}
	kfree(ctx->d.apl.remote_assoc);
	if (ctx->sk)
		sock_put(ctx->sk);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	return 1;
}
1017
1018static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
1019{
1020 struct hci_cp_disconn_phys_link dcp;
1021
1022 ctx->state = AMP_CPL_PL_CANCEL;
1023 ctx->evt_type = AMP_HCI_EVENT;
1024 ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
1025 dcp.phy_handle = ctx->d.cpl.phy_handle;
1026 dcp.reason = reason;
1027 hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
1028}
1029
/*
 * State machine for the initiator side of AMP physical link bring-up.
 *
 * Driven by execute_ctx() with A2MP responses, HCI command status /
 * command complete events, and HCI events.  Sequence: A2MP discover ->
 * getinfo -> get AMP assoc -> HCI Create Physical Link -> write remote
 * assoc in fragments -> channel selected -> read local assoc in
 * fragments -> A2MP create physical link request -> physical link
 * complete.
 *
 * Returns 0 while the context is still in progress, 1 when it is done
 * (the caller then destroys the context).  On any exit through
 * cpl_finished the L2CAP layer is told the outcome via
 * l2cap_amp_physical_complete() and all context resources are released.
 */
static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
{
	struct amp_ctrl *ctrl;
	struct sk_buff *skb = data;
	struct a2mp_cmd_hdr *hdr;
	struct hci_ev_cmd_status *cs = data;
	struct amp_ctx *cplctx;
	struct a2mp_discover_req dreq;
	struct a2mp_discover_rsp *drsp;
	u16 *efm;
	struct a2mp_getinfo_req greq;
	struct a2mp_getinfo_rsp *grsp;
	struct a2mp_cl *cl;
	struct a2mp_getampassoc_req areq;
	struct a2mp_getampassoc_rsp *arsp;
	struct hci_cp_create_phys_link cp;
	struct hci_cp_write_remote_amp_assoc wcp;
	struct hci_rp_write_remote_amp_assoc *wrp;
	struct hci_ev_channel_selected *cev;
	struct hci_cp_read_local_amp_assoc rcp;
	struct hci_rp_read_local_amp_assoc *rrp;
	struct a2mp_createphyslink_req creq;
	struct a2mp_createphyslink_rsp *crsp;
	struct hci_ev_phys_link_complete *pev;
	struct hci_ev_disconn_phys_link_complete *dev;
	u8 *assoc, *rassoc, *lassoc;
	u16 frag_len;
	u16 rem_len;
	int result = -EAGAIN;
	struct hci_conn *conn;

	BT_DBG("state %d", ctx->state);
	if (evt_type == AMP_KILLED)
		goto cpl_finished;

	if (evt_type == AMP_CANCEL) {
		/* Before CPL_STATUS no physical link exists yet, and once
		 * only the A2MP response remains outstanding there is
		 * nothing to disconnect; just finish.  Otherwise tear down
		 * via HCI (0x16 = connection terminated by local host). */
		if ((ctx->state < AMP_CPL_CPL_STATUS) ||
			((ctx->state == AMP_CPL_PL_COMPLETE) &&
			!(ctx->evt_type & AMP_HCI_EVENT)))
			goto cpl_finished;

		cancel_cpl_ctx(ctx, 0x16);
		return 0;
	}

	switch (ctx->state) {
	case AMP_CPL_INIT:
		/* serialize: if another create-phys-link context is already
		 * running, chain this one behind it */
		cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
		if (cplctx) {
			BT_DBG("deferred to %p", cplctx);
			cplctx->deferred = ctx;
			break;
		}
		ctx->state = AMP_CPL_DISC_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
		dreq.ext_feat = 0;
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
							sizeof(dreq), &dreq);
		break;

	case AMP_CPL_DISC_RSP:
		drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
		if (skb->len < (sizeof(*drsp))) {
			result = -EINVAL;
			goto cpl_finished;
		}

		efm = (u16 *) skb_pull(skb, sizeof(*drsp));
		BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
						le16_to_cpu(drsp->ext_feat));

		/* skip extended feature mask words; bit 15 set means
		 * another mask word follows */
		while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
			if (skb->len < sizeof(*efm)) {
				result = -EINVAL;
				goto cpl_finished;
			}
			drsp->ext_feat = *efm;
			BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
			efm = (u16 *) skb_pull(skb, sizeof(*efm));
		}
		cl = (struct a2mp_cl *) efm;

		/* find the first remote and local controller with the
		 * same type
		 */
		greq.id = 0;
		result = -ENODEV;
		while (skb->len >= sizeof(*cl)) {
			if ((cl->id != 0) && (greq.id == 0)) {
				struct hci_dev *hdev;
				hdev = hci_dev_get_type(cl->type);
				if (hdev) {
					struct hci_conn *conn;
					ctx->hdev = hdev;
					ctx->id = hdev->id;
					ctx->d.cpl.remote_id = cl->id;
					conn = hci_conn_hash_lookup_ba(hdev,
						ACL_LINK,
						&ctx->mgr->l2cap_conn->hcon->dst);
					if (conn) {
						/* result = 0: a link to this
						 * peer already exists */
						BT_DBG("PL_COMPLETE exists %x",
							(int) conn->handle);
						result = 0;
					}
					ctrl = get_create_ctrl(ctx->mgr,
								cl->id);
					if (ctrl) {
						ctrl->type = cl->type;
						ctrl->status = cl->status;
					}
					greq.id = cl->id;
				}
			}
			cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
		}
		/* stop if no usable controller found, or the physical link
		 * already exists (reported as success above) */
		if ((!greq.id) || (!result))
			goto cpl_finished;
		ctx->state = AMP_CPL_GETINFO_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
							sizeof(greq), &greq);
		break;

	case AMP_CPL_GETINFO_RSP:
		if (skb->len < sizeof(*grsp))
			goto cpl_finished;
		grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
		skb_pull(skb, sizeof(*grsp));
		if (grsp->status)
			goto cpl_finished;
		if (grsp->id != ctx->d.cpl.remote_id)
			goto cpl_finished;
		ctrl = get_ctrl(ctx->mgr, grsp->id);
		if (!ctrl)
			goto cpl_finished;
		/* cache the remote controller's capabilities */
		ctrl->status = grsp->status;
		ctrl->total_bw = le32_to_cpu(grsp->total_bw);
		ctrl->max_bw = le32_to_cpu(grsp->max_bw);
		ctrl->min_latency = le32_to_cpu(grsp->min_latency);
		ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
		ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);

		ctx->d.cpl.max_len = ctrl->max_assoc_size;

		/* setup up GAA request */
		areq.id = ctx->d.cpl.remote_id;

		/* advance context state */
		ctx->state = AMP_CPL_GAA_RSP;
		ctx->evt_type = AMP_A2MP_RSP;
		ctx->rsp_ident = next_ident(ctx->mgr);
		send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
							sizeof(areq), &areq);
		break;

	case AMP_CPL_GAA_RSP:
		if (skb->len < sizeof(*arsp))
			goto cpl_finished;
		hdr = (void *) skb->data;
		arsp = (void *) skb_pull(skb, sizeof(*hdr));
		if (arsp->status != 0)
			goto cpl_finished;

		/* store away remote assoc */
		/* NOTE(review): hdr->len is read without le16_to_cpu()
		 * here, unlike other a2mp_cmd_hdr readers in this file -
		 * confirm intent for big-endian targets. */
		assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
		ctx->d.cpl.len_so_far = 0;
		ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
		skb_pull(skb, ctx->d.cpl.rem_len);
		rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
		if (!rassoc)
			goto cpl_finished;
		memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
		ctx->d.cpl.remote_assoc = rassoc;

		/* set up CPL command */
		ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
		cp.phy_handle = ctx->d.cpl.phy_handle;
		if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
						&cp.key_len, &cp.type)) {
			result = -EPERM;
			goto cpl_finished;
		}

		/* advance context state */
		ctx->state = AMP_CPL_CPL_STATUS;
		ctx->evt_type = AMP_HCI_CMD_STATUS;
		ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
		break;

	case AMP_CPL_CPL_STATUS:
		/* received create physical link command status */
		if (cs->status != 0)
			goto cpl_finished;
		/* send the first assoc fragment
		 * (248 = max assoc bytes per HCI command payload) */
		wcp.phy_handle = ctx->d.cpl.phy_handle;
		wcp.len_so_far = ctx->d.cpl.len_so_far;
		wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
		memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
		ctx->state = AMP_CPL_WRA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
		hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
		break;

	case AMP_CPL_WRA_COMPLETE:
		/* received write remote amp assoc command complete event */
		if (skb->len < sizeof(*wrp))
			goto cpl_finished;
		wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
		if (wrp->status != 0)
			goto cpl_finished;
		if (wrp->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;

		/* update progress */
		frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
		ctx->d.cpl.len_so_far += frag_len;
		ctx->d.cpl.rem_len -= frag_len;
		if (ctx->d.cpl.rem_len > 0) {
			/* another assoc fragment to send */
			wcp.phy_handle = ctx->d.cpl.phy_handle;
			wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
			wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
			frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
			memcpy(wcp.frag,
				ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
				frag_len);
			hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
			break;
		}
		/* now wait for channel selected event */
		ctx->state = AMP_CPL_CHANNEL_SELECT;
		ctx->evt_type = AMP_HCI_EVENT;
		ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
		break;

	case AMP_CPL_CHANNEL_SELECT:
		/* received channel selection event */
		if (skb->len < sizeof(*cev))
			goto cpl_finished;
		cev = (void *) skb->data;
/* TODO - PK This check is valid but Libra PAL returns 0 for handle during
	Create Physical Link collision scenario
		if (cev->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;
*/

		/* request the first local assoc fragment */
		rcp.phy_handle = ctx->d.cpl.phy_handle;
		rcp.len_so_far = 0;
		rcp.max_len = ctx->d.cpl.max_len;
		lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
		if (!lassoc)
			goto cpl_finished;
		ctx->d.cpl.local_assoc = lassoc;
		ctx->d.cpl.len_so_far = 0;
		ctx->state = AMP_CPL_RLA_COMPLETE;
		ctx->evt_type = AMP_HCI_CMD_CMPLT;
		ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
		hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
		break;

	case AMP_CPL_RLA_COMPLETE:
		/* received read local amp assoc command complete event;
		 * 4 = status + phy_handle + rem_len response header */
		if (skb->len < 4)
			goto cpl_finished;
		rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
		if (rrp->status)
			goto cpl_finished;
		if (rrp->phy_handle != ctx->d.cpl.phy_handle)
			goto cpl_finished;
		rem_len = le16_to_cpu(rrp->rem_len);
		skb_pull(skb, 4);
		frag_len = skb->len;

		if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
			goto cpl_finished;

		/* save this fragment in context */
		lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
		memcpy(lassoc, rrp->frag, frag_len);
		ctx->d.cpl.len_so_far += frag_len;
		rem_len -= frag_len;
		if (rem_len > 0) {
			/* request another local assoc fragment */
			rcp.phy_handle = ctx->d.cpl.phy_handle;
			rcp.len_so_far = ctx->d.cpl.len_so_far;
			rcp.max_len = ctx->d.cpl.max_len;
			hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
		} else {
			creq.local_id = ctx->id;
			creq.remote_id = ctx->d.cpl.remote_id;
			/* wait for A2MP rsp AND phys link complete event */
			ctx->state = AMP_CPL_PL_COMPLETE;
			ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
			ctx->rsp_ident = next_ident(ctx->mgr);
			ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
			send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
				A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
				ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
		}
		break;

	case AMP_CPL_PL_COMPLETE:
		if (evt_type == AMP_A2MP_RSP) {
			/* create physical link response received */
			ctx->evt_type &= ~AMP_A2MP_RSP;
			if (skb->len < sizeof(*crsp))
				goto cpl_finished;
			crsp = (void *) skb_pull(skb, sizeof(*hdr));
			if ((crsp->local_id != ctx->d.cpl.remote_id) ||
				(crsp->remote_id != ctx->id) ||
				(crsp->status != 0)) {
				/* 0x13 = remote user terminated connection */
				cancel_cpl_ctx(ctx, 0x13);
				break;
			}

			/* notify Qualcomm PAL (vendor-specific command) */
			if (ctx->hdev->manufacturer == 0x001d)
				hci_send_cmd(ctx->hdev,
					hci_opcode_pack(0x3f, 0x00), 0, NULL);
		}
		if (evt_type == AMP_HCI_EVENT) {
			ctx->evt_type &= ~AMP_HCI_EVENT;
			/* physical link complete event received */
			if (skb->len < sizeof(*pev))
				goto cpl_finished;
			pev = (void *) skb->data;
			if (pev->phy_handle != ctx->d.cpl.phy_handle)
				break;
			if (pev->status != 0)
				goto cpl_finished;
		}
		/* both the A2MP response and the HCI event must arrive
		 * before the link is considered up */
		if (ctx->evt_type)
			break;
		conn = hci_conn_hash_lookup_handle(ctx->hdev,
						ctx->d.cpl.phy_handle);
		if (!conn)
			goto cpl_finished;
		result = 0;
		BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
		bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
		conn->dst_id = ctx->d.cpl.remote_id;
		conn->out = 1;
		goto cpl_finished;
		break;

	case AMP_CPL_PL_CANCEL:
		/* disconnect-complete after cancel_cpl_ctx() */
		dev = (void *) skb->data;
		BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
		result = -EISCONN;
		goto cpl_finished;
		break;

	default:
		goto cpl_finished;
		break;
	}
	return 0;

cpl_finished:
	/* report outcome and release everything held by this context */
	l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
					ctx->sk);
	if (ctx->sk)
		sock_put(ctx->sk);
	if (ctx->hdev)
		hci_dev_put(ctx->hdev);
	kfree(ctx->d.cpl.remote_assoc);
	kfree(ctx->d.cpl.local_assoc);
	return 1;
}
1406
/*
 * Handle an incoming A2MP Disconnect Physical Link request.
 *
 * Looks up the local controller and any ACL/physical link to the peer,
 * disconnects it (or kills a pending accept-physical-link context), and
 * always replies with an A2MP Disconnect Physical Link response whose
 * status reports the result.  Returns 0, or -EINVAL on a short request.
 *
 * NOTE(review): hdr->len is compared without le16_to_cpu(), unlike other
 * header readers in this file - confirm for big-endian targets.
 */
static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (void *) skb->data;
	struct a2mp_disconnphyslink_req *req;
	struct a2mp_disconnphyslink_rsp rsp;
	struct hci_dev *hdev;
	struct hci_conn *conn;
	struct amp_ctx *aplctx;

	BT_DBG("mgr %p skb %p", mgr, skb);
	if (hdr->len < sizeof(*req))
		return -EINVAL;
	req = (void *) skb_pull(skb, sizeof(*hdr));
	skb_pull(skb, sizeof(*req));

	/* ids swap direction in the response */
	rsp.local_id = req->remote_id;
	rsp.remote_id = req->local_id;
	rsp.status = 0;
	BT_DBG("local_id %d remote_id %d",
		(int) rsp.local_id, (int) rsp.remote_id);
	hdev = hci_dev_get(rsp.local_id);
	if (!hdev) {
		rsp.status = 1; /* Invalid Controller ID */
		goto dpl_finished;
	}
	BT_DBG("hdev %p", hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					&mgr->l2cap_conn->hcon->dst);
	if (!conn) {
		/* no established link yet - a pending accept context
		 * counts as the link being torn down successfully */
		aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
		if (aplctx) {
			kill_ctx(aplctx);
			rsp.status = 0;
			goto dpl_finished;
		}
		rsp.status = 2; /* No Physical Link exists */
		goto dpl_finished;
	}
	BT_DBG("conn %p", conn);
	/* 0x13 = remote user terminated connection */
	hci_disconnect(conn, 0x13);

dpl_finished:
	send_a2mp_cmd(mgr, hdr->ident,
			A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
	if (hdev)
		hci_dev_put(hdev);
	return 0;
}
1455
1456static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
1457{
1458 struct amp_mgr *mgr = ctx->mgr;
1459 u8 finished = 0;
1460
1461 if (!mgr->connected)
1462 return 0;
1463
1464 switch (ctx->type) {
1465 case AMP_GETAMPASSOC:
1466 finished = getampassoc_handler(ctx, evt_type, data);
1467 break;
1468 case AMP_CREATEPHYSLINK:
1469 finished = createphyslink_handler(ctx, evt_type, data);
1470 break;
1471 case AMP_ACCEPTPHYSLINK:
1472 finished = acceptphyslink_handler(ctx, evt_type, data);
1473 break;
1474 }
1475
1476 if (!finished)
1477 mod_timer(&(ctx->timer), jiffies +
1478 msecs_to_jiffies(A2MP_RSP_TIMEOUT));
1479 else
1480 destroy_ctx(ctx);
1481 return finished;
1482}
1483
1484static int cancel_ctx(struct amp_ctx *ctx)
1485{
1486 return execute_ctx(ctx, AMP_CANCEL, 0);
1487}
1488
1489static int kill_ctx(struct amp_ctx *ctx)
1490{
1491 return execute_ctx(ctx, AMP_KILLED, 0);
1492}
1493
1494static void ctx_timeout_worker(struct work_struct *w)
1495{
1496 struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
1497 struct amp_ctx *ctx = work->ctx;
1498 kill_ctx(ctx);
1499 kfree(work);
1500}
1501
1502static void ctx_timeout(unsigned long data)
1503{
1504 struct amp_ctx *ctx = (struct amp_ctx *) data;
1505 struct amp_work_ctx_timeout *work;
1506
1507 BT_DBG("ctx %p", ctx);
1508 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1509 if (work) {
1510 INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
1511 work->ctx = ctx;
1512 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1513 kfree(work);
1514 }
1515}
1516
1517static void launch_ctx(struct amp_mgr *mgr)
1518{
1519 struct amp_ctx *ctx = NULL;
1520
1521 BT_DBG("mgr %p", mgr);
Peter Krystadf5289202011-11-14 15:11:22 -08001522 read_lock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001523 if (!list_empty(&mgr->ctx_list))
1524 ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
Peter Krystadf5289202011-11-14 15:11:22 -08001525 read_unlock(&mgr->ctx_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001526 BT_DBG("ctx %p", ctx);
1527 if (ctx)
1528 execute_ctx(ctx, AMP_INIT, NULL);
1529}
1530
1531static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
1532{
1533 struct amp_ctx *ctx;
1534 struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
1535 u16 hdr_len = le16_to_cpu(hdr->len);
1536
1537 /* find context waiting for A2MP rsp with this rsp's identifier */
1538 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
1539 ctx = get_ctx_a2mp(mgr, hdr->ident);
1540 if (ctx) {
1541 execute_ctx(ctx, AMP_A2MP_RSP, skb);
1542 } else {
1543 BT_DBG("context not found");
1544 skb_pull(skb, sizeof(*hdr));
1545 if (hdr_len > skb->len)
1546 hdr_len = skb->len;
1547 skb_pull(skb, hdr_len);
1548 }
1549 return 0;
1550}
1551
1552/* L2CAP-A2MP interface */
1553
/*
 * Parse and dispatch a stream of A2MP commands arriving on the fixed
 * channel socket.  Each handler is responsible for pulling its own
 * bytes off the skb; the loop re-reads skb->len after every command.
 *
 * NOTE(review): the inner 'hdr' shadows the outer one.  skb_pull() does
 * not move the underlying data, so on error the COMMAND_REJ below is
 * sent with the ident of the FIRST command in the buffer - confirm that
 * is intended.  Also note the loop relies on every handler consuming
 * bytes; a handler that pulls nothing would spin here.
 */
static void a2mp_receive(struct sock *sk, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	int len;
	int err = 0;
	struct amp_mgr *mgr;

	mgr = get_amp_mgr_sk(sk);
	if (!mgr)
		goto a2mp_finished;

	len = skb->len;
	while (len >= sizeof(*hdr)) {
		struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
		u16 clen = le16_to_cpu(hdr->len);

		BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
		/* ident 0 is reserved; clen beyond the buffer is malformed */
		if (clen > len || !hdr->ident) {
			err = -EINVAL;
			break;
		}
		switch (hdr->code) {
		case A2MP_COMMAND_REJ:
			command_rej(mgr, skb);
			break;
		case A2MP_DISCOVER_REQ:
			err = discover_req(mgr, skb);
			break;
		case A2MP_CHANGE_NOTIFY:
			err = change_notify(mgr, skb);
			break;
		case A2MP_GETINFO_REQ:
			err = getinfo_req(mgr, skb);
			break;
		case A2MP_GETAMPASSOC_REQ:
			err = getampassoc_req(mgr, skb);
			break;
		case A2MP_CREATEPHYSLINK_REQ:
			err = createphyslink_req(mgr, skb);
			break;
		case A2MP_DISCONNPHYSLINK_REQ:
			err = disconnphyslink_req(mgr, skb);
			break;
		case A2MP_CHANGE_RSP:
		case A2MP_DISCOVER_RSP:
		case A2MP_GETINFO_RSP:
		case A2MP_GETAMPASSOC_RSP:
		case A2MP_CREATEPHYSLINK_RSP:
		case A2MP_DISCONNPHYSLINK_RSP:
			err = a2mp_rsp(mgr, skb);
			break;
		default:
			BT_ERR("Unknown A2MP signaling command 0x%2.2x",
				hdr->code);
			skb_pull(skb, sizeof(*hdr));
			err = -EINVAL;
			break;
		}
		len = skb->len;
	}

a2mp_finished:
	/* reject malformed input so the peer does not wait forever */
	if (err && mgr) {
		struct a2mp_cmd_rej rej;
		rej.reason = cpu_to_le16(0);
		send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
						sizeof(rej), &rej);
	}
}
1623
1624/* L2CAP-A2MP interface */
1625
1626static int send_a2mp(struct socket *sock, u8 *data, int len)
1627{
1628 struct kvec iv = { data, len };
1629 struct msghdr msg;
1630
1631 memset(&msg, 0, sizeof(msg));
1632
1633 return kernel_sendmsg(sock, &msg, &iv, 1, len);
1634}
1635
1636static void data_ready_worker(struct work_struct *w)
1637{
1638 struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
1639 struct sock *sk = work->sk;
1640 struct sk_buff *skb;
1641
1642 /* skb_dequeue() is thread-safe */
1643 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1644 a2mp_receive(sk, skb);
1645 kfree_skb(skb);
1646 }
1647 sock_put(work->sk);
1648 kfree(work);
1649}
1650
1651static void data_ready(struct sock *sk, int bytes)
1652{
1653 struct amp_work_data_ready *work;
1654 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1655 if (work) {
1656 INIT_WORK((struct work_struct *) work, data_ready_worker);
1657 sock_hold(sk);
1658 work->sk = sk;
1659 work->bytes = bytes;
1660 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1661 kfree(work);
1662 sock_put(sk);
1663 }
1664 }
1665}
1666
/*
 * Deferred handler for A2MP socket state transitions.
 *
 * BT_CONNECTED: mark the manager up, deliver any frame that arrived
 * before the channel finished connecting, and start the first queued
 * context.  BT_CLOSED: release the socket (unless it is already dead)
 * and tear down the manager.  Drops the ref taken by state_change().
 */
static void state_change_worker(struct work_struct *w)
{
	struct amp_work_state_change *work = (struct amp_work_state_change *) w;
	struct amp_mgr *mgr;
	switch (work->sk->sk_state) {
	case BT_CONNECTED:
		/* socket is up */
		BT_DBG("CONNECTED");
		mgr = get_amp_mgr_sk(work->sk);
		if (mgr) {
			mgr->connected = 1;
			if (mgr->skb) {
				/* frame deferred while connecting */
				l2cap_recv_deferred_frame(work->sk, mgr->skb);
				mgr->skb = NULL;
			}
			launch_ctx(mgr);
		}
		break;

	case BT_CLOSED:
		/* connection is gone */
		BT_DBG("CLOSED");
		mgr = get_amp_mgr_sk(work->sk);
		if (mgr) {
			/* a dead socket is already being released elsewhere */
			if (!sock_flag(work->sk, SOCK_DEAD))
				sock_release(mgr->a2mp_sock);
			mgr->a2mp_sock = NULL;
			remove_amp_mgr(mgr);
		}
		break;

	default:
		/* something else happened */
		break;
	}
	sock_put(work->sk);
	kfree(work);
}
1705
1706static void state_change(struct sock *sk)
1707{
1708 struct amp_work_state_change *work;
1709 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1710 if (work) {
1711 INIT_WORK((struct work_struct *) work, state_change_worker);
1712 sock_hold(sk);
1713 work->sk = sk;
1714 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1715 kfree(work);
1716 sock_put(sk);
1717 }
1718 }
1719}
1720
/*
 * Create, bind and (non-blocking) connect a kernel L2CAP socket on the
 * A2MP fixed channel between src and dst, installing our data-ready and
 * state-change callbacks.  Returns the socket, or NULL on failure.
 *
 * NOTE(review): 'opts' uses a positional initializer, so it depends on
 * the exact field order of struct l2cap_options (mtu, omtu, flush_to,
 * mode, ... presumably) - verify against the header if that struct
 * ever changes.
 */
static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
{
	int err;
	struct socket *sock;
	struct sockaddr_l2 addr;
	struct sock *sk;
	struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
				L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
				L2CAP_MODE_ERTM, 1, 0xFF, 1};


	err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
					BTPROTO_L2CAP, &sock);

	if (err) {
		BT_ERR("sock_create_kern failed %d", err);
		return NULL;
	}

	sk = sock->sk;
	sk->sk_data_ready = data_ready;
	sk->sk_state_change = state_change;

	/* bind the local end to the A2MP fixed CID */
	memset(&addr, 0, sizeof(addr));
	bacpy(&addr.l2_bdaddr, src);
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_cid = L2CAP_CID_A2MP;
	err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
	if (err) {
		BT_ERR("kernel_bind failed %d", err);
		sock_release(sock);
		return NULL;
	}

	l2cap_fixed_channel_config(sk, &opts);

	/* connect to the peer's A2MP fixed CID; completion is signalled
	 * asynchronously through state_change() */
	memset(&addr, 0, sizeof(addr));
	bacpy(&addr.l2_bdaddr, dst);
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_cid = L2CAP_CID_A2MP;
	err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
							O_NONBLOCK);
	if ((err == 0) || (err == -EINPROGRESS))
		return sock;
	else {
		BT_ERR("kernel_connect failed %d", err);
		sock_release(sock);
		return NULL;
	}
}
1771
1772static void conn_ind_worker(struct work_struct *w)
1773{
1774 struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
Peter Krystad072a51f2012-03-30 12:59:33 -07001775 struct hci_conn *hcon = work->hcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001776 struct sk_buff *skb = work->skb;
1777 struct amp_mgr *mgr;
1778
Peter Krystad072a51f2012-03-30 12:59:33 -07001779 mgr = get_create_amp_mgr(hcon, skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001780 BT_DBG("mgr %p", mgr);
Peter Krystad072a51f2012-03-30 12:59:33 -07001781 hci_conn_put(hcon);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001782 kfree(work);
1783}
1784
1785static void create_physical_worker(struct work_struct *w)
1786{
1787 struct amp_work_create_physical *work =
1788 (struct amp_work_create_physical *) w;
1789
1790 create_physical(work->conn, work->sk);
1791 sock_put(work->sk);
1792 kfree(work);
1793}
1794
1795static void accept_physical_worker(struct work_struct *w)
1796{
1797 struct amp_work_accept_physical *work =
1798 (struct amp_work_accept_physical *) w;
1799
1800 accept_physical(work->conn, work->id, work->sk);
1801 sock_put(work->sk);
1802 kfree(work);
1803}
1804
1805/* L2CAP Fixed Channel interface */
1806
Peter Krystad072a51f2012-03-30 12:59:33 -07001807void amp_conn_ind(struct hci_conn *hcon, struct sk_buff *skb)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001808{
1809 struct amp_work_conn_ind *work;
Peter Krystad072a51f2012-03-30 12:59:33 -07001810 BT_DBG("hcon %p, skb %p", hcon, skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001811 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1812 if (work) {
1813 INIT_WORK((struct work_struct *) work, conn_ind_worker);
Peter Krystad072a51f2012-03-30 12:59:33 -07001814 hci_conn_hold(hcon);
1815 work->hcon = hcon;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001816 work->skb = skb;
Peter Krystad072a51f2012-03-30 12:59:33 -07001817 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1818 hci_conn_put(hcon);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001819 kfree(work);
Peter Krystad072a51f2012-03-30 12:59:33 -07001820 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001821 }
1822}
1823
1824/* L2CAP Physical Link interface */
1825
1826void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
1827{
1828 struct amp_work_create_physical *work;
1829 BT_DBG("conn %p", conn);
1830 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1831 if (work) {
1832 INIT_WORK((struct work_struct *) work, create_physical_worker);
1833 work->conn = conn;
1834 work->sk = sk;
1835 sock_hold(sk);
1836 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1837 sock_put(sk);
1838 kfree(work);
1839 }
1840 }
1841}
1842
1843void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
1844{
1845 struct amp_work_accept_physical *work;
1846 BT_DBG("conn %p", conn);
1847
1848 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1849 if (work) {
1850 INIT_WORK((struct work_struct *) work, accept_physical_worker);
1851 work->conn = conn;
1852 work->sk = sk;
1853 work->id = id;
1854 sock_hold(sk);
1855 if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
1856 sock_put(sk);
1857 kfree(work);
1858 }
1859 }
1860}
1861
1862/* HCI interface */
1863
1864static void amp_cmd_cmplt_worker(struct work_struct *w)
1865{
1866 struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
1867 struct hci_dev *hdev = work->hdev;
1868 u16 opcode = work->opcode;
1869 struct sk_buff *skb = work->skb;
1870 struct amp_ctx *ctx;
1871
1872 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_CMPLT, opcode);
1873 if (ctx)
1874 execute_ctx(ctx, AMP_HCI_CMD_CMPLT, skb);
1875 kfree_skb(skb);
1876 kfree(w);
1877}
1878
1879static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
1880 struct sk_buff *skb)
1881{
1882 struct amp_work_cmd_cmplt *work;
1883 struct sk_buff *skbc;
1884 BT_DBG("hdev %p opcode 0x%x skb %p len %d",
1885 hdev, opcode, skb, skb->len);
1886 skbc = skb_clone(skb, GFP_ATOMIC);
1887 if (!skbc)
1888 return;
1889 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1890 if (work) {
1891 INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
1892 work->hdev = hdev;
1893 work->opcode = opcode;
1894 work->skb = skbc;
1895 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1896 kfree(work);
1897 }
1898}
1899
1900static void amp_cmd_status_worker(struct work_struct *w)
1901{
1902 struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
1903 struct hci_dev *hdev = work->hdev;
1904 u16 opcode = work->opcode;
1905 u8 status = work->status;
1906 struct amp_ctx *ctx;
1907
1908 ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_STATUS, opcode);
1909 if (ctx)
1910 execute_ctx(ctx, AMP_HCI_CMD_STATUS, &status);
1911 kfree(w);
1912}
1913
1914static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
1915{
1916 struct amp_work_cmd_status *work;
1917 BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
1918 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1919 if (work) {
1920 INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
1921 work->hdev = hdev;
1922 work->opcode = opcode;
1923 work->status = status;
1924 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1925 kfree(work);
1926 }
1927}
1928
1929static void amp_event_worker(struct work_struct *w)
1930{
1931 struct amp_work_event *work = (struct amp_work_event *) w;
1932 struct hci_dev *hdev = work->hdev;
1933 u8 event = work->event;
1934 struct sk_buff *skb = work->skb;
1935 struct amp_ctx *ctx;
1936
1937 if (event == HCI_EV_AMP_STATUS_CHANGE) {
1938 struct hci_ev_amp_status_change *ev;
1939 if (skb->len < sizeof(*ev))
1940 goto amp_event_finished;
1941 ev = (void *) skb->data;
1942 if (ev->status != 0)
1943 goto amp_event_finished;
1944 if (ev->amp_status == hdev->amp_status)
1945 goto amp_event_finished;
1946 hdev->amp_status = ev->amp_status;
1947 send_a2mp_change_notify();
1948 goto amp_event_finished;
1949 }
1950 ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
1951 if (ctx)
1952 execute_ctx(ctx, AMP_HCI_EVENT, skb);
1953
1954amp_event_finished:
1955 kfree_skb(skb);
1956 kfree(w);
1957}
1958
1959static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
1960{
1961 struct amp_work_event *work;
1962 struct sk_buff *skbc;
1963 BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
1964 skbc = skb_clone(skb, GFP_ATOMIC);
1965 if (!skbc)
1966 return;
1967 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1968 if (work) {
1969 INIT_WORK((struct work_struct *) work, amp_event_worker);
1970 work->hdev = hdev;
1971 work->event = event;
1972 work->skb = skbc;
1973 if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
1974 kfree(work);
1975 }
1976}
1977
/* Deferred notifier work: broadcast an A2MP Change Notify after an AMP
 * controller was registered/unregistered or went up/down. */
static void amp_dev_event_worker(struct work_struct *w)
{
	send_a2mp_change_notify();
	kfree(w);
}
1983
1984static int amp_dev_event(struct notifier_block *this, unsigned long event,
1985 void *ptr)
1986{
1987 struct hci_dev *hdev = (struct hci_dev *) ptr;
1988 struct amp_work_event *work;
1989
1990 if (hdev->amp_type == HCI_BREDR)
1991 return NOTIFY_DONE;
1992
1993 switch (event) {
1994 case HCI_DEV_UNREG:
1995 case HCI_DEV_REG:
1996 case HCI_DEV_UP:
1997 case HCI_DEV_DOWN:
1998 BT_DBG("hdev %p event %ld", hdev, event);
1999 work = kmalloc(sizeof(*work), GFP_ATOMIC);
2000 if (work) {
2001 INIT_WORK((struct work_struct *) work,
2002 amp_dev_event_worker);
2003 if (queue_work(amp_workqueue,
2004 (struct work_struct *) work) == 0)
2005 kfree(work);
2006 }
2007 }
2008 return NOTIFY_DONE;
2009}
2010
2011
2012/* L2CAP module init continued */
2013
/* Forwards hci_dev register/unregister/up/down events to amp_dev_event(). */
static struct notifier_block amp_notifier = {
	.notifier_call = amp_dev_event
};
2017
/* Callbacks registered with the HCI core for AMP controller
 * command-complete, command-status and event notifications. */
static struct amp_mgr_cb hci_amp = {
	.amp_cmd_complete_event = amp_cmd_cmplt_evt,
	.amp_cmd_status_event = amp_cmd_status_evt,
	.amp_event = amp_evt
};
2023
2024int amp_init(void)
2025{
2026 hci_register_amp(&hci_amp);
2027 hci_register_notifier(&amp_notifier);
2028 amp_next_handle = 1;
2029 amp_workqueue = create_singlethread_workqueue("a2mp");
2030 if (!amp_workqueue)
2031 return -EPERM;
2032 return 0;
2033}
2034
/*
 * Module teardown: detach from the HCI core first so no new work can be
 * queued, then drain and destroy the worker thread.
 */
void amp_exit(void)
{
	hci_unregister_amp(&hci_amp);
	hci_unregister_notifier(&amp_notifier);
	flush_workqueue(amp_workqueue);
	destroy_workqueue(amp_workqueue);
}