Initial Contribution
msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index bfb3dc0..8bb69df 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,7 +6,6 @@
tristate "Bluetooth subsystem support"
depends on NET && !S390
depends on RFKILL || !RFKILL
- select CRYPTO
help
Bluetooth is low-cost, low-power, short-range wireless technology.
It was designed as a replacement for cables and other short-range
@@ -37,11 +36,11 @@
config BT_L2CAP
bool "L2CAP protocol support"
+ depends on CRYPTO_HMAC
+ depends on CRYPTO_SHA256
select CRC16
- select CRYPTO
select CRYPTO_BLKCIPHER
select CRYPTO_AES
- select CRYPTO_ECB
help
L2CAP (Logical Link Control and Adaptation Protocol) provides
connection oriented and connection-less data transport. L2CAP
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 9b67f3d..6dfe088 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,5 +9,5 @@
obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
-bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o smp.o
+bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o amp.o smp.o
bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 7c73a10..219df5c 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -387,7 +387,7 @@
}
chunk = min_t(unsigned int, skb->len, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (!copied)
copied = -EFAULT;
@@ -399,7 +399,33 @@
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
- skb_pull(skb, chunk);
+ int skb_len = skb_headlen(skb);
+
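+ /*
+ * Consumed data may extend past the linear head into the
+ * frag list (e.g. a reassembled SDU), so trim the head first
+ * and then walk the fragments, keeping skb->len and
+ * skb->data_len consistent in case the skb is requeued.
+ */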
+ if (chunk <= skb_len) {
+ __skb_pull(skb, chunk);
+ } else {
+ struct sk_buff *frag;
+
+ __skb_pull(skb, skb_len);
+ chunk -= skb_len;
+
+ skb_walk_frags(skb, frag) {
+ if (chunk <= frag->len) {
+ /* Pulling partial data */
+ skb->len -= chunk;
+ skb->data_len -= chunk;
+ __skb_pull(frag, chunk);
+ break;
+ } else if (frag->len) {
+ /* Pulling all frag data */
+ chunk -= frag->len;
+ skb->len -= frag->len;
+ skb->data_len -= frag->len;
+ __skb_pull(frag, frag->len);
+ }
+ }
+ }
+
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
@@ -532,8 +558,9 @@
BT_DBG("sk %p", sk);
add_wait_queue(sk_sleep(sk), &wait);
- set_current_state(TASK_INTERRUPTIBLE);
while (sk->sk_state != state) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
if (!timeo) {
err = -EINPROGRESS;
break;
@@ -547,13 +574,12 @@
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
break;
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
new file mode 100644
index 0000000..cb43a9c
--- /dev/null
+++ b/net/bluetooth/amp.c
@@ -0,0 +1,2035 @@
+/*
+ Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/amp.h>
+
+static struct workqueue_struct *amp_workqueue;
+
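+/*
+ * One amp_mgr exists per BR/EDR L2CAP connection carrying the A2MP
+ * fixed channel.  The global list below lets socket- and HCI-side
+ * callbacks map back to their manager; _bh lock variants are used
+ * because callers may run in softirq context.
+ */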
+LIST_HEAD(amp_mgr_list);
+DEFINE_RWLOCK(amp_mgr_list_lock);
+
+static int send_a2mp(struct socket *sock, u8 *data, int len);
+
+static void ctx_timeout(unsigned long data);
+
+static void launch_ctx(struct amp_mgr *mgr);
+static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
+static int kill_ctx(struct amp_ctx *ctx);
+static int cancel_ctx(struct amp_ctx *ctx);
+
+static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
+
+static void remove_amp_mgr(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p", mgr);
+
+ write_lock_bh(&amp_mgr_list_lock);
+ list_del(&mgr->list);
+ write_unlock_bh(&amp_mgr_list_lock);
+
+ read_lock_bh(&mgr->ctx_list_lock);
+ while (!list_empty(&mgr->ctx_list)) {
+ struct amp_ctx *ctx;
+ ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
+ read_unlock_bh(&mgr->ctx_list_lock);
+ BT_DBG("kill ctx %p", ctx);
+ kill_ctx(ctx);
+ read_lock_bh(&mgr->ctx_list_lock);
+ }
+ read_unlock_bh(&mgr->ctx_list_lock);
+
+ kfree(mgr->ctrls);
+
+ kfree(mgr);
+}
+
+static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
+{
+ struct amp_mgr *mgr;
+ struct amp_mgr *found = NULL;
+
+ read_lock_bh(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
+ found = mgr;
+ break;
+ }
+ }
+ read_unlock_bh(&amp_mgr_list_lock);
+ return found;
+}
+
+static struct amp_mgr *get_create_amp_mgr(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct amp_mgr *mgr;
+
+ write_lock_bh(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ if (mgr->l2cap_conn == conn) {
+ BT_DBG("conn %p found %p", conn, mgr);
+ goto gc_finished;
+ }
+ }
+
+ mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
+ if (!mgr)
+ goto gc_finished;
+
+ mgr->l2cap_conn = conn;
+ mgr->next_ident = 1;
+ INIT_LIST_HEAD(&mgr->ctx_list);
+ rwlock_init(&mgr->ctx_list_lock);
+ mgr->skb = skb;
+ BT_DBG("conn %p mgr %p", conn, mgr);
+ mgr->a2mp_sock = open_fixed_channel(conn->src, conn->dst);
+ if (!mgr->a2mp_sock) {
+ kfree(mgr);
+ goto gc_finished;
+ }
+ list_add(&(mgr->list), &amp_mgr_list);
+
+gc_finished:
+ write_unlock_bh(&amp_mgr_list_lock);
+ return mgr;
+}
+
+static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
+{
+ if ((mgr->ctrls) && (mgr->ctrls->id == remote_id))
+ return mgr->ctrls;
+ else
+ return NULL;
+}
+
+static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
+{
+ struct amp_ctrl *ctrl;
+
+ BT_DBG("mgr %p, id %d", mgr, id);
+ if ((mgr->ctrls) && (mgr->ctrls->id == id))
+ ctrl = mgr->ctrls;
+ else {
+ kfree(mgr->ctrls);
+ ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
+ if (ctrl) {
+ ctrl->mgr = mgr;
+ ctrl->id = id;
+ }
+ mgr->ctrls = ctrl;
+ }
+
+ return ctrl;
+}
+
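+/*
+ * An amp_ctx is one multi-step AMP operation (get assoc, create or
+ * accept physical link).  Each context records which event class it
+ * is waiting on (A2MP response, HCI command status/complete, or HCI
+ * event) and is re-entered by execute_ctx() when a matching event
+ * arrives; the timer, re-armed on every step, kills the context if
+ * nothing arrives within A2MP_RSP_TIMEOUT.
+ */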
+static struct amp_ctx *create_ctx(u8 type, u8 state)
+{
+ struct amp_ctx *ctx = NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (ctx) {
+ ctx->type = type;
+ ctx->state = state;
+ init_timer(&(ctx->timer));
+ ctx->timer.function = ctx_timeout;
+ ctx->timer.data = (unsigned long) ctx;
+ }
+ BT_DBG("ctx %p, type %d", ctx, type);
+ return ctx;
+}
+
+static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
+{
+ BT_DBG("ctx %p", ctx);
+ write_lock_bh(&mgr->ctx_list_lock);
+ list_add(&ctx->list, &mgr->ctx_list);
+ write_unlock_bh(&mgr->ctx_list_lock);
+ ctx->mgr = mgr;
+ execute_ctx(ctx, AMP_INIT, 0);
+}
+
+static void destroy_ctx(struct amp_ctx *ctx)
+{
+ struct amp_mgr *mgr = ctx->mgr;
+
+ BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
+ del_timer(&ctx->timer);
+ write_lock_bh(&mgr->ctx_list_lock);
+ list_del(&ctx->list);
+ write_unlock_bh(&mgr->ctx_list_lock);
+ if (ctx->deferred)
+ execute_ctx(ctx->deferred, AMP_INIT, 0);
+ kfree(ctx);
+}
+
+static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
+{
+ struct amp_ctx *fnd = NULL;
+ struct amp_ctx *ctx;
+
+ read_lock_bh(&mgr->ctx_list_lock);
+ list_for_each_entry(ctx, &mgr->ctx_list, list) {
+ if (ctx->type == type) {
+ fnd = ctx;
+ break;
+ }
+ }
+ read_unlock_bh(&mgr->ctx_list_lock);
+ return fnd;
+}
+
+static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
+{
+ struct amp_mgr *mgr = cur->mgr;
+ struct amp_ctx *fnd = NULL;
+ struct amp_ctx *ctx;
+
+ read_lock_bh(&mgr->ctx_list_lock);
+ list_for_each_entry(ctx, &mgr->ctx_list, list) {
+ if ((ctx->type == type) && (ctx != cur)) {
+ fnd = ctx;
+ break;
+ }
+ }
+ read_unlock_bh(&mgr->ctx_list_lock);
+ return fnd;
+}
+
+static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
+{
+ struct amp_ctx *fnd = NULL;
+ struct amp_ctx *ctx;
+
+ read_lock_bh(&mgr->ctx_list_lock);
+ list_for_each_entry(ctx, &mgr->ctx_list, list) {
+ if ((ctx->evt_type & AMP_A2MP_RSP) &&
+ (ctx->rsp_ident == ident)) {
+ fnd = ctx;
+ break;
+ }
+ }
+ read_unlock_bh(&mgr->ctx_list_lock);
+ return fnd;
+}
+
+static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
+ u16 evt_value)
+{
+ struct amp_mgr *mgr;
+ struct amp_ctx *fnd = NULL;
+
+ read_lock_bh(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ struct amp_ctx *ctx;
+ read_lock_bh(&mgr->ctx_list_lock);
+ list_for_each_entry(ctx, &mgr->ctx_list, list) {
+ struct hci_dev *ctx_hdev;
+ ctx_hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
+ if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
+ switch (evt_type) {
+ case AMP_HCI_CMD_STATUS:
+ case AMP_HCI_CMD_CMPLT:
+ if (ctx->opcode == evt_value)
+ fnd = ctx;
+ break;
+ case AMP_HCI_EVENT:
+ if (ctx->evt_code == (u8) evt_value)
+ fnd = ctx;
+ break;
+ }
+ }
+ if (ctx_hdev)
+ hci_dev_put(ctx_hdev);
+
+ if (fnd)
+ break;
+ }
+ read_unlock_bh(&mgr->ctx_list_lock);
+ }
+ read_unlock_bh(&amp_mgr_list_lock);
+ return fnd;
+}
+
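+/* A2MP signal idents are 1-255; 0 is invalid and skipped on wrap */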
+static inline u8 next_ident(struct amp_mgr *mgr)
+{
+ if (++mgr->next_ident == 0)
+ mgr->next_ident = 1;
+ return mgr->next_ident;
+}
+
+static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
+ u16 len, void *data, u16 len2, void *data2)
+{
+ struct a2mp_cmd_hdr *hdr;
+ int plen;
+ u8 *p, *cmd;
+
+ BT_DBG("ident %d code 0x%02x", ident, code);
+ if (!mgr->a2mp_sock)
+ return;
+ plen = sizeof(*hdr) + len + len2;
+ cmd = kzalloc(plen, GFP_ATOMIC);
+ if (!cmd)
+ return;
+ hdr = (struct a2mp_cmd_hdr *) cmd;
+ hdr->code = code;
+ hdr->ident = ident;
+ hdr->len = cpu_to_le16(len+len2);
+ p = cmd + sizeof(*hdr);
+ memcpy(p, data, len);
+ p += len;
+ memcpy(p, data2, len2);
+ send_a2mp(mgr->a2mp_sock, cmd, plen);
+ kfree(cmd);
+}
+
+static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
+ u8 code, u16 len, void *data)
+{
+ send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
+}
+
+static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct a2mp_cmd_rej *rej;
+ struct amp_ctx *ctx;
+
+ BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+ rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
+ if (skb->len < sizeof(*rej))
+ return -EINVAL;
+ BT_DBG("reason %d", le16_to_cpu(rej->reason));
+ ctx = get_ctx_a2mp(mgr, hdr->ident);
+ if (ctx)
+ kill_ctx(ctx);
+ skb_pull(skb, sizeof(*rej));
+ return 0;
+}
+
+static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
+ void *msg)
+{
+ struct a2mp_cl clist[16];
+ struct a2mp_cl *cl;
+ struct hci_dev *hdev;
+ int num_ctrls = 1, id;
+
+ cl = clist;
+ cl->id = 0;
+ cl->type = 0;
+ cl->status = 1;
+
+ for (id = 0; id < 16; ++id) {
+ hdev = hci_dev_get(id);
+ if (hdev) {
+ if ((hdev->amp_type != HCI_BREDR) &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ (cl + num_ctrls)->id = HCI_A2MP_ID(hdev->id);
+ (cl + num_ctrls)->type = hdev->amp_type;
+ (cl + num_ctrls)->status = hdev->amp_status;
+ ++num_ctrls;
+ }
+ hci_dev_put(hdev);
+ }
+ }
+ send_a2mp_cmd2(mgr, ident, code, len, msg,
+ num_ctrls*sizeof(*cl), clist);
+
+ return 0;
+}
+
+static void send_a2mp_change_notify(void)
+{
+ struct amp_mgr *mgr;
+
+ read_lock_bh(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ if (mgr->discovered)
+ send_a2mp_cl(mgr, next_ident(mgr),
+ A2MP_CHANGE_NOTIFY, 0, NULL);
+ }
+ read_unlock_bh(&amp_mgr_list_lock);
+}
+
+static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct a2mp_discover_req *req;
+ u16 *efm;
+ struct a2mp_discover_rsp rsp;
+
+ req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
+ if (skb->len < sizeof(*req))
+ return -EINVAL;
+ efm = (u16 *) skb_pull(skb, sizeof(*req));
+
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
+ le16_to_cpu(req->ext_feat));
+
+ while (le16_to_cpu(req->ext_feat) & 0x8000) {
+ if (skb->len < sizeof(*efm))
+ return -EINVAL;
+ req->ext_feat = *efm;
+ BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
+ efm = (u16 *) skb_pull(skb, sizeof(*efm));
+ }
+
+ rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ rsp.ext_feat = 0;
+
+ mgr->discovered = 1;
+
+ return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
+ sizeof(rsp), &rsp);
+}
+
+static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct a2mp_cl *cl;
+
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
+ while (skb->len >= sizeof(*cl)) {
+ struct amp_ctrl *ctrl;
+ if (cl->id != 0) {
+ ctrl = get_create_ctrl(mgr, cl->id);
+ if (ctrl != NULL) {
+ ctrl->type = cl->type;
+ ctrl->status = cl->status;
+ }
+ }
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+ }
+
+ /* TODO: find controllers in manager that were not on the received
+ * controller list and destroy them */
+ send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);
+
+ return 0;
+}
+
+static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ u8 *data;
+ int id;
+ struct hci_dev *hdev;
+ struct a2mp_getinfo_rsp rsp;
+
+ data = (u8 *) skb_pull(skb, sizeof(*hdr));
+ if (le16_to_cpu(hdr->len) < sizeof(*data))
+ return -EINVAL;
+ if (skb->len < sizeof(*data))
+ return -EINVAL;
+ id = *data;
+ skb_pull(skb, sizeof(*data));
+ rsp.id = id;
+ rsp.status = 1;
+
+ BT_DBG("id %d", id);
+ hdev = hci_dev_get(A2MP_HCI_ID(id));
+
+ if (hdev && hdev->amp_type != HCI_BREDR) {
+ rsp.status = 0;
+ rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+ rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+ rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+ rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+ rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+ }
+
+ send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);
+
+ if (hdev)
+ hci_dev_put(hdev);
+
+ return 0;
+}
+
+static void create_physical(struct l2cap_conn *conn, struct sock *sk)
+{
+ struct amp_mgr *mgr;
+ struct amp_ctx *ctx = NULL;
+
+ BT_DBG("conn %p", conn);
+ mgr = get_create_amp_mgr(conn, NULL);
+ if (!mgr)
+ goto cp_finished;
+ BT_DBG("mgr %p", mgr);
+ ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
+ if (!ctx)
+ goto cp_finished;
+ ctx->sk = sk;
+ sock_hold(sk);
+ start_ctx(mgr, ctx);
+ return;
+
+cp_finished:
+ l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
+}
+
+static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
+{
+ struct amp_mgr *mgr;
+ struct hci_dev *hdev;
+ struct hci_conn *conn;
+ struct amp_ctx *aplctx = NULL;
+ u8 remote_id = 0;
+ int result = -EINVAL;
+
+ BT_DBG("lcon %p", lcon);
+ mgr = get_create_amp_mgr(lcon, NULL);
+ if (!mgr)
+ goto ap_finished;
+ BT_DBG("mgr %p", mgr);
+ hdev = hci_dev_get(A2MP_HCI_ID(id));
+ if (!hdev)
+ goto ap_finished;
+ BT_DBG("hdev %p", hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &mgr->l2cap_conn->hcon->dst);
+ if (conn) {
+ BT_DBG("conn %p", hdev);
+ result = 0;
+ remote_id = conn->dst_id;
+ goto ap_finished;
+ }
+ aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
+ if (!aplctx)
+ goto ap_finished;
+ aplctx->sk = sk;
+ sock_hold(sk);
+ return;
+
+ap_finished:
+ l2cap_amp_physical_complete(result, id, remote_id, sk);
+}
+
+static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct amp_ctx *ctx;
+ struct a2mp_getampassoc_req *req;
+
+ if (hdr->len < sizeof(*req))
+ return -EINVAL;
+ req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
+ skb_pull(skb, sizeof(*req));
+
+ ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->id = req->id;
+ ctx->d.gaa.req_ident = hdr->ident;
+ ctx->hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
+ if (ctx->hdev)
+ ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
+ GFP_ATOMIC);
+ start_ctx(mgr, ctx);
+ return 0;
+}
+
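+/*
+ * Responder side of Get AMP Assoc: the local assoc is read from the
+ * controller in fragments via HCI Read Local AMP Assoc (rem_len in
+ * each command complete gives the amount still outstanding) and the
+ * accumulated blob is returned in the A2MP response.
+ */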
+static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
+{
+ struct sk_buff *skb = (struct sk_buff *) data;
+ struct hci_cp_read_local_amp_assoc cp;
+ struct hci_rp_read_local_amp_assoc *rp;
+ struct a2mp_getampassoc_rsp rsp;
+ u16 rem_len;
+ u16 frag_len;
+
+ rsp.status = 1;
+ if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
+ goto gaa_finished;
+
+ switch (ctx->state) {
+ case AMP_GAA_INIT:
+ ctx->state = AMP_GAA_RLAA_COMPLETE;
+ ctx->evt_type = AMP_HCI_CMD_CMPLT;
+ ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
+ ctx->d.gaa.len_so_far = 0;
+ cp.phy_handle = 0;
+ cp.len_so_far = 0;
+ cp.max_len = ctx->hdev->amp_assoc_size;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
+ break;
+
+ case AMP_GAA_RLAA_COMPLETE:
+ if (skb->len < 4)
+ goto gaa_finished;
+ rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
+ if (rp->status)
+ goto gaa_finished;
+ rem_len = le16_to_cpu(rp->rem_len);
+ skb_pull(skb, 4);
+ frag_len = skb->len;
+
+ if (ctx->d.gaa.len_so_far + rem_len <=
+ ctx->hdev->amp_assoc_size) {
+ struct hci_cp_read_local_amp_assoc cp;
+ u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
+ memcpy(assoc, rp->frag, frag_len);
+ ctx->d.gaa.len_so_far += rem_len;
+ rem_len -= frag_len;
+ if (rem_len == 0) {
+ rsp.status = 0;
+ goto gaa_finished;
+ }
+ /* more assoc data to read */
+ cp.phy_handle = 0;
+ cp.len_so_far = ctx->d.gaa.len_so_far;
+ cp.max_len = ctx->hdev->amp_assoc_size;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
+ }
+ break;
+
+ default:
+ goto gaa_finished;
+ break;
+ }
+ return 0;
+
+gaa_finished:
+ rsp.id = ctx->id;
+ send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
+ sizeof(rsp), &rsp,
+ ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
+ kfree(ctx->d.gaa.assoc);
+ if (ctx->hdev)
+ hci_dev_put(ctx->hdev);
+ return 1;
+}
+
+struct hmac_sha256_result {
+ struct completion completion;
+ int err;
+};
+
+static void hmac_sha256_final(struct crypto_async_request *req, int err)
+{
+ struct hmac_sha256_result *r = req->data;
+ if (err == -EINPROGRESS)
+ return;
+ r->err = err;
+ complete(&r->completion);
+}
+
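+/*
+ * Synchronous HMAC-SHA256 on top of the async-hash (ahash) API.  The
+ * digest usually completes inline; if a driver returns -EINPROGRESS
+ * or -EBUSY we sleep on the completion signalled by
+ * hmac_sha256_final().  Only the first outlen bytes of the 32-byte
+ * digest are copied to output.
+ */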
+int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
+ u8 *output, u8 outlen)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm;
+ struct scatterlist sg;
+ struct ahash_request *req;
+ struct hmac_sha256_result tresult;
+ void *hash_buff = NULL;
+
+ unsigned char hash_result[64];
+ int i;
+
+ memset(output, 0, outlen);
+
+ init_completion(&tresult.completion);
+
+ tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ if (IS_ERR(tfm)) {
+ BT_DBG("crypto_alloc_ahash failed");
+ ret = PTR_ERR(tfm);
+ goto err_tfm;
+ }
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ BT_DBG("failed to allocate request for hmac(sha256)");
+ ret = -ENOMEM;
+ goto err_req;
+ }
+
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ hmac_sha256_final, &tresult);
+
+ hash_buff = kzalloc(psize, GFP_KERNEL);
+ if (!hash_buff) {
+ BT_DBG("failed to kzalloc hash_buff");
+ ret = -ENOMEM;
+ goto err_hash_buf;
+ }
+
+ memset(hash_result, 0, 64);
+ memcpy(hash_buff, plaintext, psize);
+ sg_init_one(&sg, hash_buff, psize);
+
+ if (ksize) {
+ crypto_ahash_clear_flags(tfm, ~0);
+ ret = crypto_ahash_setkey(tfm, key, ksize);
+
+ if (ret) {
+ BT_DBG("crypto_ahash_setkey failed");
+ goto err_setkey;
+ }
+ }
+
+ ahash_request_set_crypt(req, &sg, hash_result, psize);
+ ret = crypto_ahash_digest(req);
+
+ BT_DBG("ret 0x%x", ret);
+
+ switch (ret) {
+ case 0:
+ for (i = 0; i < outlen; i++)
+ output[i] = hash_result[i];
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(&tresult.completion);
+ if (!ret && !tresult.err) {
+ INIT_COMPLETION(tresult.completion);
+ break;
+ } else {
+ BT_DBG("wait_for_completion_interruptible failed");
+ if (!ret)
+ ret = tresult.err;
+ goto out;
+ }
+ default:
+ goto out;
+ }
+
+out:
+err_setkey:
+ kfree(hash_buff);
+err_hash_buf:
+ ahash_request_free(req);
+err_req:
+ crypto_free_ahash(tfm);
+err_tfm:
+ return ret;
+}
+
+static void show_key(u8 *k)
+{
+ int i = 0;
+ for (i = 0; i < 32; i += 8)
+ BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
+ *(k+i+0), *(k+i+1), *(k+i+2), *(k+i+3),
+ *(k+i+4), *(k+i+5), *(k+i+6), *(k+i+7));
+}
+
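+/*
+ * Dedicated AMP link key derivation per the Bluetooth 3.0+HS spec:
+ * the generic AMP key is HMAC-SHA256 of the doubled BR/EDR link key
+ * with "gamp", and the 802.11 PAL key is derived from that with
+ * "802b".  Unit and combination keys (types 0-2) are rejected; a
+ * debug combination key (type 3) uses the generic key directly.
+ */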
+static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
+{
+ u8 bt2_key[32];
+ u8 gamp_key[32];
+ u8 b802_key[32];
+ int result;
+
+ if (!hci_conn_check_link_mode(conn))
+ return -EACCES;
+
+ BT_DBG("key_type %d", conn->key_type);
+ if (conn->key_type < 3)
+ return -EACCES;
+
+ *type = conn->key_type;
+ *len = 32;
+ memcpy(&bt2_key[0], conn->link_key, 16);
+ memcpy(&bt2_key[16], conn->link_key, 16);
+ result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
+ if (result)
+ goto ps_finished;
+
+ if (conn->key_type == 3) {
+ BT_DBG("gamp_key");
+ show_key(gamp_key);
+ memcpy(data, gamp_key, 32);
+ goto ps_finished;
+ }
+
+ result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
+ if (result)
+ goto ps_finished;
+
+ BT_DBG("802b_key");
+ show_key(b802_key);
+ memcpy(data, b802_key, 32);
+
+ps_finished:
+ return result;
+}
+
+static u8 amp_next_handle;
+static inline u8 physlink_handle(struct hci_dev *hdev)
+{
+ /* TODO amp_next_handle should be part of hci_dev */
+ if (amp_next_handle == 0)
+ amp_next_handle = 1;
+ return amp_next_handle++;
+}
+
+/* Start an Accept Physical Link sequence */
+static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ struct amp_ctx *ctx = NULL;
+ struct a2mp_createphyslink_req *req;
+
+ if (hdr->len < sizeof(*req))
+ return -EINVAL;
+ req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
+ skb_pull(skb, sizeof(*req));
+ BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+
+ /* initialize the context */
+ ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->d.apl.req_ident = hdr->ident;
+ ctx->d.apl.remote_id = req->local_id;
+ ctx->id = req->remote_id;
+
+ /* add the supplied remote assoc to the context */
+ ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
+ if (ctx->d.apl.remote_assoc)
+ memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
+ ctx->d.apl.len_so_far = 0;
+ ctx->d.apl.rem_len = skb->len;
+ skb_pull(skb, skb->len);
+ ctx->hdev = hci_dev_get(A2MP_HCI_ID(ctx->id));
+ start_ctx(mgr, ctx);
+ return 0;
+}
+
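+/*
+ * AMP assoc blobs may exceed a single HCI command, so the handlers
+ * below write them to the controller in fragments of at most 248
+ * bytes; the 5 bytes added to frag_len in hci_send_cmd() cover the
+ * phy_handle plus the len_so_far/rem_len fields that precede each
+ * fragment.
+ */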
+static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
+{
+ struct sk_buff *skb = data;
+ struct hci_cp_accept_phys_link acp;
+ struct hci_cp_write_remote_amp_assoc wcp;
+ struct hci_rp_write_remote_amp_assoc *wrp;
+ struct hci_ev_cmd_status *cs = data;
+ struct hci_ev_phys_link_complete *ev;
+ struct a2mp_createphyslink_rsp rsp;
+ struct amp_ctx *cplctx;
+ struct amp_ctx *aplctx;
+ u16 frag_len;
+ struct hci_conn *conn;
+ int result;
+
+ BT_DBG("state %d", ctx->state);
+ result = -EINVAL;
+ rsp.status = 1; /* Invalid Controller ID */
+ if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
+ goto apl_finished;
+ if (evt_type == AMP_KILLED) {
+ result = -EAGAIN;
+ rsp.status = 4; /* Disconnect request received */
+ goto apl_finished;
+ }
+ if (!ctx->d.apl.remote_assoc) {
+ result = -ENOMEM;
+ rsp.status = 2; /* Unable to Start */
+ goto apl_finished;
+ }
+
+ switch (ctx->state) {
+ case AMP_APL_INIT:
+ BT_DBG("local_id %d, remote_id %d",
+ ctx->id, ctx->d.apl.remote_id);
+ conn = hci_conn_hash_lookup_id(ctx->hdev,
+ &ctx->mgr->l2cap_conn->hcon->dst,
+ ctx->d.apl.remote_id);
+ if (conn) {
+ result = -EEXIST;
+ rsp.status = 5; /* Already Exists */
+ goto apl_finished;
+ }
+
+ aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
+ if ((aplctx) &&
+ (aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
+ BT_DBG("deferred to %p", aplctx);
+ aplctx->deferred = ctx;
+ break;
+ }
+
+ cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
+ if ((cplctx) &&
+ (cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
+ struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
+ BT_DBG("local %s remote %s",
+ batostr(&bcon->hdev->bdaddr),
+ batostr(&bcon->dst));
+ if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
+ (bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
+ BT_DBG("COLLISION LOSER");
+ cplctx->deferred = ctx;
+ cancel_ctx(cplctx);
+ break;
+ } else {
+ BT_DBG("COLLISION WINNER");
+ result = -EISCONN;
+ rsp.status = 3; /* Collision */
+ goto apl_finished;
+ }
+ }
+
+ result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
+ &acp.key_len, &acp.type);
+ if (result) {
+ BT_DBG("SECURITY");
+ rsp.status = 6; /* Security Violation */
+ goto apl_finished;
+ }
+
+ ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
+ ctx->state = AMP_APL_APL_STATUS;
+ ctx->evt_type = AMP_HCI_CMD_STATUS;
+ ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
+ acp.phy_handle = ctx->d.apl.phy_handle;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
+ break;
+
+ case AMP_APL_APL_STATUS:
+ if (cs->status != 0)
+ goto apl_finished;
+ /* PAL will accept link, send a2mp response */
+ rsp.local_id = ctx->id;
+ rsp.remote_id = ctx->d.apl.remote_id;
+ rsp.status = 0;
+ send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
+ A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
+
+ /* send the first assoc fragment */
+ wcp.phy_handle = ctx->d.apl.phy_handle;
+ wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
+ wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
+ frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
+ memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
+ ctx->state = AMP_APL_WRA_COMPLETE;
+ ctx->evt_type = AMP_HCI_CMD_CMPLT;
+ ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
+ hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
+ break;
+
+ case AMP_APL_WRA_COMPLETE:
+ /* received write remote amp assoc command complete event */
+ wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
+ if (wrp->status != 0)
+ goto apl_finished;
+ if (wrp->phy_handle != ctx->d.apl.phy_handle)
+ goto apl_finished;
+ /* update progress */
+ frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
+ ctx->d.apl.len_so_far += frag_len;
+ ctx->d.apl.rem_len -= frag_len;
+ if (ctx->d.apl.rem_len > 0) {
+ u8 *assoc;
+ /* another assoc fragment to send */
+ wcp.phy_handle = ctx->d.apl.phy_handle;
+ wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
+ wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
+ frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
+ assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
+ memcpy(wcp.frag, assoc, frag_len);
+ hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
+ break;
+ }
+ /* wait for physical link complete event */
+ ctx->state = AMP_APL_PL_COMPLETE;
+ ctx->evt_type = AMP_HCI_EVENT;
+ ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
+ break;
+
+ case AMP_APL_PL_COMPLETE:
+ /* physical link complete event received */
+ if (skb->len < sizeof(*ev))
+ goto apl_finished;
+ ev = (struct hci_ev_phys_link_complete *) skb->data;
+ if (ev->phy_handle != ctx->d.apl.phy_handle)
+ break;
+ if (ev->status != 0)
+ goto apl_finished;
+ conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
+ if (!conn)
+ goto apl_finished;
+ result = 0;
+ BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
+ conn->dst_id = ctx->d.apl.remote_id;
+ bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
+ goto apl_finished;
+ break;
+
+ default:
+ goto apl_finished;
+ break;
+ }
+ return 0;
+
+apl_finished:
+ if (ctx->sk)
+ l2cap_amp_physical_complete(result, ctx->id,
+ ctx->d.apl.remote_id, ctx->sk);
+ if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
+ rsp.local_id = ctx->id;
+ rsp.remote_id = ctx->d.apl.remote_id;
+ send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
+ A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
+ }
+ kfree(ctx->d.apl.remote_assoc);
+ if (ctx->sk)
+ sock_put(ctx->sk);
+ if (ctx->hdev)
+ hci_dev_put(ctx->hdev);
+ return 1;
+}
+
+static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
+{
+ struct hci_cp_disconn_phys_link dcp;
+
+ ctx->state = AMP_CPL_PL_CANCEL;
+ ctx->evt_type = AMP_HCI_EVENT;
+ ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
+ dcp.phy_handle = ctx->d.cpl.phy_handle;
+ dcp.reason = reason;
+ hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
+}
+
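+/*
+ * Create Physical Link initiator state machine: discover the remote
+ * AMP controllers, pick the first one matching a local controller
+ * type, fetch its assoc, issue HCI Create Physical Link, write the
+ * remote assoc, then send the local assoc in the A2MP Create
+ * Physical Link request and wait for both the A2MP response and the
+ * HCI Physical Link Complete event.
+ */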
+static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
+{
+ struct amp_ctrl *ctrl;
+ struct sk_buff *skb = data;
+ struct a2mp_cmd_hdr *hdr;
+ struct hci_ev_cmd_status *cs = data;
+ struct amp_ctx *cplctx;
+ struct a2mp_discover_req dreq;
+ struct a2mp_discover_rsp *drsp;
+ u16 *efm;
+ struct a2mp_getinfo_req greq;
+ struct a2mp_getinfo_rsp *grsp;
+ struct a2mp_cl *cl;
+ struct a2mp_getampassoc_req areq;
+ struct a2mp_getampassoc_rsp *arsp;
+ struct hci_cp_create_phys_link cp;
+ struct hci_cp_write_remote_amp_assoc wcp;
+ struct hci_rp_write_remote_amp_assoc *wrp;
+ struct hci_ev_channel_selected *cev;
+ struct hci_cp_read_local_amp_assoc rcp;
+ struct hci_rp_read_local_amp_assoc *rrp;
+ struct a2mp_createphyslink_req creq;
+ struct a2mp_createphyslink_rsp *crsp;
+ struct hci_ev_phys_link_complete *pev;
+ struct hci_ev_disconn_phys_link_complete *dev;
+ u8 *assoc, *rassoc, *lassoc;
+ u16 frag_len;
+ u16 rem_len;
+ int result = -EAGAIN;
+ struct hci_conn *conn;
+
+ BT_DBG("state %d", ctx->state);
+ if (evt_type == AMP_KILLED)
+ goto cpl_finished;
+
+ if (evt_type == AMP_CANCEL) {
+ if ((ctx->state < AMP_CPL_CPL_STATUS) ||
+ ((ctx->state == AMP_CPL_PL_COMPLETE) &&
+ !(ctx->evt_type & AMP_HCI_EVENT)))
+ goto cpl_finished;
+
+ cancel_cpl_ctx(ctx, 0x16);
+ return 0;
+ }
+
+ switch (ctx->state) {
+ case AMP_CPL_INIT:
+ cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
+ if (cplctx) {
+ BT_DBG("deferred to %p", cplctx);
+ cplctx->deferred = ctx;
+ break;
+ }
+ ctx->state = AMP_CPL_DISC_RSP;
+ ctx->evt_type = AMP_A2MP_RSP;
+ ctx->rsp_ident = next_ident(ctx->mgr);
+ dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ dreq.ext_feat = 0;
+ send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
+ sizeof(dreq), &dreq);
+ break;
+
+ case AMP_CPL_DISC_RSP:
+ drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
+ if (skb->len < (sizeof(*drsp))) {
+ result = -EINVAL;
+ goto cpl_finished;
+ }
+
+ efm = (u16 *) skb_pull(skb, sizeof(*drsp));
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
+ le16_to_cpu(drsp->ext_feat));
+
+ while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
+ if (skb->len < sizeof(*efm)) {
+ result = -EINVAL;
+ goto cpl_finished;
+ }
+ drsp->ext_feat = *efm;
+ BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
+ efm = (u16 *) skb_pull(skb, sizeof(*efm));
+ }
+ cl = (struct a2mp_cl *) efm;
+
+ /* find the first remote and local controller with the
+ * same type
+ */
+ greq.id = 0;
+ result = -ENODEV;
+ while (skb->len >= sizeof(*cl)) {
+ if ((cl->id != 0) && (greq.id == 0)) {
+ struct hci_dev *hdev;
+ hdev = hci_dev_get_type(cl->type);
+ if (hdev) {
+ struct hci_conn *conn;
+ ctx->hdev = hdev;
+ ctx->id = HCI_A2MP_ID(hdev->id);
+ ctx->d.cpl.remote_id = cl->id;
+ conn = hci_conn_hash_lookup_ba(hdev,
+ ACL_LINK,
+ &ctx->mgr->l2cap_conn->hcon->dst);
+ if (conn) {
+ BT_DBG("PL_COMPLETE exists %x",
+ (int) conn->handle);
+ result = 0;
+ }
+ ctrl = get_create_ctrl(ctx->mgr,
+ cl->id);
+ if (ctrl) {
+ ctrl->type = cl->type;
+ ctrl->status = cl->status;
+ }
+ greq.id = cl->id;
+ }
+ }
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+ }
+ if ((!greq.id) || (!result))
+ goto cpl_finished;
+ ctx->state = AMP_CPL_GETINFO_RSP;
+ ctx->evt_type = AMP_A2MP_RSP;
+ ctx->rsp_ident = next_ident(ctx->mgr);
+ send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
+ sizeof(greq), &greq);
+ break;
+
+ case AMP_CPL_GETINFO_RSP:
+ if (skb->len < sizeof(*grsp))
+ goto cpl_finished;
+ grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
+ if (grsp->status)
+ goto cpl_finished;
+ if (grsp->id != ctx->d.cpl.remote_id)
+ goto cpl_finished;
+ ctrl = get_ctrl(ctx->mgr, grsp->id);
+ if (!ctrl)
+ goto cpl_finished;
+ ctrl->status = grsp->status;
+ ctrl->total_bw = le32_to_cpu(grsp->total_bw);
+ ctrl->max_bw = le32_to_cpu(grsp->max_bw);
+ ctrl->min_latency = le32_to_cpu(grsp->min_latency);
+ ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
+ ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);
+ skb_pull(skb, sizeof(*grsp));
+
+ ctx->d.cpl.max_len = ctrl->max_assoc_size;
+
+ /* setup up GAA request */
+ areq.id = ctx->d.cpl.remote_id;
+
+ /* advance context state */
+ ctx->state = AMP_CPL_GAA_RSP;
+ ctx->evt_type = AMP_A2MP_RSP;
+ ctx->rsp_ident = next_ident(ctx->mgr);
+ send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
+ sizeof(areq), &areq);
+ break;
+
+ case AMP_CPL_GAA_RSP:
+ if (skb->len < sizeof(*arsp))
+ goto cpl_finished;
+ hdr = (void *) skb->data;
+ arsp = (void *) skb_pull(skb, sizeof(*hdr));
+ if (arsp->id != ctx->d.cpl.remote_id)
+ goto cpl_finished;
+ if (arsp->status != 0)
+ goto cpl_finished;
+
+ /* store away remote assoc */
+ assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
+ ctx->d.cpl.len_so_far = 0;
+ ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
+ rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
+ if (!rassoc)
+ goto cpl_finished;
+ memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
+ ctx->d.cpl.remote_assoc = rassoc;
+ skb_pull(skb, ctx->d.cpl.rem_len);
+
+ /* set up CPL command */
+ ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
+ cp.phy_handle = ctx->d.cpl.phy_handle;
+ if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
+ &cp.key_len, &cp.type)) {
+ result = -EPERM;
+ goto cpl_finished;
+ }
+
+ /* advance context state */
+ ctx->state = AMP_CPL_CPL_STATUS;
+ ctx->evt_type = AMP_HCI_CMD_STATUS;
+ ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
+ break;
+
+ case AMP_CPL_CPL_STATUS:
+ /* received create physical link command status */
+ if (cs->status != 0)
+ goto cpl_finished;
+ /* send the first assoc fragment */
+ wcp.phy_handle = ctx->d.cpl.phy_handle;
+ wcp.len_so_far = ctx->d.cpl.len_so_far;
+ wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
+ frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
+ memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
+ ctx->state = AMP_CPL_WRA_COMPLETE;
+ ctx->evt_type = AMP_HCI_CMD_CMPLT;
+ ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
+ hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
+ break;
+
+ case AMP_CPL_WRA_COMPLETE:
+ /* received write remote amp assoc command complete event */
+ if (skb->len < sizeof(*wrp))
+ goto cpl_finished;
+ wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
+ if (wrp->status != 0)
+ goto cpl_finished;
+ if (wrp->phy_handle != ctx->d.cpl.phy_handle)
+ goto cpl_finished;
+
+ /* update progress */
+ frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
+ ctx->d.cpl.len_so_far += frag_len;
+ ctx->d.cpl.rem_len -= frag_len;
+ if (ctx->d.cpl.rem_len > 0) {
+ /* another assoc fragment to send */
+ wcp.phy_handle = ctx->d.cpl.phy_handle;
+ wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
+ wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
+ frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
+ memcpy(wcp.frag,
+ ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
+ frag_len);
+ hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
+ break;
+ }
+ /* now wait for channel selected event */
+ ctx->state = AMP_CPL_CHANNEL_SELECT;
+ ctx->evt_type = AMP_HCI_EVENT;
+ ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
+ break;
+
+ case AMP_CPL_CHANNEL_SELECT:
+ /* received channel selection event */
+ if (skb->len < sizeof(*cev))
+ goto cpl_finished;
+ cev = (void *) skb->data;
+/* TODO - PK This check is valid but Libra PAL returns 0 for handle during
+ Create Physical Link collision scenario
+ if (cev->phy_handle != ctx->d.cpl.phy_handle)
+ goto cpl_finished;
+*/
+
+ /* request the first local assoc fragment */
+ rcp.phy_handle = ctx->d.cpl.phy_handle;
+ rcp.len_so_far = 0;
+ rcp.max_len = ctx->d.cpl.max_len;
+ lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
+ if (!lassoc)
+ goto cpl_finished;
+ ctx->d.cpl.local_assoc = lassoc;
+ ctx->d.cpl.len_so_far = 0;
+ ctx->state = AMP_CPL_RLA_COMPLETE;
+ ctx->evt_type = AMP_HCI_CMD_CMPLT;
+ ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
+ break;
+
+ case AMP_CPL_RLA_COMPLETE:
+ /* received read local amp assoc command complete event */
+ if (skb->len < 4)
+ goto cpl_finished;
+ rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
+ if (rrp->status)
+ goto cpl_finished;
+ if (rrp->phy_handle != ctx->d.cpl.phy_handle)
+ goto cpl_finished;
+ rem_len = le16_to_cpu(rrp->rem_len);
+ skb_pull(skb, 4);
+ frag_len = skb->len;
+
+ if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
+ goto cpl_finished;
+
+ /* save this fragment in context */
+ lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
+ memcpy(lassoc, rrp->frag, frag_len);
+ ctx->d.cpl.len_so_far += frag_len;
+ rem_len -= frag_len;
+ if (rem_len > 0) {
+ /* request another local assoc fragment */
+ rcp.phy_handle = ctx->d.cpl.phy_handle;
+ rcp.len_so_far = ctx->d.cpl.len_so_far;
+ rcp.max_len = ctx->d.cpl.max_len;
+ hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
+ } else {
+ creq.local_id = ctx->id;
+ creq.remote_id = ctx->d.cpl.remote_id;
+ /* wait for A2MP rsp AND phys link complete event */
+ ctx->state = AMP_CPL_PL_COMPLETE;
+ ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
+ ctx->rsp_ident = next_ident(ctx->mgr);
+ ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
+ send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
+ A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
+ ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
+ }
+ break;
+
+ case AMP_CPL_PL_COMPLETE:
+ if (evt_type == AMP_A2MP_RSP) {
+ /* create physical link response received */
+ ctx->evt_type &= ~AMP_A2MP_RSP;
+ if (skb->len < sizeof(*crsp))
+ goto cpl_finished;
+ crsp = (void *) skb_pull(skb, sizeof(*hdr));
+ if ((crsp->local_id != ctx->d.cpl.remote_id) ||
+ (crsp->remote_id != ctx->id) ||
+ (crsp->status != 0)) {
+ cancel_cpl_ctx(ctx, 0x13);
+ break;
+ }
+
+ /* notify Qualcomm PAL */
+ if (ctx->hdev->manufacturer == 0x001d)
+ hci_send_cmd(ctx->hdev,
+ hci_opcode_pack(0x3f, 0x00), 0, NULL);
+ }
+ if (evt_type == AMP_HCI_EVENT) {
+ ctx->evt_type &= ~AMP_HCI_EVENT;
+ /* physical link complete event received */
+ if (skb->len < sizeof(*pev))
+ goto cpl_finished;
+ pev = (void *) skb->data;
+ if (pev->phy_handle != ctx->d.cpl.phy_handle)
+ break;
+ if (pev->status != 0)
+ goto cpl_finished;
+ }
+ if (ctx->evt_type)
+ break;
+ conn = hci_conn_hash_lookup_handle(ctx->hdev,
+ ctx->d.cpl.phy_handle);
+ if (!conn)
+ goto cpl_finished;
+ result = 0;
+ BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
+ bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
+ conn->dst_id = ctx->d.cpl.remote_id;
+ conn->out = 1;
+ goto cpl_finished;
+ break;
+
+ case AMP_CPL_PL_CANCEL:
+ dev = (void *) skb->data;
+ BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
+ result = -EISCONN;
+ goto cpl_finished;
+ break;
+
+ default:
+ goto cpl_finished;
+ break;
+ }
+ return 0;
+
+cpl_finished:
+ l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
+ ctx->sk);
+ if (ctx->sk)
+ sock_put(ctx->sk);
+ if (ctx->hdev)
+ hci_dev_put(ctx->hdev);
+ kfree(ctx->d.cpl.remote_assoc);
+ kfree(ctx->d.cpl.local_assoc);
+ return 1;
+}
+
+static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (void *) skb->data;
+ struct a2mp_disconnphyslink_req *req;
+ struct a2mp_disconnphyslink_rsp rsp;
+ struct hci_dev *hdev;
+ struct hci_conn *conn;
+ struct amp_ctx *aplctx;
+
+ BT_DBG("mgr %p skb %p", mgr, skb);
+ if (hdr->len < sizeof(*req))
+ return -EINVAL;
+ req = (void *) skb_pull(skb, sizeof(*hdr));
+ skb_pull(skb, sizeof(*req));
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+ rsp.status = 0;
+ BT_DBG("local_id %d remote_id %d",
+ (int) rsp.local_id, (int) rsp.remote_id);
+ hdev = hci_dev_get(A2MP_HCI_ID(rsp.local_id));
+ if (!hdev) {
+ rsp.status = 1; /* Invalid Controller ID */
+ goto dpl_finished;
+ }
+ BT_DBG("hdev %p", hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &mgr->l2cap_conn->hcon->dst);
+ if (!conn) {
+ aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
+ if (aplctx) {
+ kill_ctx(aplctx);
+ rsp.status = 0;
+ goto dpl_finished;
+ }
+ rsp.status = 2; /* No Physical Link exists */
+ goto dpl_finished;
+ }
+ BT_DBG("conn %p", conn);
+ hci_disconnect(conn, 0x13);
+
+dpl_finished:
+ send_a2mp_cmd(mgr, hdr->ident,
+ A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
+ if (hdev)
+ hci_dev_put(hdev);
+ return 0;
+}
+
+static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
+{
+ struct amp_mgr *mgr = ctx->mgr;
+ u8 finished = 0;
+
+ if (!mgr->connected)
+ return 0;
+
+ switch (ctx->type) {
+ case AMP_GETAMPASSOC:
+ finished = getampassoc_handler(ctx, evt_type, data);
+ break;
+ case AMP_CREATEPHYSLINK:
+ finished = createphyslink_handler(ctx, evt_type, data);
+ break;
+ case AMP_ACCEPTPHYSLINK:
+ finished = acceptphyslink_handler(ctx, evt_type, data);
+ break;
+ }
+
+ if (!finished)
+ mod_timer(&(ctx->timer), jiffies +
+ msecs_to_jiffies(A2MP_RSP_TIMEOUT));
+ else
+ destroy_ctx(ctx);
+ return finished;
+}
+
+static int cancel_ctx(struct amp_ctx *ctx)
+{
+ return execute_ctx(ctx, AMP_CANCEL, 0);
+}
+
+static int kill_ctx(struct amp_ctx *ctx)
+{
+ return execute_ctx(ctx, AMP_KILLED, 0);
+}
+
+static void ctx_timeout_worker(struct work_struct *w)
+{
+ struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
+ struct amp_ctx *ctx = work->ctx;
+ kill_ctx(ctx);
+ kfree(work);
+}
+
+static void ctx_timeout(unsigned long data)
+{
+ struct amp_ctx *ctx = (struct amp_ctx *) data;
+ struct amp_work_ctx_timeout *work;
+
+ BT_DBG("ctx %p", ctx);
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
+ work->ctx = ctx;
+ if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
+ kfree(work);
+ }
+}
+
+static void launch_ctx(struct amp_mgr *mgr)
+{
+ struct amp_ctx *ctx = NULL;
+
+ BT_DBG("mgr %p", mgr);
+ read_lock_bh(&mgr->ctx_list_lock);
+ if (!list_empty(&mgr->ctx_list))
+ ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
+ read_unlock_bh(&mgr->ctx_list_lock);
+ BT_DBG("ctx %p", ctx);
+ if (ctx)
+ execute_ctx(ctx, AMP_INIT, NULL);
+}
+
+static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
+{
+ struct amp_ctx *ctx;
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ u16 hdr_len = le16_to_cpu(hdr->len);
+
+ /* find context waiting for A2MP rsp with this rsp's identifier */
+ BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+ ctx = get_ctx_a2mp(mgr, hdr->ident);
+ if (ctx) {
+ execute_ctx(ctx, AMP_A2MP_RSP, skb);
+ } else {
+ BT_DBG("context not found");
+ skb_pull(skb, sizeof(*hdr));
+ if (hdr_len > skb->len)
+ hdr_len = skb->len;
+ skb_pull(skb, hdr_len);
+ }
+ return 0;
+}
+
+/* L2CAP-A2MP interface */
+
+void a2mp_receive(struct sock *sk, struct sk_buff *skb)
+{
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ int len;
+ int err = 0;
+ struct amp_mgr *mgr;
+
+ mgr = get_amp_mgr_sk(sk);
+ if (!mgr)
+ goto a2mp_finished;
+
+ len = skb->len;
+ while (len >= sizeof(*hdr)) {
+ struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
+ u16 clen = le16_to_cpu(hdr->len);
+
+ BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
+ if (clen > len || !hdr->ident) {
+ err = -EINVAL;
+ break;
+ }
+ switch (hdr->code) {
+ case A2MP_COMMAND_REJ:
+ command_rej(mgr, skb);
+ break;
+ case A2MP_DISCOVER_REQ:
+ err = discover_req(mgr, skb);
+ break;
+ case A2MP_CHANGE_NOTIFY:
+ err = change_notify(mgr, skb);
+ break;
+ case A2MP_GETINFO_REQ:
+ err = getinfo_req(mgr, skb);
+ break;
+ case A2MP_GETAMPASSOC_REQ:
+ err = getampassoc_req(mgr, skb);
+ break;
+ case A2MP_CREATEPHYSLINK_REQ:
+ err = createphyslink_req(mgr, skb);
+ break;
+ case A2MP_DISCONNPHYSLINK_REQ:
+ err = disconnphyslink_req(mgr, skb);
+ break;
+ case A2MP_CHANGE_RSP:
+ case A2MP_DISCOVER_RSP:
+ case A2MP_GETINFO_RSP:
+ case A2MP_GETAMPASSOC_RSP:
+ case A2MP_CREATEPHYSLINK_RSP:
+ case A2MP_DISCONNPHYSLINK_RSP:
+ err = a2mp_rsp(mgr, skb);
+ break;
+ default:
+ BT_ERR("Unknown A2MP signaling command 0x%2.2x",
+ hdr->code);
+ skb_pull(skb, sizeof(*hdr));
+ err = -EINVAL;
+ break;
+ }
+ len = skb->len;
+ }
+
+a2mp_finished:
+ if (err && mgr) {
+ struct a2mp_cmd_rej rej;
+ rej.reason = cpu_to_le16(0);
+ send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
+ sizeof(rej), &rej);
+ }
+}
+
+/* L2CAP-A2MP interface */
+
+static int send_a2mp(struct socket *sock, u8 *data, int len)
+{
+ struct kvec iv = { data, len };
+ struct msghdr msg;
+
+ memset(&msg, 0, sizeof(msg));
+
+ return kernel_sendmsg(sock, &msg, &iv, 1, len);
+}
+
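+/*
+ * Socket data/state callbacks and HCI events are all bounced to the
+ * single-threaded "a2mp" workqueue, which serialises the context
+ * state machines and keeps the handlers out of atomic context.
+ */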
+static void data_ready_worker(struct work_struct *w)
+{
+ struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
+ struct sock *sk = work->sk;
+ struct sk_buff *skb;
+
+ /* skb_dequeue() is thread-safe */
+ while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+ a2mp_receive(sk, skb);
+ kfree_skb(skb);
+ }
+ sock_put(work->sk);
+ kfree(work);
+}
+
+static void data_ready(struct sock *sk, int bytes)
+{
+ struct amp_work_data_ready *work;
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, data_ready_worker);
+ sock_hold(sk);
+ work->sk = sk;
+ work->bytes = bytes;
+ if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+ kfree(work);
+ sock_put(sk);
+ }
+ }
+}
+
+static void state_change_worker(struct work_struct *w)
+{
+ struct amp_work_state_change *work = (struct amp_work_state_change *) w;
+ struct amp_mgr *mgr;
+ switch (work->sk->sk_state) {
+ case BT_CONNECTED:
+ /* socket is up */
+ BT_DBG("CONNECTED");
+ mgr = get_amp_mgr_sk(work->sk);
+ if (mgr) {
+ mgr->connected = 1;
+ if (mgr->skb) {
+ l2cap_recv_deferred_frame(work->sk, mgr->skb);
+ mgr->skb = NULL;
+ }
+ launch_ctx(mgr);
+ }
+ break;
+
+ case BT_CLOSED:
+ /* connection is gone */
+ BT_DBG("CLOSED");
+ mgr = get_amp_mgr_sk(work->sk);
+ if (mgr) {
+ if (!sock_flag(work->sk, SOCK_DEAD))
+ sock_release(mgr->a2mp_sock);
+ mgr->a2mp_sock = NULL;
+ remove_amp_mgr(mgr);
+ }
+ break;
+
+ default:
+ /* something else happened */
+ break;
+ }
+ sock_put(work->sk);
+ kfree(work);
+}
+
+static void state_change(struct sock *sk)
+{
+ struct amp_work_state_change *work;
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, state_change_worker);
+ sock_hold(sk);
+ work->sk = sk;
+ if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+ kfree(work);
+ sock_put(sk);
+ }
+ }
+}
+
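+/*
+ * A2MP runs over L2CAP fixed channel 3 (L2CAP_CID_A2MP) on a kernel
+ * socket configured for ERTM.  The connect is issued non-blocking;
+ * state_change() above completes setup once the socket reaches
+ * BT_CONNECTED.
+ */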
+static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
+{
+ int err;
+ struct socket *sock;
+ struct sockaddr_l2 addr;
+ struct sock *sk;
+ struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
+ L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
+ L2CAP_MODE_ERTM, 1, 0xFF, 1};
+
+
+ err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
+ BTPROTO_L2CAP, &sock);
+
+ if (err) {
+ BT_ERR("sock_create_kern failed %d", err);
+ return NULL;
+ }
+
+ sk = sock->sk;
+ sk->sk_data_ready = data_ready;
+ sk->sk_state_change = state_change;
+
+ memset(&addr, 0, sizeof(addr));
+ bacpy(&addr.l2_bdaddr, src);
+ addr.l2_family = AF_BLUETOOTH;
+ addr.l2_cid = L2CAP_CID_A2MP;
+ err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
+ if (err) {
+ BT_ERR("kernel_bind failed %d", err);
+ sock_release(sock);
+ return NULL;
+ }
+
+ l2cap_fixed_channel_config(sk, &opts);
+
+ memset(&addr, 0, sizeof(addr));
+ bacpy(&addr.l2_bdaddr, dst);
+ addr.l2_family = AF_BLUETOOTH;
+ addr.l2_cid = L2CAP_CID_A2MP;
+ err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
+ O_NONBLOCK);
+ if ((err == 0) || (err == -EINPROGRESS))
+ return sock;
+ else {
+ BT_ERR("kernel_connect failed %d", err);
+ sock_release(sock);
+ return NULL;
+ }
+}
+
+static void conn_ind_worker(struct work_struct *w)
+{
+ struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
+ struct l2cap_conn *conn = work->conn;
+ struct sk_buff *skb = work->skb;
+ struct amp_mgr *mgr;
+
+ mgr = get_create_amp_mgr(conn, skb);
+ BT_DBG("mgr %p", mgr);
+ kfree(work);
+}
+
+static void create_physical_worker(struct work_struct *w)
+{
+ struct amp_work_create_physical *work =
+ (struct amp_work_create_physical *) w;
+
+ create_physical(work->conn, work->sk);
+ sock_put(work->sk);
+ kfree(work);
+}
+
+static void accept_physical_worker(struct work_struct *w)
+{
+ struct amp_work_accept_physical *work =
+ (struct amp_work_accept_physical *) w;
+
+ accept_physical(work->conn, work->id, work->sk);
+ sock_put(work->sk);
+ kfree(work);
+}
+
+/* L2CAP Fixed Channel interface */
+
+void amp_conn_ind(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct amp_work_conn_ind *work;
+ BT_DBG("conn %p, skb %p", conn, skb);
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, conn_ind_worker);
+ work->conn = conn;
+ work->skb = skb;
+ if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
+ kfree(work);
+ }
+}
+
+/* L2CAP Physical Link interface */
+
+void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
+{
+ struct amp_work_create_physical *work;
+ BT_DBG("conn %p", conn);
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, create_physical_worker);
+ work->conn = conn;
+ work->sk = sk;
+ sock_hold(sk);
+ if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+ sock_put(sk);
+ kfree(work);
+ }
+ }
+}
+
+void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
+{
+ struct amp_work_accept_physical *work;
+ BT_DBG("conn %p", conn);
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, accept_physical_worker);
+ work->conn = conn;
+ work->sk = sk;
+ work->id = id;
+ sock_hold(sk);
+ if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
+ sock_put(sk);
+ kfree(work);
+ }
+ }
+}
+
+/* HCI interface */
+
+static void amp_cmd_cmplt_worker(struct work_struct *w)
+{
+ struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
+ struct hci_dev *hdev = work->hdev;
+ u16 opcode = work->opcode;
+ struct sk_buff *skb = work->skb;
+ struct amp_ctx *ctx;
+
+ ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_CMPLT, opcode);
+ if (ctx)
+ execute_ctx(ctx, AMP_HCI_CMD_CMPLT, skb);
+ kfree_skb(skb);
+ kfree(w);
+}
+
+static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
+ struct sk_buff *skb)
+{
+ struct amp_work_cmd_cmplt *work;
+ struct sk_buff *skbc;
+ BT_DBG("hdev %p opcode 0x%x skb %p len %d",
+ hdev, opcode, skb, skb->len);
+ skbc = skb_clone(skb, GFP_ATOMIC);
+ if (!skbc)
+ return;
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
+ work->hdev = hdev;
+ work->opcode = opcode;
+ work->skb = skbc;
+ if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
+ kfree(work);
+ }
+}
+
+static void amp_cmd_status_worker(struct work_struct *w)
+{
+ struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
+ struct hci_dev *hdev = work->hdev;
+ u16 opcode = work->opcode;
+ u8 status = work->status;
+ struct amp_ctx *ctx;
+
+ ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_STATUS, opcode);
+ if (ctx)
+ execute_ctx(ctx, AMP_HCI_CMD_STATUS, &status);
+ kfree(w);
+}
+
+static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
+{
+ struct amp_work_cmd_status *work;
+ BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
+ work->hdev = hdev;
+ work->opcode = opcode;
+ work->status = status;
+ if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
+ kfree(work);
+ }
+}
+
+static void amp_event_worker(struct work_struct *w)
+{
+ struct amp_work_event *work = (struct amp_work_event *) w;
+ struct hci_dev *hdev = work->hdev;
+ u8 event = work->event;
+ struct sk_buff *skb = work->skb;
+ struct amp_ctx *ctx;
+
+ if (event == HCI_EV_AMP_STATUS_CHANGE) {
+ struct hci_ev_amp_status_change *ev;
+ if (skb->len < sizeof(*ev))
+ goto amp_event_finished;
+ ev = (void *) skb->data;
+ if (ev->status != 0)
+ goto amp_event_finished;
+ if (ev->amp_status == hdev->amp_status)
+ goto amp_event_finished;
+ hdev->amp_status = ev->amp_status;
+ send_a2mp_change_notify();
+ goto amp_event_finished;
+ }
+ ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
+ if (ctx)
+ execute_ctx(ctx, AMP_HCI_EVENT, skb);
+
+amp_event_finished:
+ kfree_skb(skb);
+ kfree(w);
+}
+
+static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
+{
+ struct amp_work_event *work;
+ struct sk_buff *skbc;
+ BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
+ skbc = skb_clone(skb, GFP_ATOMIC);
+ if (!skbc)
+ return;
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work, amp_event_worker);
+ work->hdev = hdev;
+ work->event = event;
+ work->skb = skbc;
+ if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
+ kfree(work);
+ }
+}
+
+static void amp_dev_event_worker(struct work_struct *w)
+{
+ send_a2mp_change_notify();
+ kfree(w);
+}
+
+static int amp_dev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct hci_dev *hdev = (struct hci_dev *) ptr;
+ struct amp_work_event *work;
+
+ if (hdev->amp_type == HCI_BREDR)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case HCI_DEV_UNREG:
+ case HCI_DEV_REG:
+ case HCI_DEV_UP:
+ case HCI_DEV_DOWN:
+ BT_DBG("hdev %p event %ld", hdev, event);
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK((struct work_struct *) work,
+ amp_dev_event_worker);
+ if (queue_work(amp_workqueue,
+ (struct work_struct *) work) == 0)
+ kfree(work);
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+
+/* L2CAP module init continued */
+
+static struct notifier_block amp_notifier = {
+ .notifier_call = amp_dev_event
+};
+
+static struct amp_mgr_cb hci_amp = {
+ .amp_cmd_complete_event = amp_cmd_cmplt_evt,
+ .amp_cmd_status_event = amp_cmd_status_evt,
+ .amp_event = amp_evt
+};
+
+int amp_init(void)
+{
+ hci_register_amp(&hci_amp);
+ hci_register_notifier(&amp_notifier);
+ amp_next_handle = 1;
+ amp_workqueue = create_singlethread_workqueue("a2mp");
+ if (!amp_workqueue)
+ return -EPERM;
+ return 0;
+}
+
+void amp_exit(void)
+{
+ hci_unregister_amp(&hci_amp);
+ hci_unregister_notifier(&amp_notifier);
+ flush_workqueue(amp_workqueue);
+ destroy_workqueue(amp_workqueue);
+}
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index e7ee531..8e6c061 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -155,7 +155,6 @@
unsigned int role;
unsigned long state;
unsigned long flags;
- atomic_t terminate;
struct task_struct *task;
struct ethhdr eh;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index d9edfe8..dfadb65 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,15 +484,16 @@
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
- while (1) {
+ while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read(&s->terminate))
- break;
/* RX */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- bnep_rx_frame(s, skb);
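+ /* bnep_rx_frame() parses the BNEP header in place and
+ * assumes linear data, so linearize paged skbs first and
+ * drop the frame if that fails. */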
+ if (!skb_linearize(skb))
+ bnep_rx_frame(s, skb);
+ else
+ kfree_skb(skb);
}
if (sk->sk_state != BT_CONNECTED)
@@ -506,7 +507,7 @@
schedule();
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
@@ -642,10 +643,9 @@
down_read(&bnep_session_sem);
s = __bnep_get_session(req->dst);
- if (s) {
- atomic_inc(&s->terminate);
- wake_up_process(s->task);
- } else
+ if (s)
+ kthread_stop(s->task);
+ else
err = -ENOENT;
up_read(&bnep_session_sem);
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 040f67b..744233c 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -326,7 +326,7 @@
{
struct capi_ctr *ctrl = &session->ctrl;
struct cmtp_application *application;
- __u16 appl;
+ __u16 cmd, appl;
__u32 contr;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -344,6 +344,7 @@
return;
}
+ cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
appl = CAPIMSG_APPID(skb->data);
contr = CAPIMSG_CONTROL(skb->data);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index c5b11af..bff02ad 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -300,7 +300,10 @@
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- cmtp_recv_frame(session, skb);
+ if (!skb_linearize(skb))
+ cmtp_recv_frame(session, skb);
+ else
+ kfree_skb(skb);
}
cmtp_process_transmit(session);
@@ -346,8 +349,7 @@
bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
- session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
- l2cap_pi(sock->sk)->chan->imtu);
+ session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu);
BT_DBG("mtu %d", session->mtu);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 33c4e0c..0ecb942 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
+ Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -132,15 +132,22 @@
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
- struct hci_cp_disconnect cp;
-
BT_DBG("%p", conn);
conn->state = BT_DISCONN;
- cp.handle = cpu_to_le16(conn->handle);
- cp.reason = reason;
- hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
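+ /* BR/EDR ACL links are torn down with HCI_Disconnect; AMP links
+ * need a physical link disconnect instead */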
+ if (conn->hdev->dev_type == HCI_BREDR) {
+ struct hci_cp_disconnect cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.reason = reason;
+ hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
+ } else {
+ struct hci_cp_disconn_phys_link cp;
+ cp.phy_handle = (u8) conn->handle;
+ cp.reason = reason;
+ hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHYS_LINK,
+ sizeof(cp), &cp);
+ }
}
void hci_add_sco(struct hci_conn *conn, __u16 handle)
@@ -178,9 +185,9 @@
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
+ cp.max_latency = cpu_to_le16(0x000A);
cp.voice_setting = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.retrans_effort = 0x01;
hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}
@@ -320,19 +327,6 @@
hci_conn_enter_sniff_mode(conn);
}
-static void hci_conn_auto_accept(unsigned long arg)
-{
- struct hci_conn *conn = (void *) arg;
- struct hci_dev *hdev = conn->hdev;
-
- hci_dev_lock(hdev);
-
- hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
- &conn->dst);
-
- hci_dev_unlock(hdev);
-}
-
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
__u16 pkt_type, bdaddr_t *dst)
{
@@ -352,7 +346,6 @@
conn->auth_type = HCI_AT_GENERAL_BONDING;
conn->io_capability = hdev->io_capability;
conn->remote_auth = 0xff;
- conn->key_type = 0xff;
conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -385,8 +378,6 @@
setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
- setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
- (unsigned long) conn);
atomic_set(&conn->refcnt, 0);
@@ -407,6 +398,18 @@
return conn;
}
+struct hci_conn *hci_le_conn_add(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 addr_type)
+{
+ struct hci_conn *conn = hci_conn_add(hdev, LE_LINK, 0, dst);
+ if (!conn)
+ return NULL;
+
+ conn->dst_type = addr_type;
+
+ return conn;
+}
+
int hci_conn_del(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -417,8 +420,6 @@
del_timer(&conn->disc_timer);
- del_timer(&conn->auto_accept_timer);
-
if (conn->type == ACL_LINK) {
struct hci_conn *sco = conn->link;
if (sco)
@@ -453,12 +454,62 @@
hci_dev_put(hdev);
- if (conn->handle == 0)
- kfree(conn);
+ return 0;
+}
+
+struct hci_chan *hci_chan_add(struct hci_dev *hdev)
+{
+ struct hci_chan *chan;
+
+ BT_DBG("%s", hdev->name);
+
+ chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+ if (!chan)
+ return NULL;
+
+ atomic_set(&chan->refcnt, 0);
+
+ hci_dev_hold(hdev);
+
+ chan->hdev = hdev;
+
+ list_add(&chan->list, &hdev->chan_list.list);
+
+ return chan;
+}
+
+int hci_chan_del(struct hci_chan *chan)
+{
+ BT_DBG("%s chan %p", chan->hdev->name, chan);
+
+ list_del(&chan->list);
+
+ hci_conn_put(chan->conn);
+ hci_dev_put(chan->hdev);
+
+ kfree(chan);
return 0;
}
+void hci_chan_put(struct hci_chan *chan)
+{
+ struct hci_cp_disconn_logical_link cp;
+
+ BT_DBG("chan %p refcnt %d", chan, atomic_read(&chan->refcnt));
+ if (!atomic_dec_and_test(&chan->refcnt))
+ return;
+
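+ /* On the last reference, disconnect the logical link if the
+ * physical link is still up; otherwise free the channel now */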
+ BT_DBG("chan->conn->state %d", chan->conn->state);
+ if (chan->conn->state == BT_CONNECTED) {
+ cp.log_handle = cpu_to_le16(chan->ll_handle);
+ hci_send_cmd(chan->conn->hdev, HCI_OP_DISCONN_LOGICAL_LINK,
+ sizeof(cp), &cp);
+ } else
+ hci_chan_del(chan);
+}
+EXPORT_SYMBOL(hci_chan_put);
+
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
int use_src = bacmp(src, BDADDR_ANY);
@@ -472,6 +523,8 @@
list_for_each(p, &hci_dev_list) {
struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ if (d->dev_type != HCI_BREDR)
+ continue;
if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
continue;
@@ -499,6 +552,58 @@
}
EXPORT_SYMBOL(hci_get_route);
+struct hci_dev *hci_dev_get_type(u8 amp_type)
+{
+ struct hci_dev *hdev = NULL;
+ struct hci_dev *d;
+
+ BT_DBG("amp_type %d", amp_type);
+
+ read_lock_bh(&hci_dev_list_lock);
+
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if ((d->amp_type == amp_type) && test_bit(HCI_UP, &d->flags)) {
+ hdev = d;
+ break;
+ }
+ }
+
+ if (hdev)
+ hdev = hci_dev_hold(hdev);
+
+ read_unlock_bh(&hci_dev_list_lock);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_dev_get_type);
+
+struct hci_dev *hci_dev_get_amp(bdaddr_t *dst)
+{
+ struct hci_dev *d;
+ struct hci_dev *hdev = NULL;
+
+ BT_DBG("%s dst %s", hdev->name, batostr(dst));
+
+ read_lock_bh(&hci_dev_list_lock);
+
+ list_for_each_entry(d, &hci_dev_list, list) {
+ struct hci_conn *conn;
+ if (d->dev_type == HCI_BREDR)
+ continue;
+ conn = hci_conn_hash_lookup_ba(d, ACL_LINK, dst);
+ if (conn) {
+ hdev = d;
+ break;
+ }
+ }
+
+ if (hdev)
+ hdev = hci_dev_hold(hdev);
+
+ read_unlock_bh(&hci_dev_list_lock);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_dev_get_amp);
+
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
@@ -522,12 +627,10 @@
if (!entry)
return ERR_PTR(-EHOSTUNREACH);
- le = hci_conn_add(hdev, LE_LINK, 0, dst);
+ le = hci_le_conn_add(hdev, dst, entry->bdaddr_type);
if (!le)
return ERR_PTR(-ENOMEM);
- le->dst_type = entry->bdaddr_type;
-
hci_le_connect(le);
hci_conn_hold(le);
@@ -571,7 +674,7 @@
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
acl->power_save = 1;
- hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
+ hci_conn_enter_active_mode(acl, 1);
if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
/* defer SCO setup until mode change completed */
@@ -586,6 +689,36 @@
}
EXPORT_SYMBOL(hci_connect);
+void hci_disconnect(struct hci_conn *conn, __u8 reason)
+{
+ BT_DBG("conn %p", conn);
+
+ hci_proto_disconn_cfm(conn, reason);
+}
+EXPORT_SYMBOL(hci_disconnect);
+
+void hci_disconnect_amp(struct hci_conn *conn, __u8 reason)
+{
+ struct hci_dev *hdev = NULL;
+
+ BT_DBG("conn %p", conn);
+
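+ /* Drop every AMP link that shadows this BR/EDR connection's
+ * remote address */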
+ read_lock_bh(&hci_dev_list_lock);
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ struct hci_conn *c;
+ if (hdev == conn->hdev)
+ continue;
+ if (hdev->amp_type == HCI_BREDR)
+ continue;
+ c = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &conn->dst);
+ if (c)
+ hci_disconnect(c, reason);
+ }
+
+ read_unlock_bh(&hci_dev_list_lock);
+}
+
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
@@ -619,105 +752,44 @@
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
struct hci_cp_auth_requested cp;
-
- /* encrypt must be pending if auth is also pending */
- set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
-
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
- if (conn->key_type != 0xff)
- set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
}
return 0;
}
-/* Encrypt the the link */
-static void hci_conn_encrypt(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.encrypt = 0x01;
- hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
- }
-}
-
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
BT_DBG("conn %p", conn);
- /* For sdp we don't need the link key. */
if (sec_level == BT_SECURITY_SDP)
return 1;
- /* For non 2.1 devices and low security level we don't need the link
- key. */
if (sec_level == BT_SECURITY_LOW &&
(!conn->ssp_mode || !conn->hdev->ssp_mode))
return 1;
- /* For other security levels we need the link key. */
- if (!(conn->link_mode & HCI_LM_AUTH))
- goto auth;
-
- /* An authenticated combination key has sufficient security for any
- security level. */
- if (conn->key_type == HCI_LK_AUTH_COMBINATION)
- goto encrypt;
-
- /* An unauthenticated combination key has sufficient security for
- security level 1 and 2. */
- if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
- (sec_level == BT_SECURITY_MEDIUM ||
- sec_level == BT_SECURITY_LOW))
- goto encrypt;
-
- /* A combination key has always sufficient security for the security
- levels 1 or 2. High security level requires the combination key
- is generated using maximum PIN code length (16).
- For pre 2.1 units. */
- if (conn->key_type == HCI_LK_COMBINATION &&
- (sec_level != BT_SECURITY_HIGH ||
- conn->pin_length == 16))
- goto encrypt;
-
-auth:
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
- return 0;
-
- if (!hci_conn_auth(conn, sec_level, auth_type))
- return 0;
-
-encrypt:
if (conn->link_mode & HCI_LM_ENCRYPT)
- return 1;
+ return hci_conn_auth(conn, sec_level, auth_type);
- hci_conn_encrypt(conn);
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ return 0;
+
+ if (hci_conn_auth(conn, sec_level, auth_type)) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.encrypt = 1;
+ hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
+ sizeof(cp), &cp);
+ }
+
return 0;
}
EXPORT_SYMBOL(hci_conn_security);
-/* Check secure link requirement */
-int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
-{
- BT_DBG("conn %p", conn);
-
- if (sec_level != BT_SECURITY_HIGH)
- return 1; /* Accept if non-secure is required */
-
- if (conn->sec_level == BT_SECURITY_HIGH)
- return 1;
-
- return 0; /* Reject not secure link */
-}
-EXPORT_SYMBOL(hci_conn_check_secure);
-
/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
@@ -817,6 +889,98 @@
}
}
+struct hci_chan *hci_chan_accept(struct hci_conn *conn,
+ struct hci_ext_fs *tx_fs, struct hci_ext_fs *rx_fs)
+{
+ struct hci_chan *chan;
+ struct hci_cp_create_logical_link cp;
+
+ chan = hci_chan_add(conn->hdev);
+ if (!chan)
+ return NULL;
+
+ chan->state = BT_CONNECT;
+ chan->conn = conn;
+ chan->tx_fs = *tx_fs;
+ chan->rx_fs = *rx_fs;
+ cp.phy_handle = chan->conn->handle;
+ cp.tx_fs.id = chan->tx_fs.id;
+ cp.tx_fs.type = chan->tx_fs.type;
+ cp.tx_fs.max_sdu = cpu_to_le16(chan->tx_fs.max_sdu);
+ cp.tx_fs.sdu_arr_time = cpu_to_le32(chan->tx_fs.sdu_arr_time);
+ cp.tx_fs.acc_latency = cpu_to_le32(chan->tx_fs.acc_latency);
+ cp.tx_fs.flush_to = cpu_to_le32(chan->tx_fs.flush_to);
+ cp.rx_fs.id = chan->rx_fs.id;
+ cp.rx_fs.type = chan->rx_fs.type;
+ cp.rx_fs.max_sdu = cpu_to_le16(chan->rx_fs.max_sdu);
+ cp.rx_fs.sdu_arr_time = cpu_to_le32(chan->rx_fs.sdu_arr_time);
+ cp.rx_fs.acc_latency = cpu_to_le32(chan->rx_fs.acc_latency);
+ cp.rx_fs.flush_to = cpu_to_le32(chan->rx_fs.flush_to);
+ hci_conn_hold(chan->conn);
+ hci_send_cmd(conn->hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp), &cp);
+ return chan;
+}
+EXPORT_SYMBOL(hci_chan_accept);
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn,
+ struct hci_ext_fs *tx_fs, struct hci_ext_fs *rx_fs)
+{
+ struct hci_chan *chan;
+ struct hci_cp_create_logical_link cp;
+
+ chan = hci_chan_add(conn->hdev);
+ if (!chan)
+ return NULL;
+
+ chan->state = BT_CONNECT;
+ chan->conn = conn;
+ chan->tx_fs = *tx_fs;
+ chan->rx_fs = *rx_fs;
+ cp.phy_handle = chan->conn->handle;
+ cp.tx_fs.id = chan->tx_fs.id;
+ cp.tx_fs.type = chan->tx_fs.type;
+ cp.tx_fs.max_sdu = cpu_to_le16(chan->tx_fs.max_sdu);
+ cp.tx_fs.sdu_arr_time = cpu_to_le32(chan->tx_fs.sdu_arr_time);
+ cp.tx_fs.acc_latency = cpu_to_le32(chan->tx_fs.acc_latency);
+ cp.tx_fs.flush_to = cpu_to_le32(chan->tx_fs.flush_to);
+ cp.rx_fs.id = chan->rx_fs.id;
+ cp.rx_fs.type = chan->rx_fs.type;
+ cp.rx_fs.max_sdu = cpu_to_le16(chan->rx_fs.max_sdu);
+ cp.rx_fs.sdu_arr_time = cpu_to_le32(chan->rx_fs.sdu_arr_time);
+ cp.rx_fs.acc_latency = cpu_to_le32(chan->rx_fs.acc_latency);
+ cp.rx_fs.flush_to = cpu_to_le32(chan->rx_fs.flush_to);
+ hci_conn_hold(chan->conn);
+ hci_send_cmd(conn->hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp), &cp);
+ return chan;
+}
+EXPORT_SYMBOL(hci_chan_create);
+
+void hci_chan_modify(struct hci_chan *chan,
+ struct hci_ext_fs *tx_fs, struct hci_ext_fs *rx_fs)
+{
+ struct hci_cp_flow_spec_modify cp;
+
+ chan->tx_fs = *tx_fs;
+ chan->rx_fs = *rx_fs;
+ cp.log_handle = cpu_to_le16(chan->ll_handle);
+ cp.tx_fs.id = tx_fs->id;
+ cp.tx_fs.type = tx_fs->type;
+ cp.tx_fs.max_sdu = cpu_to_le16(tx_fs->max_sdu);
+ cp.tx_fs.sdu_arr_time = cpu_to_le32(tx_fs->sdu_arr_time);
+ cp.tx_fs.acc_latency = cpu_to_le32(tx_fs->acc_latency);
+ cp.tx_fs.flush_to = cpu_to_le32(tx_fs->flush_to);
+ cp.rx_fs.id = rx_fs->id;
+ cp.rx_fs.type = rx_fs->type;
+ cp.rx_fs.max_sdu = cpu_to_le16(rx_fs->max_sdu);
+ cp.rx_fs.sdu_arr_time = cpu_to_le32(rx_fs->sdu_arr_time);
+ cp.rx_fs.acc_latency = cpu_to_le32(rx_fs->acc_latency);
+ cp.rx_fs.flush_to = cpu_to_le32(rx_fs->flush_to);
+ hci_conn_hold(chan->conn);
+ hci_send_cmd(chan->conn->hdev, HCI_OP_FLOW_SPEC_MODIFY, sizeof(cp),
+ &cp);
+}
+EXPORT_SYMBOL(hci_chan_modify);
+
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
@@ -961,6 +1125,8 @@
ci.cnt = hdev->acl_cnt;
ci.pkts = hdev->acl_pkts;
}
+ ci.pending_sec_level = conn->pending_sec_level;
+ ci.ssp_mode = conn->ssp_mode;
}
hci_dev_unlock_bh(hdev);
@@ -989,3 +1155,23 @@
return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
+
+int hci_set_auth_info(struct hci_dev *hdev, void __user *arg)
+{
+ struct hci_auth_info_req req;
+ struct hci_conn *conn;
+
+ if (copy_from_user(&req, arg, sizeof(req)))
+ return -EFAULT;
+
+ hci_dev_lock_bh(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
+ if (conn)
+ conn->auth_type = req.type;
+ hci_dev_unlock_bh(hdev);
+
+ if (!conn)
+ return -ENOENT;
+
+ return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
+}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 3b39198..b095e99 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -60,6 +60,8 @@
static DEFINE_RWLOCK(hci_task_lock);
+static int enable_smp;
+
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);
@@ -68,6 +70,10 @@
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
+/* AMP Manager event callbacks */
+LIST_HEAD(amp_mgr_cb_list);
+DEFINE_RWLOCK(amp_mgr_cb_list_lock);
+
/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];
@@ -146,7 +152,7 @@
switch (hdev->req_status) {
case HCI_REQ_DONE:
- err = -bt_to_errno(hdev->req_result);
+ err = -bt_err(hdev->req_result);
break;
case HCI_REQ_CANCELED:
@@ -219,15 +225,25 @@
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
- /* Read Local Supported Features */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+ /* Set default HCI Flow Control Mode */
+ if (hdev->dev_type == HCI_BREDR)
+ hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
+ else
+ hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
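+ /* AMP controllers account for buffer space in data blocks
+ * rather than in whole packets */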
+
+ /* Read HCI Flow Control Mode */
+ hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
+
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
+ /* Read Data Block Size (ACL mtu, max pkt, etc.) */
+ hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+
#if 0
/* Host buffer size */
{
@@ -240,31 +256,46 @@
}
#endif
- /* Read BD Address */
- hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
+ if (hdev->dev_type == HCI_BREDR) {
+ /* BR-EDR initialization */
- /* Read Class of Device */
- hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
+ /* Read Local Supported Features */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
- /* Read Local Name */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+ /* Read BD Address */
+ hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
- /* Read Voice Setting */
- hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+ /* Read Class of Device */
+ hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
- /* Optional initialization */
+ /* Read Local Name */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
- /* Clear Event Filters */
- flt_type = HCI_FLT_CLEAR_ALL;
- hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
+ /* Read Voice Setting */
+ hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
- /* Connection accept timeout ~20 secs */
- param = cpu_to_le16(0x7d00);
- hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+ /* Optional initialization */
+ /* Clear Event Filters */
+ flt_type = HCI_FLT_CLEAR_ALL;
+ hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
- bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.delete_all = 1;
- hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
+ /* Connection accept timeout ~20 secs */
+ param = cpu_to_le16(0x7d00);
+ hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 1;
+ hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
+ sizeof(cp), &cp);
+ } else {
+ /* AMP initialization */
+ /* Connection accept timeout ~5 secs */
+ param = cpu_to_le16(0x1f40);
+ hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+ /* Read AMP Info */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+ }
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -338,6 +369,7 @@
read_unlock(&hci_dev_list_lock);
return hdev;
}
+EXPORT_SYMBOL(hci_dev_get);
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
@@ -523,10 +555,6 @@
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
- /* Treat all non BR/EDR controllers as raw devices for now */
- if (hdev->dev_type != HCI_BREDR)
- set_bit(HCI_RAW, &hdev->flags);
-
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
@@ -540,7 +568,7 @@
ret = __hci_request(hdev, hci_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
- if (lmp_host_le_capable(hdev))
+ if (lmp_le_capable(hdev))
ret = __hci_request(hdev, hci_le_init_req, 0,
msecs_to_jiffies(HCI_INIT_TIMEOUT));
@@ -1021,50 +1049,17 @@
return NULL;
}
-static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
- u8 key_type, u8 old_key_type)
-{
- /* Legacy key */
- if (key_type < 0x03)
- return 1;
-
- /* Debug keys are insecure so don't store them persistently */
- if (key_type == HCI_LK_DEBUG_COMBINATION)
- return 0;
-
- /* Changed combination key and there's no previous one */
- if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
- return 0;
-
- /* Security mode 3 case */
- if (!conn)
- return 1;
-
- /* Neither local nor remote side had no-bonding as requirement */
- if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
- return 1;
-
- /* Local side had dedicated bonding as requirement */
- if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
- return 1;
-
- /* Remote side had dedicated bonding as requirement */
- if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
- return 1;
-
- /* If none of the above criteria match, then don't store the key
- * persistently */
- return 0;
-}
-
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
- struct link_key *k;
+ struct list_head *p;
- list_for_each_entry(k, &hdev->link_keys, list) {
+ list_for_each(p, &hdev->link_keys) {
+ struct link_key *k;
struct key_master_id *id;
- if (k->type != HCI_LK_SMP_LTK)
+ k = list_entry(p, struct link_key, list);
+
+ if (k->type != KEY_TYPE_LTK)
continue;
if (k->dlen != sizeof(*id))
@@ -1083,28 +1078,33 @@
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 type)
{
- struct link_key *k;
+ struct list_head *p;
- list_for_each_entry(k, &hdev->link_keys, list)
- if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
+ list_for_each(p, &hdev->link_keys) {
+ struct link_key *k;
+
+ k = list_entry(p, struct link_key, list);
+
+ if ((k->type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
return k;
+ }
return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
-int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
- bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
+int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
+ u8 *val, u8 type, u8 pin_len)
{
struct link_key *key, *old_key;
- u8 old_key_type, persistent;
+ u8 old_key_type;
old_key = hci_find_link_key(hdev, bdaddr);
if (old_key) {
old_key_type = old_key->type;
key = old_key;
} else {
- old_key_type = conn ? conn->key_type : 0xff;
+ old_key_type = 0xff;
key = kzalloc(sizeof(*key), GFP_ATOMIC);
if (!key)
return -ENOMEM;
@@ -1113,37 +1113,16 @@
BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
- /* Some buggy controller combinations generate a changed
- * combination key for legacy pairing even when there's no
- * previous key */
- if (type == HCI_LK_CHANGED_COMBINATION &&
- (!conn || conn->remote_auth == 0xff) &&
- old_key_type == 0xff) {
- type = HCI_LK_COMBINATION;
- if (conn)
- conn->key_type = type;
- }
-
bacpy(&key->bdaddr, bdaddr);
memcpy(key->val, val, 16);
+ key->type = type;
key->pin_len = pin_len;
- if (type == HCI_LK_CHANGED_COMBINATION)
+ if (new_key)
+ mgmt_new_key(hdev->id, key, old_key_type);
+
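+ /* 0x06 = Changed Combination Key: keep the previously stored
+ * key type */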
+ if (type == 0x06)
key->type = old_key_type;
- else
- key->type = type;
-
- if (!new_key)
- return 0;
-
- persistent = hci_persistent_key(hdev, conn, type, old_key_type);
-
- mgmt_new_key(hdev->id, key, persistent);
-
- if (!persistent) {
- list_del(&key->list);
- kfree(key);
- }
return 0;
}
@@ -1157,7 +1136,7 @@
BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
- old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
+ old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
if (old_key) {
key = old_key;
old_key_type = old_key->type;
@@ -1173,7 +1152,7 @@
bacpy(&key->bdaddr, bdaddr);
memcpy(key->val, ltk, sizeof(key->val));
- key->type = HCI_LK_SMP_LTK;
+ key->type = KEY_TYPE_LTK;
key->pin_len = key_size;
id = (void *) &key->data;
@@ -1209,6 +1188,7 @@
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
+ clear_bit(HCI_RESET, &hdev->flags);
tasklet_schedule(&hdev->cmd_task);
}
@@ -1252,6 +1232,63 @@
return 0;
}
+static void hci_adv_clear(unsigned long arg)
+{
+ struct hci_dev *hdev = (void *) arg;
+
+ hci_adv_entries_clear(hdev);
+}
+
+int hci_adv_entries_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ write_lock_bh(&hdev->adv_entries_lock);
+
+ list_for_each_safe(p, n, &hdev->adv_entries) {
+ struct adv_entry *entry;
+
+ entry = list_entry(p, struct adv_entry, list);
+
+ list_del(p);
+ kfree(entry);
+ }
+
+ write_unlock_bh(&hdev->adv_entries_lock);
+
+ return 0;
+}
+
+struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct list_head *p;
+ struct adv_entry *res = NULL;
+
+ read_lock_bh(&hdev->adv_entries_lock);
+
+ list_for_each(p, &hdev->adv_entries) {
+ struct adv_entry *entry;
+
+ entry = list_entry(p, struct adv_entry, list);
+
+ if (bacmp(bdaddr, &entry->bdaddr) == 0) {
+ res = entry;
+ goto out;
+ }
+ }
+out:
+ read_unlock_bh(&hdev->adv_entries_lock);
+ return res;
+}
+
+static inline int is_connectable_adv(u8 evt_type)
+{
+ if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
+ return 1;
+
+ return 0;
+}
+
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
u8 *randomizer)
{
@@ -1276,141 +1313,6 @@
return 0;
}
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
- bdaddr_t *bdaddr)
-{
- struct list_head *p;
-
- list_for_each(p, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
-
- if (bacmp(bdaddr, &b->bdaddr) == 0)
- return b;
- }
-
- return NULL;
-}
-
-int hci_blacklist_clear(struct hci_dev *hdev)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
-
- list_del(p);
- kfree(b);
- }
-
- return 0;
-}
-
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct bdaddr_list *entry;
- int err;
-
- if (bacmp(bdaddr, BDADDR_ANY) == 0)
- return -EBADF;
-
- hci_dev_lock_bh(hdev);
-
- if (hci_blacklist_lookup(hdev, bdaddr)) {
- err = -EEXIST;
- goto err;
- }
-
- entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
- if (!entry) {
- return -ENOMEM;
- goto err;
- }
-
- bacpy(&entry->bdaddr, bdaddr);
-
- list_add(&entry->list, &hdev->blacklist);
-
- err = 0;
-
-err:
- hci_dev_unlock_bh(hdev);
- return err;
-}
-
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct bdaddr_list *entry;
- int err = 0;
-
- hci_dev_lock_bh(hdev);
-
- if (bacmp(bdaddr, BDADDR_ANY) == 0) {
- hci_blacklist_clear(hdev);
- goto done;
- }
-
- entry = hci_blacklist_lookup(hdev, bdaddr);
- if (!entry) {
- err = -ENOENT;
- goto done;
- }
-
- list_del(&entry->list);
- kfree(entry);
-
-done:
- hci_dev_unlock_bh(hdev);
- return err;
-}
-
-static void hci_clear_adv_cache(unsigned long arg)
-{
- struct hci_dev *hdev = (void *) arg;
-
- hci_dev_lock(hdev);
-
- hci_adv_entries_clear(hdev);
-
- hci_dev_unlock(hdev);
-}
-
-int hci_adv_entries_clear(struct hci_dev *hdev)
-{
- struct adv_entry *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- BT_DBG("%s adv cache cleared", hdev->name);
-
- return 0;
-}
-
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct adv_entry *entry;
-
- list_for_each_entry(entry, &hdev->adv_entries, list)
- if (bacmp(bdaddr, &entry->bdaddr) == 0)
- return entry;
-
- return NULL;
-}
-
-static inline int is_connectable_adv(u8 evt_type)
-{
- if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
- return 1;
-
- return 0;
-}
-
int hci_add_adv_entry(struct hci_dev *hdev,
struct hci_ev_le_advertising_info *ev)
{
@@ -1419,9 +1321,10 @@
if (!is_connectable_adv(ev->evt_type))
return -EINVAL;
+ entry = hci_find_adv_entry(hdev, &ev->bdaddr);
/* Only new entries should be added to adv_entries. So, if
* bdaddr was found, don't add it. */
- if (hci_find_adv_entry(hdev, &ev->bdaddr))
+ if (entry)
return 0;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
@@ -1431,14 +1334,21 @@
bacpy(&entry->bdaddr, &ev->bdaddr);
entry->bdaddr_type = ev->bdaddr_type;
+ write_lock(&hdev->adv_entries_lock);
list_add(&entry->list, &hdev->adv_entries);
-
- BT_DBG("%s adv entry added: address %s type %u", hdev->name,
- batostr(&entry->bdaddr), entry->bdaddr_type);
+ write_unlock(&hdev->adv_entries_lock);
return 0;
}
+static struct crypto_blkcipher *alloc_cypher(void)
+{
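+ /* SMP is the only user of the AES-ECB transform, so skip the
+ * allocation when SMP support is disabled */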
+ if (enable_smp)
+ return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
@@ -1496,6 +1406,7 @@
inquiry_cache_init(hdev);
hci_conn_hash_init(hdev);
+ hci_chan_list_init(hdev);
INIT_LIST_HEAD(&hdev->blacklist);
@@ -1506,8 +1417,8 @@
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->adv_entries);
- setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
- (unsigned long) hdev);
+ rwlock_init(&hdev->adv_entries_lock);
+ setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->power_off, hci_power_off);
@@ -1523,7 +1434,7 @@
if (!hdev->workqueue)
goto nomem;
- hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ hdev->tfm = alloc_cypher();
if (IS_ERR(hdev->tfm))
BT_INFO("Failed to load transform for ecb(aes): %ld",
PTR_ERR(hdev->tfm));
@@ -1883,6 +1794,74 @@
}
EXPORT_SYMBOL(hci_unregister_cb);
+int hci_register_amp(struct amp_mgr_cb *cb)
+{
+ BT_DBG("%p", cb);
+
+ write_lock_bh(&amp_mgr_cb_list_lock);
+ list_add(&cb->list, &amp_mgr_cb_list);
+ write_unlock_bh(&amp_mgr_cb_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hci_register_amp);
+
+int hci_unregister_amp(struct amp_mgr_cb *cb)
+{
+ BT_DBG("%p", cb);
+
+ write_lock_bh(&amp_mgr_cb_list_lock);
+ list_del(&cb->list);
+ write_unlock_bh(&amp_mgr_cb_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hci_unregister_amp);
+
+void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
+ struct sk_buff *skb)
+{
+ struct amp_mgr_cb *cb;
+
+ BT_DBG("opcode 0x%x", opcode);
+
+ read_lock_bh(&amp_mgr_cb_list_lock);
+ list_for_each_entry(cb, &amp_mgr_cb_list, list) {
+ if (cb->amp_cmd_complete_event)
+ cb->amp_cmd_complete_event(hdev, opcode, skb);
+ }
+ read_unlock_bh(&amp_mgr_cb_list_lock);
+}
+
+void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
+{
+ struct amp_mgr_cb *cb;
+
+ BT_DBG("opcode 0x%x, status %d", opcode, status);
+
+ read_lock_bh(&amp_mgr_cb_list_lock);
+ list_for_each_entry(cb, &amp_mgr_cb_list, list) {
+ if (cb->amp_cmd_status_event)
+ cb->amp_cmd_status_event(hdev, opcode, status);
+ }
+ read_unlock_bh(&amp_mgr_cb_list_lock);
+}
+
+void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
+ struct sk_buff *skb)
+{
+ struct amp_mgr_cb *cb;
+
+ BT_DBG("ev_code 0x%x", ev_code);
+
+ read_lock_bh(&amp_mgr_cb_list_lock);
+ list_for_each_entry(cb, &amp_mgr_cb_list, list) {
+ if (cb->amp_event)
+ cb->amp_event(hdev, ev_code, skb);
+ }
+ read_unlock_bh(&amp_mgr_cb_list_lock);
+}
+
static int hci_send_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
@@ -1904,6 +1883,7 @@
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
+ hci_notify(hdev, HCI_DEV_WRITE);
return hdev->send(skb);
}
@@ -1942,6 +1922,7 @@
return 0;
}
+EXPORT_SYMBOL(hci_send_cmd);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
@@ -1974,16 +1955,20 @@
hdr->dlen = cpu_to_le16(len);
}
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
+ struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
- BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
+ BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
+ if (hdev->dev_type == HCI_BREDR)
+ hci_add_acl_hdr(skb, conn->handle, flags);
+ else
+ hci_add_acl_hdr(skb, chan->ll_handle, flags);
list = skb_shinfo(skb)->frag_list;
if (!list) {
@@ -2001,8 +1986,7 @@
spin_lock_bh(&conn->data_q.lock);
__skb_queue_tail(&conn->data_q, skb);
-
- flags &= ~ACL_START;
+ flags &= ~ACL_PB_MASK;
flags |= ACL_CONT;
do {
skb = list; list = list->next;
@@ -2134,21 +2118,38 @@
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
- if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
+ if (hdev->acl_cnt <= 0 &&
+ time_after(jiffies, hdev->acl_last_tx + HZ * 45))
hci_link_tx_to(hdev, ACL_LINK);
}
- while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ while (hdev->acl_cnt > 0 &&
+ (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
+ while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
+ int count = 1;
+
BT_DBG("skb %p len %d", skb, skb->len);
+ if (hdev->flow_ctl_mode ==
+ HCI_BLOCK_BASED_FLOW_CTL_MODE)
+ /* Calculate count of blocks used by
+ * this packet
+ */
+ count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
+ hdev->data_block_len) + 1;
+
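+ /* Not enough free data blocks for this packet; transmission
+ * resumes after a Number Of Completed Data Blocks event */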
+ if (count > hdev->acl_cnt)
+ return;
+
hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
- hdev->acl_cnt--;
- conn->sent++;
+ hdev->acl_cnt -= count;
+ quote -= count;
+
+ conn->sent += count;
}
}
}
@@ -2255,7 +2256,7 @@
read_unlock(&hci_task_lock);
}
-/* ----- HCI RX task (incoming data processing) ----- */
+/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
@@ -2407,10 +2408,7 @@
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
- if (test_bit(HCI_RESET, &hdev->flags))
- del_timer(&hdev->cmd_timer);
- else
- mod_timer(&hdev->cmd_timer,
+ mod_timer(&hdev->cmd_timer,
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
@@ -2418,3 +2416,6 @@
}
}
}
+
+module_param(enable_smp, bool, 0644);
+MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 6cddd03..84e3975 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
+ Copyright (c) 2000-2001, 2010-2011, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -45,8 +45,6 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-static int enable_le;
-
/* Handle HCI Event packets */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -58,9 +56,7 @@
if (status)
return;
- if (test_bit(HCI_MGMT, &hdev->flags) &&
- test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
+ clear_bit(HCI_INQUIRY, &hdev->flags);
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,13 +72,36 @@
if (status)
return;
- if (test_bit(HCI_MGMT, &hdev->flags) &&
- test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
+ clear_bit(HCI_INQUIRY, &hdev->flags);
hci_conn_check_pending(hdev);
}
+static void hci_cc_link_key_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_link_key_reply *rp = (void *) skb->data;
+ struct hci_conn *conn;
+ struct hci_cp_link_key_reply *cp;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ if (rp->status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LINK_KEY_REPLY);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn) {
+ hci_conn_hold(conn);
+ memcpy(conn->link_key, cp->link_key, sizeof(conn->link_key));
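+ /* 0x05 = Authenticated Combination Key */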
+ conn->key_type = 5;
+ hci_conn_put(conn);
+ }
+ hci_dev_unlock(hdev);
+}
+
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
@@ -479,16 +498,14 @@
* command otherwise */
u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
- /* CSR 1.1 dongles does not accept any bitfield so don't try to set
- * any event mask for pre 1.2 devices */
- if (hdev->lmp_ver <= 1)
- return;
-
- events[4] |= 0x01; /* Flow Specification Complete */
- events[4] |= 0x02; /* Inquiry Result with RSSI */
- events[4] |= 0x04; /* Read Remote Extended Features Complete */
- events[5] |= 0x08; /* Synchronous Connection Complete */
- events[5] |= 0x10; /* Synchronous Connection Changed */
+ /* Events for 1.2 and newer controllers */
+ if (hdev->lmp_ver > 1) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ }
if (hdev->features[3] & LMP_RSSI_INQ)
events[4] |= 0x04; /* Inquiry Result with RSSI */
@@ -527,20 +544,6 @@
hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
-static void hci_set_le_support(struct hci_dev *hdev)
-{
- struct hci_cp_write_le_host_supported cp;
-
- memset(&cp, 0, sizeof(cp));
-
- if (enable_le) {
- cp.le = 1;
- cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
- }
-
- hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
-}
-
static void hci_setup(struct hci_dev *hdev)
{
hci_setup_event_mask(hdev);
@@ -558,17 +561,6 @@
if (hdev->features[7] & LMP_INQ_TX_PWR)
hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
-
- if (hdev->features[7] & LMP_EXTFEATURES) {
- struct hci_cp_read_local_ext_features cp;
-
- cp.page = 0x01;
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
- sizeof(cp), &cp);
- }
-
- if (hdev->features[4] & LMP_LE)
- hci_set_le_support(hdev);
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -590,7 +582,7 @@
hdev->manufacturer,
hdev->hci_ver, hdev->hci_rev);
- if (test_bit(HCI_INIT, &hdev->flags))
+ if (hdev->dev_type == HCI_BREDR && test_bit(HCI_INIT, &hdev->flags))
hci_setup(hdev);
}
@@ -685,19 +677,17 @@
hdev->features[6], hdev->features[7]);
}
-static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
+ struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (rp->status)
return;
- memcpy(hdev->extfeatures, rp->features, 8);
-
- hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
+ hdev->flow_ctl_mode = rp->mode;
}
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -709,18 +699,20 @@
if (rp->status)
return;
- hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
- hdev->sco_mtu = rp->sco_mtu;
- hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
- hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
+ if (hdev->flow_ctl_mode == HCI_PACKET_BASED_FLOW_CTL_MODE) {
+ hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
+ hdev->sco_mtu = rp->sco_mtu;
+ hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
+ hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
+ hdev->acl_cnt = hdev->acl_pkts;
+ hdev->sco_cnt = hdev->sco_pkts;
+ }
if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
hdev->sco_mtu = 64;
hdev->sco_pkts = 8;
}
- hdev->acl_cnt = hdev->acl_pkts;
- hdev->sco_cnt = hdev->sco_pkts;
BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
hdev->acl_mtu, hdev->acl_pkts,
@@ -748,6 +740,55 @@
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
+static void hci_cc_read_data_block_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ if (hdev->flow_ctl_mode == HCI_BLOCK_BASED_FLOW_CTL_MODE) {
+ hdev->acl_mtu = __le16_to_cpu(rp->max_acl_len);
+ hdev->sco_mtu = 0;
+ hdev->data_block_len = __le16_to_cpu(rp->data_block_len);
+ /* acl_pkts indicates the number of blocks */
+ hdev->acl_pkts = __le16_to_cpu(rp->num_blocks);
+ hdev->sco_pkts = 0;
+ hdev->acl_cnt = hdev->acl_pkts;
+ hdev->sco_cnt = 0;
+ }
+
+ BT_DBG("%s acl mtu %d:%d, data block len %d", hdev->name,
+ hdev->acl_mtu, hdev->acl_cnt, hdev->data_block_len);
+}
+
+static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->amp_status = rp->amp_status;
+ hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+ hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+ hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+ hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+ hdev->amp_type = rp->amp_type;
+ hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+ hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+ hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+ hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+ hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+}
+
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -883,33 +924,6 @@
rp->randomizer, rp->status);
}
-static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_cp_le_set_scan_enable *cp;
- __u8 status = *((__u8 *) skb->data);
-
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- if (status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- if (cp->enable == 0x01) {
- del_timer(&hdev->adv_timer);
- hci_adv_entries_clear(hdev);
- } else if (cp->enable == 0x00) {
- mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
- }
-
- hci_dev_unlock(hdev);
-}
-
static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
@@ -934,19 +948,27 @@
hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}
-static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- struct hci_cp_read_local_ext_features cp;
+ void *sent;
+ __u8 param_scan_enable;
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
-
if (status)
return;
- cp.page = 0x01;
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+ if (!sent)
+ return;
+
+ param_scan_enable = *((__u8 *) sent);
+ if (param_scan_enable == 0x01) {
+ del_timer(&hdev->adv_timer);
+ hci_adv_entries_clear(hdev);
+ } else if (param_scan_enable == 0x00) {
+ mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+ }
}
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
@@ -955,14 +977,10 @@
if (status) {
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
- hci_conn_check_pending(hdev);
- return;
- }
- if (test_bit(HCI_MGMT, &hdev->flags) &&
- !test_and_set_bit(HCI_INQUIRY,
- &hdev->flags))
- mgmt_discovering(hdev->id, 1);
+ hci_conn_check_pending(hdev);
+ } else
+ set_bit(HCI_INQUIRY, &hdev->flags);
}
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -1131,19 +1149,12 @@
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (!conn)
- goto unlock;
-
- if (!hci_outgoing_auth_needed(hdev, conn))
- goto unlock;
-
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (conn && hci_outgoing_auth_needed(hdev, conn)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
-unlock:
hci_dev_unlock(hdev);
}
@@ -1316,19 +1327,150 @@
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
- if (conn) {
- conn->dst_type = cp->peer_addr_type;
+ conn = hci_le_conn_add(hdev, &cp->peer_addr,
+ cp->peer_addr_type);
+ if (conn)
conn->out = 1;
- } else {
+ else
BT_ERR("No memory for new connection");
- }
}
}
hci_dev_unlock(hdev);
}
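+
+/* The Accept Logical Link command shares its parameter layout with
+ * Create Logical Link, so the sent command data is parsed with the
+ * create struct.
+ */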
+static void hci_cs_accept_logical_link(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_create_logical_link *ap;
+ struct hci_chan *chan;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ ap = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_LOGICAL_LINK);
+ if (!ap)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_id(hdev, ap->phy_handle);
+
+ BT_DBG("%s chan %p", hdev->name, chan);
+
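+ /* On failure tear the channel down; otherwise stay in
+ * BT_CONNECT2 until the Logical Link Complete event arrives */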
+ if (status) {
+ if (chan && chan->state == BT_CONNECT) {
+ chan->state = BT_CLOSED;
+ hci_proto_create_cfm(chan, status);
+ hci_chan_del(chan);
+ }
+ } else if (chan)
+ chan->state = BT_CONNECT2;
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_create_logical_link(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_create_logical_link *cp;
+ struct hci_chan *chan;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_LOGICAL_LINK);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_id(hdev, cp->phy_handle);
+
+ BT_DBG("%s chan %p", hdev->name, chan);
+
+ if (status) {
+ if (chan && chan->state == BT_CONNECT) {
+ chan->state = BT_CLOSED;
+ hci_proto_create_cfm(chan, status);
+ hci_chan_del(chan);
+ }
+ } else if (chan)
+ chan->state = BT_CONNECT2;
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_flow_spec_modify(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_flow_spec_modify *cp;
+ struct hci_chan *chan;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_FLOW_SPEC_MODIFY);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_handle(hdev, cp->log_handle);
+ if (chan) {
+ if (status)
+ hci_proto_modify_cfm(chan, status);
+ else {
+ chan->tx_fs = cp->tx_fs;
+ chan->rx_fs = cp->rx_fs;
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_disconn_logical_link(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_disconn_logical_link *cp;
+ struct hci_chan *chan;
+
+ if (!status)
+ return;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONN_LOGICAL_LINK);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_handle(hdev, cp->log_handle);
+ if (chan)
+ hci_chan_del(chan);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_disconn_physical_link(struct hci_dev *hdev, __u8 status)
+{
+ struct hci_cp_disconn_phys_link *cp;
+ struct hci_conn *conn;
+
+ if (!status)
+ return;
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONN_PHYS_LINK);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+ if (conn) {
+ conn->state = BT_CLOSED;
+ hci_conn_del(conn);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
BT_DBG("%s status 0x%x", hdev->name, status);
@@ -1340,9 +1482,7 @@
BT_DBG("%s status %d", hdev->name, status);
- if (test_bit(HCI_MGMT, &hdev->flags) &&
- test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
+ clear_bit(HCI_INQUIRY, &hdev->flags);
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
@@ -1362,12 +1502,6 @@
hci_dev_lock(hdev);
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
@@ -1521,9 +1655,9 @@
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
+ cp.max_latency = cpu_to_le16(0x000A);
cp.content_format = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.retrans_effort = 0x01;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
@@ -1558,7 +1692,7 @@
conn->state = BT_CLOSED;
- if (conn->type == ACL_LINK || conn->type == LE_LINK)
+ if (conn->type == ACL_LINK)
mgmt_disconnected(hdev->id, &conn->dst);
hci_proto_disconn_cfm(conn, ev->reason);
@@ -1578,58 +1712,63 @@
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (!conn)
- goto unlock;
+ if (conn) {
+ if (ev->status == 0x06 && hdev->ssp_mode > 0 &&
+ conn->ssp_mode > 0) {
+ struct hci_cp_auth_requested cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
+ hci_dev_unlock(hdev);
+ BT_INFO("Pin or key missing");
+ return;
+ }
- if (!ev->status) {
- if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
- test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
- BT_INFO("re-auth of legacy device is not possible.");
- } else {
+ if (!ev->status) {
conn->link_mode |= HCI_LM_AUTH;
conn->sec_level = conn->pending_sec_level;
- }
- } else {
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
- }
-
- clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
- clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
-
- if (conn->state == BT_CONFIG) {
- if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = ev->handle;
- cp.encrypt = 0x01;
- hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
} else {
- conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
+ mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ conn->sec_level = BT_SECURITY_LOW;
+ }
+
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status && hdev->ssp_mode > 0 &&
+ conn->ssp_mode > 0) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = ev->handle;
+ cp.encrypt = 0x01;
+ hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
+ sizeof(cp), &cp);
+ } else {
+ conn->state = BT_CONNECTED;
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_put(conn);
+ }
+ } else {
+ hci_auth_cfm(conn, ev->status);
+
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_put(conn);
}
- } else {
- hci_auth_cfm(conn, ev->status);
- hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_put(conn);
- }
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
- if (!ev->status) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = ev->handle;
- cp.encrypt = 0x01;
- hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
- } else {
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
- hci_encrypt_cfm(conn, ev->status, 0x00);
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+ if (!ev->status) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = ev->handle;
+ cp.encrypt = 0x01;
+ hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
+ sizeof(cp), &cp);
+ } else {
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ hci_encrypt_cfm(conn, ev->status, 0x00);
+ }
}
}
-unlock:
hci_dev_unlock(hdev);
}
@@ -1648,19 +1787,12 @@
mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
- goto unlock;
-
- if (!hci_outgoing_auth_needed(hdev, conn))
- goto unlock;
-
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (conn && hci_outgoing_auth_needed(hdev, conn)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
-unlock:
hci_dev_unlock(hdev);
}
@@ -1796,6 +1928,10 @@
hci_cc_exit_periodic_inq(hdev, skb);
break;
+ case HCI_OP_LINK_KEY_REPLY:
+ hci_cc_link_key_reply(hdev, skb);
+ break;
+
case HCI_OP_REMOTE_NAME_REQ_CANCEL:
hci_cc_remote_name_req_cancel(hdev, skb);
break;
@@ -1884,10 +2020,6 @@
hci_cc_read_local_features(hdev, skb);
break;
- case HCI_OP_READ_LOCAL_EXT_FEATURES:
- hci_cc_read_local_ext_features(hdev, skb);
- break;
-
case HCI_OP_READ_BUFFER_SIZE:
hci_cc_read_buffer_size(hdev, skb);
break;
@@ -1900,6 +2032,23 @@
hci_cc_write_ca_timeout(hdev, skb);
break;
+ case HCI_OP_READ_FLOW_CONTROL_MODE:
+ hci_cc_read_flow_control_mode(hdev, skb);
+ break;
+
+ case HCI_OP_READ_DATA_BLOCK_SIZE:
+ hci_cc_read_data_block_size(hdev, skb);
+ break;
+
+ case HCI_OP_READ_LOCAL_AMP_INFO:
+ hci_cc_read_local_amp_info(hdev, skb);
+ break;
+
+ case HCI_OP_READ_LOCAL_AMP_ASSOC:
+ case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
+ hci_amp_cmd_complete(hdev, opcode, skb);
+ break;
+
case HCI_OP_DELETE_STORED_LINK_KEY:
hci_cc_delete_stored_link_key(hdev, skb);
break;
@@ -1944,10 +2093,6 @@
hci_cc_user_confirm_neg_reply(hdev, skb);
break;
- case HCI_OP_LE_SET_SCAN_ENABLE:
- hci_cc_le_set_scan_enable(hdev, skb);
- break;
-
case HCI_OP_LE_LTK_REPLY:
hci_cc_le_ltk_reply(hdev, skb);
break;
@@ -1956,8 +2101,8 @@
hci_cc_le_ltk_neg_reply(hdev, skb);
break;
- case HCI_OP_WRITE_LE_HOST_SUPPORTED:
- hci_cc_write_le_host_supported(hdev, skb);
+ case HCI_OP_LE_SET_SCAN_ENABLE:
+ hci_cc_le_set_scan_enable(hdev, skb);
break;
default:
@@ -2029,6 +2174,30 @@
hci_cs_exit_sniff_mode(hdev, ev->status);
break;
+ case HCI_OP_CREATE_LOGICAL_LINK:
+ hci_cs_create_logical_link(hdev, ev->status);
+ break;
+
+ case HCI_OP_ACCEPT_LOGICAL_LINK:
+ hci_cs_accept_logical_link(hdev, ev->status);
+ break;
+
+ case HCI_OP_DISCONN_LOGICAL_LINK:
+ hci_cs_disconn_logical_link(hdev, ev->status);
+ break;
+
+ case HCI_OP_FLOW_SPEC_MODIFY:
+ hci_cs_flow_spec_modify(hdev, ev->status);
+ break;
+
+ case HCI_OP_CREATE_PHYS_LINK:
+ case HCI_OP_ACCEPT_PHYS_LINK:
+ hci_amp_cmd_status(hdev, opcode, ev->status);
+ break;
+
+ case HCI_OP_DISCONN_PHYS_LINK:
+ hci_cs_disconn_physical_link(hdev, ev->status);
+ break;
+
case HCI_OP_DISCONNECT:
if (ev->status != 0)
mgmt_disconnect_failed(hdev->id);
@@ -2101,13 +2270,20 @@
tasklet_disable(&hdev->tx_task);
for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
- struct hci_conn *conn;
+ struct hci_conn *conn = NULL;
+ struct hci_chan *chan;
__u16 handle, count;
handle = get_unaligned_le16(ptr++);
count = get_unaligned_le16(ptr++);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (hdev->dev_type == HCI_BREDR)
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ else {
+ chan = hci_chan_list_lookup_handle(hdev, handle);
+ if (chan)
+ conn = chan->conn;
+ }
if (conn) {
conn->sent -= count;
@@ -2138,6 +2314,68 @@
tasklet_enable(&hdev->tx_task);
}
+static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
+ __le16 *ptr;
+ int i;
+
+ skb_pull(skb, sizeof(*ev));
+
+ BT_DBG("%s total_num_blocks %d num_hndl %d",
+ hdev->name, ev->total_num_blocks, ev->num_hndl);
+
+ if (skb->len < ev->num_hndl * 6) {
+ BT_DBG("%s bad parameters", hdev->name);
+ return;
+ }
+
+ tasklet_disable(&hdev->tx_task);
+
+ for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
+ struct hci_conn *conn = NULL;
+ struct hci_chan *chan;
+ __u16 handle, block_count;
+
+ handle = get_unaligned_le16(ptr++);
+
+ /* Skip packet count */
+ ptr++;
+ block_count = get_unaligned_le16(ptr++);
+
+ BT_DBG("%s handle %d count %d", hdev->name, handle,
+ block_count);
+
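+ /* On AMP controllers the handle names a logical link; resolve
+ * it to the owning physical link via the channel list */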
+ if (hdev->dev_type == HCI_BREDR)
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ else {
+ chan = hci_chan_list_lookup_handle(hdev, handle);
+ if (chan)
+ conn = chan->conn;
+ }
+ if (conn) {
+ BT_DBG("%s conn %p sent %d", hdev->name,
+ conn, conn->sent);
+
+ conn->sent -= block_count;
+
+ if (conn->type == ACL_LINK) {
+ hdev->acl_cnt += block_count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ } else {
+ /* We should not find ourselves here */
+ BT_DBG("Unexpected event for SCO connection");
+ }
+ }
+ }
+
+ tasklet_schedule(&hdev->tx_task);
+
+ tasklet_enable(&hdev->tx_task);
+}
+
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_mode_change *ev = (void *) skb->data;
@@ -2185,16 +2423,9 @@
if (!test_bit(HCI_PAIRABLE, &hdev->flags))
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
- else if (test_bit(HCI_MGMT, &hdev->flags)) {
- u8 secure;
- if (conn->pending_sec_level == BT_SECURITY_HIGH)
- secure = 1;
- else
- secure = 0;
-
- mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
- }
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_pin_code_request(hdev->id, &ev->bdaddr);
hci_dev_unlock(hdev);
}
@@ -2223,30 +2454,17 @@
BT_DBG("%s found key type %u for %s", hdev->name, key->type,
batostr(&ev->bdaddr));
- if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
- key->type == HCI_LK_DEBUG_COMBINATION) {
+ if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
BT_DBG("%s ignoring debug key", hdev->name);
goto not_found;
}
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn) {
- if (key->type == HCI_LK_UNAUTH_COMBINATION &&
- conn->auth_type != 0xff &&
- (conn->auth_type & 0x01)) {
- BT_DBG("%s ignoring unauthenticated key", hdev->name);
- goto not_found;
- }
- if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
- conn->pending_sec_level == BT_SECURITY_HIGH) {
- BT_DBG("%s ignoring key unauthenticated for high \
- security", hdev->name);
- goto not_found;
- }
-
- conn->key_type = key->type;
- conn->pin_length = key->pin_len;
+ if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
+ (conn->auth_type & 0x01)) {
+ BT_DBG("%s ignoring unauthenticated key", hdev->name);
+ goto not_found;
}
bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -2269,7 +2487,7 @@
struct hci_conn *conn;
u8 pin_len = 0;
- BT_DBG("%s", hdev->name);
+ BT_DBG("%s type %d", hdev->name, ev->key_type);
hci_dev_lock(hdev);
@@ -2277,16 +2495,17 @@
if (conn) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+ memcpy(conn->link_key, ev->link_key, 16);
+ conn->key_type = ev->key_type;
+ hci_disconnect_amp(conn, 0x06); /* 0x06 == PIN or key missing */
+
pin_len = conn->pin_length;
-
- if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
- conn->key_type = ev->key_type;
-
hci_conn_put(conn);
}
if (test_bit(HCI_LINK_KEYS, &hdev->flags))
- hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
+ hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
ev->key_type, pin_len);
hci_dev_unlock(hdev);
@@ -2361,12 +2580,6 @@
hci_dev_lock(hdev);
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
@@ -2481,7 +2694,6 @@
hci_conn_add_sysfs(conn);
break;
- case 0x10: /* Connection Accept Timeout */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
@@ -2530,12 +2742,6 @@
if (!num_rsp)
return;
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -2569,7 +2775,7 @@
/* If remote requests no-bonding follow that lead */
if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
- return conn->remote_auth | (conn->auth_type & 0x01);
+ return 0x00;
return conn->auth_type;
}
@@ -2598,8 +2804,7 @@
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.capability = conn->io_capability;
- conn->auth_type = hci_get_auth_req(conn);
- cp.authentication = conn->auth_type;
+ cp.authentication = hci_get_auth_req(conn);
if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
hci_find_remote_oob_data(hdev, &conn->dst))
@@ -2613,7 +2818,7 @@
struct hci_cp_io_capability_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x18; /* Pairing not allowed */
+ cp.reason = 0x16; /* Connection terminated by local host */
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
sizeof(cp), &cp);
@@ -2648,67 +2853,14 @@
struct sk_buff *skb)
{
struct hci_ev_user_confirm_req *ev = (void *) skb->data;
- int loc_mitm, rem_mitm, confirm_hint = 0;
- struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- if (!test_bit(HCI_MGMT, &hdev->flags))
- goto unlock;
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
- goto unlock;
-
- loc_mitm = (conn->auth_type & 0x01);
- rem_mitm = (conn->remote_auth & 0x01);
-
- /* If we require MITM but the remote device can't provide that
- * (it has NoInputNoOutput) then reject the confirmation
- * request. The only exception is when we're dedicated bonding
- * initiators (connect_cfm_cb set) since then we always have the MITM
- * bit set. */
- if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
- BT_DBG("Rejecting request: remote device can't provide MITM");
- hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
- goto unlock;
- }
-
- /* If no side requires MITM protection; auto-accept */
- if ((!loc_mitm || conn->remote_cap == 0x03) &&
- (!rem_mitm || conn->io_capability == 0x03)) {
-
- /* If we're not the initiators request authorization to
- * proceed from user space (mgmt_user_confirm with
- * confirm_hint set to 1). */
- if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
- BT_DBG("Confirming auto-accept as acceptor");
- confirm_hint = 1;
- goto confirm;
- }
-
- BT_DBG("Auto-accept of user confirmation with %ums delay",
- hdev->auto_accept_delay);
-
- if (hdev->auto_accept_delay > 0) {
- int delay = msecs_to_jiffies(hdev->auto_accept_delay);
- mod_timer(&conn->auto_accept_timer, jiffies + delay);
- goto unlock;
- }
-
- hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
- goto unlock;
- }
-
-confirm:
- mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
- confirm_hint);
-
-unlock:
hci_dev_unlock(hdev);
}
@@ -2801,26 +2953,21 @@
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
+ conn = hci_le_conn_add(hdev, &ev->bdaddr, ev->bdaddr_type);
if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
return;
}
-
- conn->dst_type = ev->bdaddr_type;
}
if (ev->status) {
- mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
hci_proto_connect_cfm(conn, ev->status);
conn->state = BT_CLOSED;
hci_conn_del(conn);
goto unlock;
}
- mgmt_connected(hdev->id, &ev->bdaddr);
-
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
@@ -2834,27 +2981,6 @@
hci_dev_unlock(hdev);
}
-static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_ev_le_advertising_info *ev;
- u8 num_reports;
-
- num_reports = skb->data[0];
- ev = (void *) &skb->data[1];
-
- hci_dev_lock(hdev);
-
- hci_add_adv_entry(hdev, ev);
-
- while (--num_reports) {
- ev = (void *) (ev->data + ev->length + 1);
- hci_add_adv_entry(hdev, ev);
- }
-
- hci_dev_unlock(hdev);
-}
-
static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -2892,6 +3018,22 @@
hci_dev_unlock(hdev);
}
+static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_le_advertising_info *ev;
+ u8 num_reports;
+
+ num_reports = skb->data[0];
+ ev = (void *) &skb->data[1];
+ hci_add_adv_entry(hdev, ev);
+
+ while (--num_reports) {
+ ev = (void *) (ev->data + ev->length + 1);
+ hci_add_adv_entry(hdev, ev);
+ }
+}
+
static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -2903,19 +3045,145 @@
hci_le_conn_complete_evt(hdev, skb);
break;
- case HCI_EV_LE_ADVERTISING_REPORT:
- hci_le_adv_report_evt(hdev, skb);
- break;
-
case HCI_EV_LE_LTK_REQ:
hci_le_ltk_request_evt(hdev, skb);
break;
+ case HCI_EV_LE_ADVERTISING_REPORT:
+ hci_le_adv_report_evt(hdev, skb);
+ break;
+
default:
break;
}
}
+static inline void hci_phy_link_complete(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_phys_link_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s handle %d status %d", hdev->name, ev->phy_handle,
+ ev->status);
+
+ hci_dev_lock(hdev);
+
+ if (ev->status == 0) {
+ conn = hci_conn_add(hdev, ACL_LINK, 0, BDADDR_ANY);
+ if (conn) {
+ conn->handle = ev->phy_handle;
+ conn->state = BT_CONNECTED;
+
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT / 2;
+ hci_conn_put(conn);
+
+ hci_conn_hold_device(conn);
+ hci_conn_add_sysfs(conn);
+ } else {
+ BT_ERR("No memory for new connection");
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_log_link_complete(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_log_link_complete *ev = (void *) skb->data;
+ struct hci_chan *chan;
+
+ BT_DBG("%s handle %d status %d", hdev->name,
+ __le16_to_cpu(ev->log_handle), ev->status);
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_id(hdev, ev->phy_handle);
+
+ if (ev->status == 0) {
+ if (chan) {
+ chan->ll_handle = __le16_to_cpu(ev->log_handle);
+ chan->state = BT_CONNECTED;
+ hci_proto_create_cfm(chan, ev->status);
+ hci_chan_hold(chan);
+ }
+ } else {
+ if (chan) {
+ chan->state = BT_CLOSED;
+ hci_proto_create_cfm(chan, ev->status);
+ hci_chan_del(chan);
+ }
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_flow_spec_modify_complete(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_flow_spec_modify_complete *ev = (void *) skb->data;
+ struct hci_chan *chan;
+
+ BT_DBG("%s handle %d status %d", hdev->name,
+ __le16_to_cpu(ev->log_handle), ev->status);
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_handle(hdev, __le16_to_cpu(ev->log_handle));
+ if (chan)
+ hci_proto_modify_cfm(chan, ev->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_disconn_log_link_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_disconn_log_link_complete *ev = (void *) skb->data;
+ struct hci_chan *chan;
+
+ BT_DBG("%s handle %d status %d", hdev->name,
+ __le16_to_cpu(ev->log_handle), ev->status);
+
+ if (ev->status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ chan = hci_chan_list_lookup_handle(hdev, __le16_to_cpu(ev->log_handle));
+ if (chan) {
+ hci_proto_destroy_cfm(chan, ev->reason);
+ hci_chan_del(chan);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static inline void hci_disconn_phy_link_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_disconn_phys_link_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status %d", hdev->name, ev->status);
+
+ if (ev->status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (conn) {
+ conn->state = BT_CLOSED;
+
+ hci_proto_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
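Taken together, the handlers above cover the AMP link lifecycle. A condensed sketch of the expected event order (a summary of the code above, not new logic):

/*
 * HCI_EV_PHYS_LINK_COMPLETE         -> hci_conn created for the phy handle
 * HCI_EV_LOG_LINK_COMPLETE          -> hci_chan bound to the logical link handle
 * HCI_EV_DISCONN_LOG_LINK_COMPLETE  -> hci_chan torn down via hci_chan_del()
 * HCI_EV_DISCONN_PHYS_LINK_COMPLETE -> hci_conn torn down via hci_conn_del()
 */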
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
@@ -3068,6 +3336,40 @@
hci_remote_oob_data_request_evt(hdev, skb);
break;
+ case HCI_EV_PHYS_LINK_COMPLETE:
+ hci_phy_link_complete(hdev, skb);
+ hci_amp_event_packet(hdev, event, skb);
+ break;
+
+ case HCI_EV_LOG_LINK_COMPLETE:
+ hci_log_link_complete(hdev, skb);
+ break;
+
+ case HCI_EV_FLOW_SPEC_MODIFY_COMPLETE:
+ hci_flow_spec_modify_complete(hdev, skb);
+ break;
+
+ case HCI_EV_DISCONN_LOG_LINK_COMPLETE:
+ hci_disconn_log_link_complete_evt(hdev, skb);
+ break;
+
+ case HCI_EV_DISCONN_PHYS_LINK_COMPLETE:
+ hci_disconn_phy_link_complete_evt(hdev, skb);
+ hci_amp_event_packet(hdev, event, skb);
+ break;
+
+ case HCI_EV_NUM_COMP_BLOCKS:
+ hci_num_comp_blocks_evt(hdev, skb);
+ break;
+
+ case HCI_EV_CHANNEL_SELECTED:
+ hci_amp_event_packet(hdev, event, skb);
+ break;
+
+ case HCI_EV_AMP_STATUS_CHANGE:
+ hci_amp_event_packet(hdev, event, skb);
+ break;
+
default:
BT_DBG("%s event 0x%x", hdev->name, event);
break;
@@ -3104,6 +3406,3 @@
hci_send_to_sock(hdev, skb, NULL);
kfree_skb(skb);
}
-
-module_param(enable_le, bool, 0444);
-MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index ff02cf5..0046945 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (c) 2000-2001, 2011, Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -180,24 +180,82 @@
return 0;
}
-static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- bdaddr_t bdaddr;
+ struct list_head *p;
- if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
- return -EFAULT;
+ list_for_each(p, &hdev->blacklist) {
+ struct bdaddr_list *b;
- return hci_blacklist_add(hdev, &bdaddr);
+ b = list_entry(p, struct bdaddr_list, list);
+
+ if (bacmp(bdaddr, &b->bdaddr) == 0)
+ return b;
+ }
+
+ return NULL;
}
-static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
+static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
+ struct bdaddr_list *entry;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- return hci_blacklist_del(hdev, &bdaddr);
+ if (bacmp(&bdaddr, BDADDR_ANY) == 0)
+ return -EBADF;
+
+ if (hci_blacklist_lookup(hdev, &bdaddr))
+ return -EEXIST;
+
+ entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ bacpy(&entry->bdaddr, &bdaddr);
+
+ list_add(&entry->list, &hdev->blacklist);
+
+ return 0;
+}
+
+int hci_blacklist_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->blacklist) {
+ struct bdaddr_list *b;
+
+ b = list_entry(p, struct bdaddr_list, list);
+
+ list_del(p);
+ kfree(b);
+ }
+
+ return 0;
+}
+
+static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
+{
+ bdaddr_t bdaddr;
+ struct bdaddr_list *entry;
+
+ if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
+ return -EFAULT;
+
+ if (bacmp(&bdaddr, BDADDR_ANY) == 0)
+ return hci_blacklist_clear(hdev);
+
+ entry = hci_blacklist_lookup(hdev, &bdaddr);
+ if (!entry)
+ return -ENOENT;
+
+ list_del(&entry->list);
+ kfree(entry);
+
+ return 0;
}
/* Ioctls that require bound socket */
@@ -232,12 +290,15 @@
case HCIBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
- return hci_sock_blacklist_add(hdev, (void __user *) arg);
+ return hci_blacklist_add(hdev, (void __user *) arg);
case HCIUNBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
- return hci_sock_blacklist_del(hdev, (void __user *) arg);
+ return hci_blacklist_del(hdev, (void __user *) arg);
+
+ case HCISETAUTHINFO:
+ return hci_set_auth_info(hdev, (void __user *) arg);
default:
if (hdev->ioctl)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index a6c3aa8..8775933 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -511,35 +511,6 @@
.release = single_release,
};
-static int auto_accept_delay_set(void *data, u64 val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock_bh(hdev);
-
- hdev->auto_accept_delay = val;
-
- hci_dev_unlock_bh(hdev);
-
- return 0;
-}
-
-static int auto_accept_delay_get(void *data, u64 *val)
-{
- struct hci_dev *hdev = data;
-
- hci_dev_lock_bh(hdev);
-
- *val = hdev->auto_accept_delay;
-
- hci_dev_unlock_bh(hdev);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
- auto_accept_delay_set, "%llu\n");
-
int hci_register_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
@@ -574,8 +545,6 @@
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
- debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
- &auto_accept_delay_fops);
return 0;
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index fb68f34..04f7784 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -36,8 +36,6 @@
#include <linux/file.h>
#include <linux/init.h>
#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/input.h>
@@ -320,144 +318,24 @@
return hidp_queue_report(session, buf, rsize);
}
-static int hidp_get_raw_report(struct hid_device *hid,
- unsigned char report_number,
- unsigned char *data, size_t count,
- unsigned char report_type)
-{
- struct hidp_session *session = hid->driver_data;
- struct sk_buff *skb;
- size_t len;
- int numbered_reports = hid->report_enum[report_type].numbered;
-
- switch (report_type) {
- case HID_FEATURE_REPORT:
- report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE;
- break;
- case HID_INPUT_REPORT:
- report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_INPUT;
- break;
- case HID_OUTPUT_REPORT:
- report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_OUPUT;
- break;
- default:
- return -EINVAL;
- }
-
- if (mutex_lock_interruptible(&session->report_mutex))
- return -ERESTARTSYS;
-
- /* Set up our wait, and send the report request to the device. */
- session->waiting_report_type = report_type & HIDP_DATA_RTYPE_MASK;
- session->waiting_report_number = numbered_reports ? report_number : -1;
- set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- data[0] = report_number;
- if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1))
- goto err_eio;
-
- /* Wait for the return of the report. The returned report
- gets put in session->report_return. */
- while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
- int res;
-
- res = wait_event_interruptible_timeout(session->report_queue,
- !test_bit(HIDP_WAITING_FOR_RETURN, &session->flags),
- 5*HZ);
- if (res == 0) {
- /* timeout */
- goto err_eio;
- }
- if (res < 0) {
- /* signal */
- goto err_restartsys;
- }
- }
-
- skb = session->report_return;
- if (skb) {
- len = skb->len < count ? skb->len : count;
- memcpy(data, skb->data, len);
-
- kfree_skb(skb);
- session->report_return = NULL;
- } else {
- /* Device returned a HANDSHAKE, indicating protocol error. */
- len = -EIO;
- }
-
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- mutex_unlock(&session->report_mutex);
-
- return len;
-
-err_restartsys:
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- mutex_unlock(&session->report_mutex);
- return -ERESTARTSYS;
-err_eio:
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- mutex_unlock(&session->report_mutex);
- return -EIO;
-}
-
static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
unsigned char report_type)
{
- struct hidp_session *session = hid->driver_data;
- int ret;
-
switch (report_type) {
case HID_FEATURE_REPORT:
report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
break;
case HID_OUTPUT_REPORT:
- report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT;
+ report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
break;
default:
return -EINVAL;
}
- if (mutex_lock_interruptible(&session->report_mutex))
- return -ERESTARTSYS;
-
- /* Set up our wait, and send the report request to the device. */
- set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
if (hidp_send_ctrl_message(hid->driver_data, report_type,
- data, count)) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* Wait for the ACK from the device. */
- while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
- int res;
-
- res = wait_event_interruptible_timeout(session->report_queue,
- !test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags),
- 10*HZ);
- if (res == 0) {
- /* timeout */
- ret = -EIO;
- goto err;
- }
- if (res < 0) {
- /* signal */
- ret = -ERESTARTSYS;
- goto err;
- }
- }
-
- if (!session->output_report_success) {
- ret = -EIO;
- goto err;
- }
-
- ret = count;
-
-err:
- clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
- mutex_unlock(&session->report_mutex);
- return ret;
+ data, count))
+ return -ENOMEM;
+ return count;
}
static void hidp_idle_timeout(unsigned long arg)
@@ -465,7 +343,7 @@
struct hidp_session *session = (struct hidp_session *) arg;
atomic_inc(&session->terminate);
- wake_up_process(session->task);
+ hidp_schedule(session);
}
static void hidp_set_timer(struct hidp_session *session)
@@ -484,22 +362,16 @@
unsigned char param)
{
BT_DBG("session %p param 0x%02x", session, param);
- session->output_report_success = 0; /* default condition */
switch (param) {
case HIDP_HSHK_SUCCESSFUL:
/* FIXME: Call into SET_ GET_ handlers here */
- session->output_report_success = 1;
break;
case HIDP_HSHK_NOT_READY:
case HIDP_HSHK_ERR_INVALID_REPORT_ID:
case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
case HIDP_HSHK_ERR_INVALID_PARAMETER:
- if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- wake_up_interruptible(&session->report_queue);
- }
/* FIXME: Call into SET_ GET_ handlers here */
break;
@@ -518,12 +390,6 @@
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
break;
}
-
- /* Wake up the waiting thread. */
- if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
- clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
- wake_up_interruptible(&session->report_queue);
- }
}
static void hidp_process_hid_control(struct hidp_session *session,
@@ -536,16 +402,15 @@
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
+ /* Kill session thread */
atomic_inc(&session->terminate);
- wake_up_process(current);
+ hidp_schedule(session);
}
}
-/* Returns true if the passed-in skb should be freed by the caller. */
-static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
+static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
unsigned char param)
{
- int done_with_skb = 1;
BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
switch (param) {
@@ -557,6 +422,7 @@
if (session->hid)
hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
+
break;
case HIDP_DATA_RTYPE_OTHER:
@@ -568,27 +434,12 @@
__hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
}
-
- if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
- param == session->waiting_report_type) {
- if (session->waiting_report_number < 0 ||
- session->waiting_report_number == skb->data[0]) {
- /* hidp_get_raw_report() is waiting on this report. */
- session->report_return = skb;
- done_with_skb = 0;
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- wake_up_interruptible(&session->report_queue);
- }
- }
-
- return done_with_skb;
}
static void hidp_recv_ctrl_frame(struct hidp_session *session,
struct sk_buff *skb)
{
unsigned char hdr, type, param;
- int free_skb = 1;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -608,7 +459,7 @@
break;
case HIDP_TRANS_DATA:
- free_skb = hidp_process_data(session, skb, param);
+ hidp_process_data(session, skb, param);
break;
default:
@@ -617,8 +468,7 @@
break;
}
- if (free_skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
static void hidp_recv_intr_frame(struct hidp_session *session,
@@ -696,38 +546,54 @@
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
struct sk_buff *skb;
+ int vendor = 0x0000, product = 0x0000;
wait_queue_t ctrl_wait, intr_wait;
BT_DBG("session %p", session);
+ if (session->input) {
+ vendor = session->input->id.vendor;
+ product = session->input->id.product;
+ }
+
+ if (session->hid) {
+ vendor = session->hid->vendor;
+ product = session->hid->product;
+ }
+
+ daemonize("khidpd_%04x%04x", vendor, product);
set_user_nice(current, -15);
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
add_wait_queue(sk_sleep(intr_sk), &intr_wait);
- session->waiting_for_startup = 0;
- wake_up_interruptible(&session->startup_queue);
- set_current_state(TASK_INTERRUPTIBLE);
while (!atomic_read(&session->terminate)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
if (ctrl_sk->sk_state != BT_CONNECTED ||
intr_sk->sk_state != BT_CONNECTED)
break;
while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
- hidp_recv_ctrl_frame(session, skb);
+ if (!skb_linearize(skb))
+ hidp_recv_ctrl_frame(session, skb);
+ else
+ kfree_skb(skb);
}
while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb_orphan(skb);
- hidp_recv_intr_frame(session, skb);
+ if (!skb_linearize(skb))
+ hidp_recv_intr_frame(session, skb);
+ else
+ kfree_skb(skb);
}
hidp_process_transmit(session);
schedule();
- set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -764,7 +630,6 @@
up_write(&hidp_session_sem);
- kfree(session->rd_data);
kfree(session);
return 0;
}
@@ -842,8 +707,7 @@
err = input_register_device(input);
if (err < 0) {
- input_free_device(input);
- session->input = NULL;
+ hci_conn_put_device(session->conn);
return err;
}
@@ -902,8 +766,6 @@
.hidinput_input_event = hidp_hidinput_event,
};
-/* This function sets up the hid device. It does not add it
- to the HID system. That is done in hidp_add_connection(). */
static int hidp_setup_hid(struct hidp_session *session,
struct hidp_connadd_req *req)
{
@@ -943,11 +805,18 @@
hid->dev.parent = hidp_get_device(session);
hid->ll_driver = &hidp_hid_driver;
- hid->hid_get_raw_report = hidp_get_raw_report;
hid->hid_output_raw_report = hidp_output_raw_report;
+ err = hid_add_device(hid);
+ if (err < 0)
+ goto failed;
+
return 0;
+failed:
+ hid_destroy_device(hid);
+ session->hid = NULL;
+
fault:
kfree(session->rd_data);
session->rd_data = NULL;
@@ -958,7 +827,6 @@
int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
{
struct hidp_session *session, *s;
- int vendor, product;
int err;
BT_DBG("");
@@ -983,10 +851,8 @@
bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
- session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->chan->omtu,
- l2cap_pi(ctrl_sock->sk)->chan->imtu);
- session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->chan->omtu,
- l2cap_pi(intr_sock->sk)->chan->imtu);
+ session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu);
+ session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu);
BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
@@ -999,10 +865,6 @@
skb_queue_head_init(&session->ctrl_transmit);
skb_queue_head_init(&session->intr_transmit);
- mutex_init(&session->report_mutex);
- init_waitqueue_head(&session->report_queue);
- init_waitqueue_head(&session->startup_queue);
- session->waiting_for_startup = 1;
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
session->idle_to = req->idle_to;
@@ -1022,36 +884,9 @@
hidp_set_timer(session);
- if (session->hid) {
- vendor = session->hid->vendor;
- product = session->hid->product;
- } else if (session->input) {
- vendor = session->input->id.vendor;
- product = session->input->id.product;
- } else {
- vendor = 0x0000;
- product = 0x0000;
- }
-
- session->task = kthread_run(hidp_session, session, "khidpd_%04x%04x",
- vendor, product);
- if (IS_ERR(session->task)) {
- err = PTR_ERR(session->task);
+ err = kernel_thread(hidp_session, session, CLONE_KERNEL);
+ if (err < 0)
goto unlink;
- }
-
- while (session->waiting_for_startup) {
- wait_event_interruptible(session->startup_queue,
- !session->waiting_for_startup);
- }
-
- err = hid_add_device(session->hid);
- if (err < 0) {
- atomic_inc(&session->terminate);
- wake_up_process(session->task);
- up_write(&hidp_session_sem);
- return err;
- }
if (session->input) {
hidp_send_ctrl_message(session,
@@ -1090,6 +925,7 @@
failed:
up_write(&hidp_session_sem);
+ input_free_device(session->input);
kfree(session);
return err;
}
@@ -1113,8 +949,13 @@
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
+ /* Wakeup user-space polling for socket errors */
+ session->intr_sock->sk->sk_err = EUNATCH;
+ session->ctrl_sock->sk->sk_err = EUNATCH;
+
+ /* Kill session thread */
atomic_inc(&session->terminate);
- wake_up_process(session->task);
+ hidp_schedule(session);
}
} else
err = -ENOENT;
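The shutdown path above relies on the same terminate/wakeup handshake used by the session loop earlier in this file; a condensed sketch of both sides:

/* requester side */
atomic_inc(&session->terminate);	/* ask the session thread to exit */
hidp_schedule(session);			/* kick both socket wait queues */

/* session thread side, as in hidp_session() above */
while (!atomic_read(&session->terminate)) {
	set_current_state(TASK_INTERRUPTIBLE);
	/* drain ctrl/intr receive queues, transmit, then schedule() */
}
set_current_state(TASK_RUNNING);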
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index af1bcc8..28bb9ce 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -80,8 +80,6 @@
#define HIDP_VIRTUAL_CABLE_UNPLUG 0
#define HIDP_BOOT_PROTOCOL_MODE 1
#define HIDP_BLUETOOTH_VENDOR_ID 9
-#define HIDP_WAITING_FOR_RETURN 10
-#define HIDP_WAITING_FOR_SEND_ACK 11
struct hidp_connadd_req {
int ctrl_sock; /* Connected control socket */
@@ -143,7 +141,6 @@
uint intr_mtu;
atomic_t terminate;
- struct task_struct *task;
unsigned char keys[8];
unsigned char leds;
@@ -157,22 +154,9 @@
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
- /* Used in hidp_get_raw_report() */
- int waiting_report_type; /* HIDP_DATA_RTYPE_* */
- int waiting_report_number; /* -1 for not numbered */
- struct mutex report_mutex;
- struct sk_buff *report_return;
- wait_queue_head_t report_queue;
-
- /* Used in hidp_output_raw_report() */
- int output_report_success; /* boolean */
-
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
-
- wait_queue_head_t startup_queue;
- int waiting_for_startup;
};
static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5a0ce73..e753c5f 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1,6 +1,6 @@
/*
BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
@@ -46,6 +46,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/crc16.h>
+#include <linux/math64.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -55,317 +56,509 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
+#include <net/bluetooth/amp.h>
int disable_ertm;
+int enable_reconfig;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { 0x02, };
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };
-static LIST_HEAD(chan_list);
-static DEFINE_RWLOCK(chan_list_lock);
+struct workqueue_struct *_l2cap_wq;
+
+struct bt_sock_list l2cap_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
+static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
+ struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
+static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
+ struct l2cap_pinfo *pi, u16 icid, u16 result);
+static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid, u16 result);
+
+static void l2cap_amp_move_setup(struct sock *sk);
+static void l2cap_amp_move_success(struct sock *sk);
+static void l2cap_amp_move_revert(struct sock *sk);
+
+static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
-static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
- void *data);
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
-static void l2cap_send_disconn_req(struct l2cap_conn *conn,
- struct l2cap_chan *chan, int err);
-
-static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
+static int l2cap_answer_move_poll(struct sock *sk);
+static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
+static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
+static void l2cap_chan_ready(struct sock *sk);
+static void l2cap_conn_del(struct hci_conn *hcon, int err);
/* ---- L2CAP channels ---- */
-
-static inline void chan_hold(struct l2cap_chan *c)
+static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
- atomic_inc(&c->refcnt);
-}
-
-static inline void chan_put(struct l2cap_chan *c)
-{
- if (atomic_dec_and_test(&c->refcnt))
- kfree(c);
-}
-
-static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
-{
- struct l2cap_chan *c;
-
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->dcid == cid)
- return c;
+ struct sock *s;
+ for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+ if (l2cap_pi(s)->dcid == cid)
+ break;
}
- return NULL;
-
+ return s;
}
-static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
+/* Find channel with given DCID.
+ * Returns locked socket */
+static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
+ u16 cid)
{
- struct l2cap_chan *c;
+ struct sock *s;
+ read_lock(&l->lock);
+ s = __l2cap_get_chan_by_dcid(l, cid);
+ if (s)
+ bh_lock_sock(s);
+ read_unlock(&l->lock);
+ return s;
+}
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->scid == cid)
- return c;
+static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
+{
+ struct sock *s;
+ for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+ if (l2cap_pi(s)->scid == cid)
+ break;
}
- return NULL;
+ return s;
}
/* Find channel with given SCID.
* Returns locked socket */
-static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
+static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
- struct l2cap_chan *c;
-
- read_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_scid(conn, cid);
- if (c)
- bh_lock_sock(c->sk);
- read_unlock(&conn->chan_lock);
- return c;
+ struct sock *s;
+ read_lock(&l->lock);
+ s = __l2cap_get_chan_by_scid(l, cid);
+ if (s)
+ bh_lock_sock(s);
+ read_unlock(&l->lock);
+ return s;
}
-static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
+static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
- struct l2cap_chan *c;
-
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->ident == ident)
- return c;
+ struct sock *s;
+ for (s = l->head; s; s = l2cap_pi(s)->next_c) {
+ if (l2cap_pi(s)->ident == ident)
+ break;
}
+ return s;
+}
+
+static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
+{
+ struct sock *s;
+ read_lock(&l->lock);
+ s = __l2cap_get_chan_by_ident(l, ident);
+ if (s)
+ bh_lock_sock(s);
+ read_unlock(&l->lock);
+ return s;
+}
+
+static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
+ u16 seq)
+{
+ struct sk_buff *skb;
+
+ skb_queue_walk(head, skb) {
+ if (bt_cb(skb)->control.txseq == seq)
+ return skb;
+ }
+
return NULL;
}
-static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
+static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
- struct l2cap_chan *c;
+ u16 alloc_size = 1;
+ int err = 0;
+ int i;
- read_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- if (c)
- bh_lock_sock(c->sk);
- read_unlock(&conn->chan_lock);
- return c;
-}
+ /* Actual allocated size must be a power of 2 */
+ while (alloc_size && alloc_size <= size)
+ alloc_size <<= 1;
+ if (!alloc_size)
+ return -ENOMEM;
-static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
-{
- struct l2cap_chan *c;
+ seq_list->list = kzalloc(sizeof(u16) * alloc_size, GFP_ATOMIC);
+ if (!seq_list->list)
+ return -ENOMEM;
- list_for_each_entry(c, &chan_list, global_l) {
- if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
- goto found;
- }
+ seq_list->size = alloc_size;
+ seq_list->mask = alloc_size - 1;
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ for (i = 0; i < alloc_size; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
- c = NULL;
-found:
- return c;
-}
-
-int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
-{
- int err;
-
- write_lock_bh(&chan_list_lock);
-
- if (psm && __l2cap_global_chan_by_addr(psm, src)) {
- err = -EADDRINUSE;
- goto done;
- }
-
- if (psm) {
- chan->psm = psm;
- chan->sport = psm;
- err = 0;
- } else {
- u16 p;
-
- err = -EINVAL;
- for (p = 0x1001; p < 0x1100; p += 2)
- if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
- chan->psm = cpu_to_le16(p);
- chan->sport = cpu_to_le16(p);
- err = 0;
- break;
- }
- }
-
-done:
- write_unlock_bh(&chan_list_lock);
return err;
}
-int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
+static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
- write_lock_bh(&chan_list_lock);
-
- chan->scid = scid;
-
- write_unlock_bh(&chan_list_lock);
-
- return 0;
+ kfree(seq_list->list);
}
-static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
+static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
+ u16 seq)
+{
+ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
+}
+
+static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
+ /* In case someone tries to pop the head of an empty list */
+ BT_DBG("List empty");
+ return L2CAP_SEQ_LIST_CLEAR;
+ } else if (seq_list->head == seq) {
+ /* Head can be removed quickly */
+ BT_DBG("Remove head");
+ seq_list->head = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+ } else {
+ /* Non-head item must be found first */
+ u16 prev = seq_list->head;
+ BT_DBG("Find and remove");
+ while (seq_list->list[prev & mask] != seq) {
+ prev = seq_list->list[prev & mask];
+ if (prev == L2CAP_SEQ_LIST_TAIL) {
+ BT_DBG("seq %d not in list", (int) seq);
+ return L2CAP_SEQ_LIST_CLEAR;
+ }
+ }
+
+ seq_list->list[prev & mask] = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+ if (seq_list->tail == seq)
+ seq_list->tail = prev;
+ }
+ return seq;
+}
+
+static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
+{
+ return l2cap_seq_list_remove(seq_list, seq_list->head);
+}
+
+static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
+{
+ if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
+ u16 i;
+ for (i = 0; i < seq_list->size; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+}
+
+static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
+
+ if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
+ if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
+ seq_list->head = seq;
+ else
+ seq_list->list[seq_list->tail & mask] = seq;
+
+ seq_list->tail = seq;
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
+ }
+}
+
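The sequence list above keeps its links inside the backing array: slot (seq & mask) holds the next sequence number or a CLEAR/TAIL sentinel, so membership, append and pop are all O(1) with no allocation after init. An illustrative walk-through (values are hypothetical):

struct l2cap_seq_list sl;

l2cap_seq_list_init(&sl, 63);		/* backing array rounds up to 64 slots */
l2cap_seq_list_append(&sl, 5);		/* head = tail = 5 */
l2cap_seq_list_append(&sl, 9);		/* list[5] = 9, tail = 9 */
l2cap_seq_list_contains(&sl, 9);	/* true */
l2cap_seq_list_pop(&sl);		/* returns 5, head becomes 9 */
l2cap_seq_list_free(&sl);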
+static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
+{
+ u16 packed;
+
+ packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
+ L2CAP_CTRL_REQSEQ;
+ packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
+ L2CAP_CTRL_FINAL;
+
+ if (control->frame_type == 's') {
+ packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
+ L2CAP_CTRL_POLL;
+ packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
+ L2CAP_CTRL_SUPERVISE;
+ packed |= L2CAP_CTRL_FRAME_TYPE;
+ } else {
+ packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
+ L2CAP_CTRL_SAR;
+ packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
+ L2CAP_CTRL_TXSEQ;
+ }
+
+ return packed;
+}
+
+static void __get_enhanced_control(u16 enhanced,
+ struct bt_l2cap_control *control)
+{
+ control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
+ L2CAP_CTRL_REQSEQ_SHIFT;
+ control->final = (enhanced & L2CAP_CTRL_FINAL) >>
+ L2CAP_CTRL_FINAL_SHIFT;
+
+ if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
+ control->frame_type = 's';
+ control->poll = (enhanced & L2CAP_CTRL_POLL) >>
+ L2CAP_CTRL_POLL_SHIFT;
+ control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
+ L2CAP_CTRL_SUPERVISE_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ control->frame_type = 'i';
+ control->sar = (enhanced & L2CAP_CTRL_SAR) >>
+ L2CAP_CTRL_SAR_SHIFT;
+ control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
+ L2CAP_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static u32 __pack_extended_control(struct bt_l2cap_control *control)
+{
+ u32 packed;
+
+ packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
+ L2CAP_EXT_CTRL_REQSEQ;
+ packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
+ L2CAP_EXT_CTRL_FINAL;
+
+ if (control->frame_type == 's') {
+ packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
+ L2CAP_EXT_CTRL_POLL;
+ packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
+ L2CAP_EXT_CTRL_SUPERVISE;
+ packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
+ } else {
+ packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
+ L2CAP_EXT_CTRL_SAR;
+ packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
+ L2CAP_EXT_CTRL_TXSEQ;
+ }
+
+ return packed;
+}
+
+static void __get_extended_control(u32 extended,
+ struct bt_l2cap_control *control)
+{
+ control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
+ L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
+ L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
+ control->frame_type = 's';
+ control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
+ L2CAP_EXT_CTRL_POLL_SHIFT;
+ control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
+ L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ control->frame_type = 'i';
+ control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
+ L2CAP_EXT_CTRL_SAR_SHIFT;
+ control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
+ L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
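Both encodings carry the same logical fields; only the width differs (16-bit enhanced vs 32-bit extended control). A round-trip sketch using the helpers above, with arbitrary field values:

struct bt_l2cap_control c = {
	.frame_type = 's',
	.super = 0,	/* 0 == RR */
	.poll = 1,
	.final = 0,
	.reqseq = 42,
};
u16 enhanced = __pack_enhanced_control(&c);
u32 extended = __pack_extended_control(&c);

__get_enhanced_control(enhanced, &c);	/* recovers the same S-frame fields */
__get_extended_control(extended, &c);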
+static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ __cancel_delayed_work(&pi->ack_work);
+}
+
+static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
+ if (!delayed_work_pending(&pi->ack_work)) {
+ queue_delayed_work(_l2cap_wq, &pi->ack_work,
+ msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
+ }
+}
+
+static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ __cancel_delayed_work(&pi->retrans_work);
+}
+
+static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
+ __cancel_delayed_work(&pi->retrans_work);
+ queue_delayed_work(_l2cap_wq, &pi->retrans_work,
+ msecs_to_jiffies(pi->retrans_timeout));
+ }
+}
+
+static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ __cancel_delayed_work(&pi->monitor_work);
+}
+
+static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
+{
+ BT_DBG("pi %p", pi);
+ l2cap_ertm_stop_retrans_timer(pi);
+ __cancel_delayed_work(&pi->monitor_work);
+ if (pi->monitor_timeout) {
+ queue_delayed_work(_l2cap_wq, &pi->monitor_work,
+ msecs_to_jiffies(pi->monitor_timeout));
+ }
+}
+
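The timers above interact in the usual ERTM pattern; a condensed summary of the flow they implement (derived from the helpers above and standard ERTM behavior, not new code):

/*
 * I-frame sent, unacked    -> l2cap_ertm_start_retrans_timer()
 * retrans timer fires      -> poll the peer, l2cap_ertm_start_monitor_timer()
 *                             (which stops the retrans timer, see above)
 * F-bit reply arrives      -> l2cap_ertm_stop_monitor_timer(), retrans
 *                             restarted if frames are still unacked
 * rx I-frame needs an ack  -> l2cap_ertm_start_ack_timer() batches the RR
 */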
+static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
{
u16 cid = L2CAP_CID_DYN_START;
for (; cid < L2CAP_CID_DYN_END; cid++) {
- if (!__l2cap_get_chan_by_scid(conn, cid))
+ if (!__l2cap_get_chan_by_scid(l, cid))
return cid;
}
return 0;
}
-static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
+static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
- BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
+ sock_hold(sk);
- if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
- chan_hold(chan);
+ if (l->head)
+ l2cap_pi(l->head)->prev_c = sk;
+
+ l2cap_pi(sk)->next_c = l->head;
+ l2cap_pi(sk)->prev_c = NULL;
+ l->head = sk;
}
-static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
+static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
- BT_DBG("chan %p state %d", chan, chan->state);
+ struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
- if (timer_pending(timer) && del_timer(timer))
- chan_put(chan);
+ write_lock_bh(&l->lock);
+ if (sk == l->head)
+ l->head = next;
+
+ if (next)
+ l2cap_pi(next)->prev_c = prev;
+ if (prev)
+ l2cap_pi(prev)->next_c = next;
+ write_unlock_bh(&l->lock);
+
+ __sock_put(sk);
}
-static void l2cap_state_change(struct l2cap_chan *chan, int state)
+static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
- chan->state = state;
- chan->ops->state_change(chan->data, state);
-}
+ struct l2cap_chan_list *l = &conn->chan_list;
-static void l2cap_chan_timeout(unsigned long arg)
-{
- struct l2cap_chan *chan = (struct l2cap_chan *) arg;
- struct sock *sk = chan->sk;
- int reason;
-
- BT_DBG("chan %p state %d", chan, chan->state);
-
- bh_lock_sock(sk);
-
- if (sock_owned_by_user(sk)) {
- /* sk is owned by user. Try again later */
- __set_chan_timer(chan, HZ / 5);
- bh_unlock_sock(sk);
- chan_put(chan);
- return;
- }
-
- if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
- reason = ECONNREFUSED;
- else if (chan->state == BT_CONNECT &&
- chan->sec_level != BT_SECURITY_SDP)
- reason = ECONNREFUSED;
- else
- reason = ETIMEDOUT;
-
- l2cap_chan_close(chan, reason);
-
- bh_unlock_sock(sk);
-
- chan->ops->close(chan->data);
- chan_put(chan);
-}
-
-struct l2cap_chan *l2cap_chan_create(struct sock *sk)
-{
- struct l2cap_chan *chan;
-
- chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
- if (!chan)
- return NULL;
-
- chan->sk = sk;
-
- write_lock_bh(&chan_list_lock);
- list_add(&chan->global_l, &chan_list);
- write_unlock_bh(&chan_list_lock);
-
- setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
-
- chan->state = BT_OPEN;
-
- atomic_set(&chan->refcnt, 1);
-
- return chan;
-}
-
-void l2cap_chan_destroy(struct l2cap_chan *chan)
-{
- write_lock_bh(&chan_list_lock);
- list_del(&chan->global_l);
- write_unlock_bh(&chan_list_lock);
-
- chan_put(chan);
-}
-
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
-{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
- chan->psm, chan->dcid);
+ l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
conn->disc_reason = 0x13;
- chan->conn = conn;
+ l2cap_pi(sk)->conn = conn;
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
+ if (!l2cap_pi(sk)->fixed_channel &&
+ (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
if (conn->hcon->type == LE_LINK) {
/* LE connection */
- chan->omtu = L2CAP_LE_DEFAULT_MTU;
- chan->scid = L2CAP_CID_LE_DATA;
- chan->dcid = L2CAP_CID_LE_DATA;
+ if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
+ l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
+ if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
+ l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
+
+ l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
+ l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
} else {
/* Alloc CID for connection-oriented socket */
- chan->scid = l2cap_alloc_cid(conn);
- chan->omtu = L2CAP_DEFAULT_MTU;
+ l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
+ l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
}
- } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
+ } else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
- chan->scid = L2CAP_CID_CONN_LESS;
- chan->dcid = L2CAP_CID_CONN_LESS;
- chan->omtu = L2CAP_DEFAULT_MTU;
- } else {
+ l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
+ l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
+ l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+ } else if (sk->sk_type == SOCK_RAW) {
/* Raw socket can send/recv signalling messages only */
- chan->scid = L2CAP_CID_SIGNALING;
- chan->dcid = L2CAP_CID_SIGNALING;
- chan->omtu = L2CAP_DEFAULT_MTU;
+ l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
+ l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
+ l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
}
+ /* Otherwise, do not set scid/dcid/omtu. These will be set up
+ * by l2cap_fixed_channel_config()
+ */
- chan_hold(chan);
-
- list_add(&chan->list, &conn->chan_l);
+ __l2cap_chan_link(l, sk);
}
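Channels now hang off the connection as an intrusive doubly-linked list threaded through l2cap_pinfo (next_c/prev_c). A sketch of walking it under the list lock, in the same style l2cap_conn_start() uses below:

struct sock *sk;

read_lock(&conn->chan_list.lock);
for (sk = conn->chan_list.head; sk; sk = l2cap_pi(sk)->next_c) {
	/* e.g. match on l2cap_pi(sk)->scid or dcid */
}
read_unlock(&conn->chan_list.lock);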
/* Delete channel.
* Must be called on the locked socket. */
-static void l2cap_chan_del(struct l2cap_chan *chan, int err)
+void l2cap_chan_del(struct sock *sk, int err)
{
- struct sock *sk = chan->sk;
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bt_sk(sk)->parent;
- __clear_chan_timer(chan);
+ l2cap_sock_clear_timer(sk);
- BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
+ BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
- /* Delete from channel list */
- write_lock_bh(&conn->chan_lock);
- list_del(&chan->list);
- write_unlock_bh(&conn->chan_lock);
- chan_put(chan);
-
- chan->conn = NULL;
- hci_conn_put(conn->hcon);
+ /* Unlink from channel list */
+ l2cap_chan_unlink(&conn->chan_list, sk);
+ l2cap_pi(sk)->conn = NULL;
+ if (!l2cap_pi(sk)->fixed_channel)
+ hci_conn_put(conn->hcon);
}
- l2cap_state_change(chan, BT_CLOSED);
+ if (l2cap_pi(sk)->ampcon) {
+ l2cap_pi(sk)->ampcon->l2cap_data = NULL;
+ l2cap_pi(sk)->ampcon = NULL;
+ if (l2cap_pi(sk)->ampchan) {
+ hci_chan_put(l2cap_pi(sk)->ampchan);
+ if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
+ l2cap_deaggregate(l2cap_pi(sk)->ampchan,
+ l2cap_pi(sk));
+ }
+ l2cap_pi(sk)->ampchan = NULL;
+ l2cap_pi(sk)->amp_id = 0;
+ }
+
+ sk->sk_state = BT_CLOSED;
sock_set_flag(sk, SOCK_ZAPPED);
if (err)
@@ -377,109 +570,24 @@
} else
sk->sk_state_change(sk);
- if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
- test_bit(CONF_INPUT_DONE, &chan->conf_state)))
- return;
+ skb_queue_purge(TX_QUEUE(sk));
- skb_queue_purge(&chan->tx_q);
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+ if (l2cap_pi(sk)->sdu)
+ kfree_skb(l2cap_pi(sk)->sdu);
- if (chan->mode == L2CAP_MODE_ERTM) {
- struct srej_list *l, *tmp;
+ skb_queue_purge(SREJ_QUEUE(sk));
- __clear_retrans_timer(chan);
- __clear_monitor_timer(chan);
- __clear_ack_timer(chan);
-
- skb_queue_purge(&chan->srej_q);
-
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
- list_del(&l->list);
- kfree(l);
- }
+ __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
}
}
-static void l2cap_chan_cleanup_listen(struct sock *parent)
+static inline u8 l2cap_get_auth_type(struct sock *sk)
{
- struct sock *sk;
-
- BT_DBG("parent %p", parent);
-
- /* Close not yet accepted channels */
- while ((sk = bt_accept_dequeue(parent, NULL))) {
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- __clear_chan_timer(chan);
- lock_sock(sk);
- l2cap_chan_close(chan, ECONNRESET);
- release_sock(sk);
- chan->ops->close(chan->data);
- }
-}
-
-void l2cap_chan_close(struct l2cap_chan *chan, int reason)
-{
- struct l2cap_conn *conn = chan->conn;
- struct sock *sk = chan->sk;
-
- BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
-
- switch (chan->state) {
- case BT_LISTEN:
- l2cap_chan_cleanup_listen(sk);
-
- l2cap_state_change(chan, BT_CLOSED);
- sock_set_flag(sk, SOCK_ZAPPED);
- break;
-
- case BT_CONNECTED:
- case BT_CONFIG:
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
- conn->hcon->type == ACL_LINK) {
- __clear_chan_timer(chan);
- __set_chan_timer(chan, sk->sk_sndtimeo);
- l2cap_send_disconn_req(conn, chan, reason);
- } else
- l2cap_chan_del(chan, reason);
- break;
-
- case BT_CONNECT2:
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
- conn->hcon->type == ACL_LINK) {
- struct l2cap_conn_rsp rsp;
- __u16 result;
-
- if (bt_sk(sk)->defer_setup)
- result = L2CAP_CR_SEC_BLOCK;
- else
- result = L2CAP_CR_BAD_PSM;
- l2cap_state_change(chan, BT_DISCONN);
-
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
- sizeof(rsp), &rsp);
- }
-
- l2cap_chan_del(chan, reason);
- break;
-
- case BT_CONNECT:
- case BT_DISCONN:
- l2cap_chan_del(chan, reason);
- break;
-
- default:
- sock_set_flag(sk, SOCK_ZAPPED);
- break;
- }
-}
-
-static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
-{
- if (chan->chan_type == L2CAP_CHAN_RAW) {
- switch (chan->sec_level) {
+ if (sk->sk_type == SOCK_RAW) {
+ switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
return HCI_AT_DEDICATED_BONDING_MITM;
case BT_SECURITY_MEDIUM:
@@ -487,16 +595,16 @@
default:
return HCI_AT_NO_BONDING;
}
- } else if (chan->psm == cpu_to_le16(0x0001)) {
- if (chan->sec_level == BT_SECURITY_LOW)
- chan->sec_level = BT_SECURITY_SDP;
+ } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- if (chan->sec_level == BT_SECURITY_HIGH)
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
return HCI_AT_NO_BONDING_MITM;
else
return HCI_AT_NO_BONDING;
} else {
- switch (chan->sec_level) {
+ switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
return HCI_AT_GENERAL_BONDING_MITM;
case BT_SECURITY_MEDIUM:
@@ -508,17 +616,18 @@
}
/* Service level security */
-static inline int l2cap_check_security(struct l2cap_chan *chan)
+static inline int l2cap_check_security(struct sock *sk)
{
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
__u8 auth_type;
- auth_type = l2cap_get_auth_type(chan);
+ auth_type = l2cap_get_auth_type(sk);
- return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
+ return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
+ auth_type);
}
-static u8 l2cap_get_ident(struct l2cap_conn *conn)
+u8 l2cap_get_ident(struct l2cap_conn *conn)
{
u8 id;
@@ -540,7 +649,34 @@
return id;
}
-static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static void apply_fcs(struct sk_buff *skb)
+{
+ size_t len;
+ u16 partial_crc;
+ struct sk_buff *iter;
+ struct sk_buff *final_frag = skb;
+
+ if (skb_has_frag_list(skb))
+ len = skb_headlen(skb);
+ else
+ len = skb->len - L2CAP_FCS_SIZE;
+
+ partial_crc = crc16(0, (u8 *) skb->data, len);
+
+ skb_walk_frags(skb, iter) {
+ len = iter->len;
+ if (!iter->next)
+ len -= L2CAP_FCS_SIZE;
+
+ partial_crc = crc16(partial_crc, iter->data, len);
+ final_frag = iter;
+ }
+
+ put_unaligned_le16(partial_crc,
+ final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
+}
+
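apply_fcs() accumulates the CRC across skb fragments so a fragmented PDU never has to be linearized. Over a flat buffer the same computation reduces to the following (pdu and pdu_len are hypothetical names):

u16 fcs = crc16(0, pdu, pdu_len - L2CAP_FCS_SIZE);
put_unaligned_le16(fcs, pdu + pdu_len - L2CAP_FCS_SIZE);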
+void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
u8 flags;
@@ -555,97 +691,57 @@
else
flags = ACL_START;
- bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+ bt_cb(skb)->force_active = 1;
- hci_send_acl(conn->hcon, skb, flags);
+ hci_send_acl(conn->hcon, NULL, skb, flags);
}
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
+static inline int __l2cap_no_conn_pending(struct sock *sk)
{
- struct sk_buff *skb;
- struct l2cap_hdr *lh;
- struct l2cap_conn *conn = chan->conn;
- int count, hlen = L2CAP_HDR_SIZE + 2;
- u8 flags;
-
- if (chan->state != BT_CONNECTED)
- return;
-
- if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
-
- BT_DBG("chan %p, control 0x%2.2x", chan, control);
-
- count = min_t(unsigned int, conn->mtu, hlen);
- control |= L2CAP_CTRL_FRAME_TYPE;
-
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
-
- if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
- control |= L2CAP_CTRL_POLL;
-
- skb = bt_skb_alloc(count, GFP_ATOMIC);
- if (!skb)
- return;
-
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(chan->dcid);
- put_unaligned_le16(control, skb_put(skb, 2));
-
- if (chan->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *)lh, count - 2);
- put_unaligned_le16(fcs, skb_put(skb, 2));
- }
-
- if (lmp_no_flush_capable(conn->hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = chan->force_active;
-
- hci_send_acl(chan->conn->hcon, skb, flags);
+ return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
}
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
+static void l2cap_send_conn_req(struct sock *sk)
{
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
- set_bit(CONN_RNR_SENT, &chan->conn_state);
- } else
- control |= L2CAP_SUPER_RCV_READY;
+ struct l2cap_conn_req req;
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+ req.psm = l2cap_pi(sk)->psm;
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
- l2cap_send_sframe(chan, control);
+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_REQ, sizeof(req), &req);
}
-static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
+static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
{
- return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
+ struct l2cap_create_chan_req req;
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+ req.psm = l2cap_pi(sk)->psm;
+ req.amp_id = amp_id;
+
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
+ l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
+
+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+ L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
}
-static void l2cap_do_start(struct l2cap_chan *chan)
+static void l2cap_do_start(struct sock *sk)
{
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
- if (l2cap_check_security(chan) &&
- __l2cap_no_conn_pending(chan)) {
- struct l2cap_conn_req req;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
+ if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
- chan->ident = l2cap_get_ident(conn);
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
-
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
- sizeof(req), &req);
+ if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
+ amp_create_physical(l2cap_pi(sk)->conn, sk);
+ else
+ l2cap_send_conn_req(sk);
}
} else {
struct l2cap_info_req req;
@@ -678,87 +774,87 @@
}
}
-static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
- struct sock *sk;
struct l2cap_disconn_req req;
if (!conn)
return;
- sk = chan->sk;
+ skb_queue_purge(TX_QUEUE(sk));
- if (chan->mode == L2CAP_MODE_ERTM) {
- __clear_retrans_timer(chan);
- __clear_monitor_timer(chan);
- __clear_ack_timer(chan);
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+ skb_queue_purge(SREJ_QUEUE(sk));
+
+ __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
}
- req.dcid = cpu_to_le16(chan->dcid);
- req.scid = cpu_to_le16(chan->scid);
+ req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
- l2cap_state_change(chan, BT_DISCONN);
+ sk->sk_state = BT_DISCONN;
sk->sk_err = err;
}
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
- struct l2cap_chan *chan, *tmp;
+ struct l2cap_chan_list *l = &conn->chan_list;
+ struct sock_del_list del, *tmp1, *tmp2;
+ struct sock *sk;
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
+ INIT_LIST_HEAD(&del.list);
- list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
+ read_lock(&l->lock);
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
bh_unlock_sock(sk);
continue;
}
- if (chan->state == BT_CONNECT) {
- struct l2cap_conn_req req;
-
- if (!l2cap_check_security(chan) ||
- !__l2cap_no_conn_pending(chan)) {
+ if (sk->sk_state == BT_CONNECT) {
+ if (!l2cap_check_security(sk) ||
+ !__l2cap_no_conn_pending(sk)) {
bh_unlock_sock(sk);
continue;
}
- if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
- && test_bit(CONF_STATE2_DEVICE,
- &chan->conf_state)) {
- /* l2cap_chan_close() calls list_del(chan)
- * so release the lock */
- read_unlock(&conn->chan_lock);
- l2cap_chan_close(chan, ECONNRESET);
- read_lock(&conn->chan_lock);
+ if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
+ conn->feat_mask)
+ && l2cap_pi(sk)->conf_state &
+ L2CAP_CONF_STATE2_DEVICE) {
+ tmp1 = kzalloc(sizeof(struct sock_del_list),
+ GFP_ATOMIC);
+ if (!tmp1) {
+ /* Out of memory; skip the deferred close */
+ bh_unlock_sock(sk);
+ continue;
+ }
+ tmp1->sk = sk;
+ list_add_tail(&tmp1->list, &del.list);
bh_unlock_sock(sk);
continue;
}
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
- chan->ident = l2cap_get_ident(conn);
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
+ if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
+ amp_create_physical(l2cap_pi(sk)->conn, sk);
+ else
+ l2cap_send_conn_req(sk);
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
- sizeof(req), &req);
-
- } else if (chan->state == BT_CONNECT2) {
+ } else if (sk->sk_state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
char buf[128];
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- if (l2cap_check_security(chan)) {
+ if (l2cap_check_security(sk)) {
if (bt_sk(sk)->defer_setup) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -767,7 +863,7 @@
parent->sk_data_ready(parent, 0);
} else {
- l2cap_state_change(chan, BT_CONFIG);
+ sk->sk_state = BT_CONFIG;
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
}
@@ -776,75 +872,86 @@
rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
}
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
- sizeof(rsp), &rsp);
+ if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
+ l2cap_pi(sk)->amp_id) {
+ amp_accept_physical(conn,
+ l2cap_pi(sk)->amp_id, sk);
+ bh_unlock_sock(sk);
+ continue;
+ }
- if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
rsp.result != L2CAP_CR_SUCCESS) {
bh_unlock_sock(sk);
continue;
}
- set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
- chan->num_conf_req++;
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
}
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
+
+ list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
+ bh_lock_sock(tmp1->sk);
+ __l2cap_sock_close(tmp1->sk, ECONNRESET);
+ bh_unlock_sock(tmp1->sk);
+ list_del(&tmp1->list);
+ kfree(tmp1);
+ }
}
/* Find socket with cid and source bdaddr.
* Returns closest match (the returned socket is not locked).
*/
-static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
+static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
- struct l2cap_chan *c, *c1 = NULL;
+ struct sock *sk = NULL, *sk1 = NULL;
+ struct hlist_node *node;
- read_lock(&chan_list_lock);
+ read_lock(&l2cap_sk_list.lock);
- list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
- if (state && c->state != state)
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ if (state && sk->sk_state != state)
continue;
- if (c->scid == cid) {
+ if (l2cap_pi(sk)->scid == cid) {
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
- read_unlock(&chan_list_lock);
- return c;
- }
+ if (!bacmp(&bt_sk(sk)->src, src))
+ break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- c1 = c;
+ sk1 = sk;
}
}
- read_unlock(&chan_list_lock);
+ read_unlock(&l2cap_sk_list.lock);
- return c1;
+ return node ? sk : sk1;
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
- struct sock *parent, *sk;
- struct l2cap_chan *chan, *pchan;
+ struct l2cap_chan_list *list = &conn->chan_list;
+ struct sock *parent, *uninitialized_var(sk);
BT_DBG("");
/* Check if we have socket listening on cid */
- pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+ parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
conn->src);
- if (!pchan)
+ if (!parent)
return;
- parent = pchan->sk;
-
bh_lock_sock(parent);
/* Check for backlog size */
@@ -853,102 +960,82 @@
goto clean;
}
- chan = pchan->ops->new_connection(pchan->data);
- if (!chan)
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+ if (!sk)
goto clean;
- sk = chan->sk;
-
- write_lock_bh(&conn->chan_lock);
+ write_lock_bh(&list->lock);
hci_conn_hold(conn->hcon);
+ l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
bt_accept_enqueue(parent, sk);
- __l2cap_chan_add(conn, chan);
+ __l2cap_chan_add(conn, sk);
- __set_chan_timer(chan, sk->sk_sndtimeo);
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
- l2cap_state_change(chan, BT_CONNECTED);
+ sk->sk_state = BT_CONNECTED;
parent->sk_data_ready(parent, 0);
- write_unlock_bh(&conn->chan_lock);
+ write_unlock_bh(&list->lock);
clean:
bh_unlock_sock(parent);
}
-static void l2cap_chan_ready(struct sock *sk)
-{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- struct sock *parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
- chan->conf_state = 0;
- __clear_chan_timer(chan);
-
- l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
-
- if (parent)
- parent->sk_data_ready(parent, 0);
-}
-
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
- struct l2cap_chan *chan;
+ struct l2cap_chan_list *l = &conn->chan_list;
+ struct sock *sk;
BT_DBG("conn %p", conn);
if (!conn->hcon->out && conn->hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);
- read_lock(&conn->chan_lock);
+ read_lock(&l->lock);
- list_for_each_entry(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
-
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- if (conn->hcon->type == LE_LINK) {
- if (smp_conn_security(conn, chan->sec_level))
+ if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
+ if (smp_conn_security(conn, l2cap_pi(sk)->sec_level))
l2cap_chan_ready(sk);
- } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
- __clear_chan_timer(chan);
- l2cap_state_change(chan, BT_CONNECTED);
+ } else if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
+ l2cap_sock_clear_timer(sk);
+ sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
-
- } else if (chan->state == BT_CONNECT)
- l2cap_do_start(chan);
+ } else if (sk->sk_state == BT_CONNECT)
+ l2cap_do_start(sk);
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
}
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
- struct l2cap_chan *chan;
+ struct l2cap_chan_list *l = &conn->chan_list;
+ struct sock *sk;
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
+ read_lock(&l->lock);
- list_for_each_entry(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
-
- if (chan->force_reliable)
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ if (l2cap_pi(sk)->force_reliable)
sk->sk_err = err;
}
- read_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
}
static void l2cap_info_timeout(unsigned long arg)
@@ -961,38 +1048,6 @@
l2cap_conn_start(conn);
}
-static void l2cap_conn_del(struct hci_conn *hcon, int err)
-{
- struct l2cap_conn *conn = hcon->l2cap_data;
- struct l2cap_chan *chan, *l;
- struct sock *sk;
-
- if (!conn)
- return;
-
- BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
-
- kfree_skb(conn->rx_skb);
-
- /* Kill channels */
- list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
- sk = chan->sk;
- bh_lock_sock(sk);
- l2cap_chan_del(chan, err);
- bh_unlock_sock(sk);
- chan->ops->close(chan->data);
- }
-
- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
- del_timer_sync(&conn->info_timer);
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
- del_timer(&conn->security_timer);
-
- hcon->l2cap_data = NULL;
- kfree(conn);
-}
-
static void security_timeout(unsigned long arg)
{
struct l2cap_conn *conn = (void *) arg;
@@ -1027,9 +1082,7 @@
conn->feat_mask = 0;
spin_lock_init(&conn->lock);
- rwlock_init(&conn->chan_lock);
-
- INIT_LIST_HEAD(&conn->chan_l);
+ rwlock_init(&conn->chan_list.lock);
if (hcon->type == LE_LINK)
setup_timer(&conn->security_timer, security_timeout,
@@ -1043,11 +1096,52 @@
return conn;
}
-static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
- write_lock_bh(&conn->chan_lock);
- __l2cap_chan_add(conn, chan);
- write_unlock_bh(&conn->chan_lock);
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct sock *sk;
+ struct sock *next;
+
+ if (!conn)
+ return;
+
+ BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+ if ((conn->hcon == hcon) && (conn->rx_skb))
+ kfree_skb(conn->rx_skb);
+
+ BT_DBG("conn->hcon %p", conn->hcon);
+
+ /* Kill channels */
+ for (sk = conn->chan_list.head; sk; ) {
+ BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
+ if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
+ next = l2cap_pi(sk)->next_c;
+ bh_lock_sock(sk);
+ l2cap_chan_del(sk, err);
+ bh_unlock_sock(sk);
+ l2cap_sock_kill(sk);
+ sk = next;
+ } else
+ sk = l2cap_pi(sk)->next_c;
+ }
+
+ if (conn->hcon == hcon) {
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+ del_timer_sync(&conn->info_timer);
+
+ hcon->l2cap_data = NULL;
+
+ kfree(conn);
+ }
+}
+
+static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
+{
+ struct l2cap_chan_list *l = &conn->chan_list;
+ write_lock_bh(&l->lock);
+ __l2cap_chan_add(conn, sk);
+ write_unlock_bh(&l->lock);
}
/* ---- Socket interface ---- */
@@ -1055,39 +1149,35 @@
/* Find socket with psm and source bdaddr.
* Returns closest match.
*/
-static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
- struct l2cap_chan *c, *c1 = NULL;
+ struct sock *sk = NULL, *sk1 = NULL;
+ struct hlist_node *node;
- read_lock(&chan_list_lock);
+ read_lock(&l2cap_sk_list.lock);
- list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
- if (state && c->state != state)
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ if (state && sk->sk_state != state)
continue;
- if (c->psm == psm) {
+ if (l2cap_pi(sk)->psm == psm) {
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
- read_unlock(&chan_list_lock);
- return c;
- }
+ if (!bacmp(&bt_sk(sk)->src, src))
+ break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- c1 = c;
+ sk1 = sk;
}
}
- read_unlock(&chan_list_lock);
+ read_unlock(&l2cap_sk_list.lock);
- return c1;
+ return node ? sk : sk1;
}
-int l2cap_chan_connect(struct l2cap_chan *chan)
+int l2cap_do_connect(struct sock *sk)
{
- struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
@@ -1097,7 +1187,7 @@
int err;
BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
- chan->psm);
+ l2cap_pi(sk)->psm);
hdev = hci_get_route(dst, src);
if (!hdev)
@@ -1105,42 +1195,64 @@
hci_dev_lock_bh(hdev);
- auth_type = l2cap_get_auth_type(chan);
+ auth_type = l2cap_get_auth_type(sk);
- if (chan->dcid == L2CAP_CID_LE_DATA)
- hcon = hci_connect(hdev, LE_LINK, 0, dst,
- chan->sec_level, auth_type);
- else
- hcon = hci_connect(hdev, ACL_LINK, 0, dst,
- chan->sec_level, auth_type);
+ if (l2cap_pi(sk)->fixed_channel) {
+ /* Fixed channels piggyback on existing ACL connections */
+ hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (!hcon || !hcon->l2cap_data) {
+ err = -ENOTCONN;
+ goto done;
+ }
- if (IS_ERR(hcon)) {
- err = PTR_ERR(hcon);
- goto done;
- }
+ conn = hcon->l2cap_data;
+ } else {
+ if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
+ hcon = hci_connect(hdev, LE_LINK, 0, dst,
+ l2cap_pi(sk)->sec_level, auth_type);
+ else
+ hcon = hci_connect(hdev, ACL_LINK, 0, dst,
+ l2cap_pi(sk)->sec_level, auth_type);
- conn = l2cap_conn_add(hcon, 0);
- if (!conn) {
- hci_conn_put(hcon);
- err = -ENOMEM;
- goto done;
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto done;
+ }
+
+ conn = l2cap_conn_add(hcon, 0);
+ if (!conn) {
+ hci_conn_put(hcon);
+ err = -ENOMEM;
+ goto done;
+ }
}
/* Update source addr of the socket */
bacpy(src, conn->src);
- l2cap_chan_add(conn, chan);
+ l2cap_chan_add(conn, sk);
- l2cap_state_change(chan, BT_CONNECT);
- __set_chan_timer(chan, sk->sk_sndtimeo);
+ BT_DBG("hcon->state %d", (int) hcon->state);
- if (hcon->state == BT_CONNECTED) {
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
- __clear_chan_timer(chan);
- if (l2cap_check_security(chan))
- l2cap_state_change(chan, BT_CONNECTED);
- } else
- l2cap_do_start(chan);
+ if (l2cap_pi(sk)->fixed_channel) {
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ } else {
+ sk->sk_state = BT_CONNECT;
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+ sk->sk_state_change(sk);
+
+ if (hcon->state == BT_CONNECTED) {
+ if (sk->sk_type != SOCK_SEQPACKET &&
+ sk->sk_type != SOCK_STREAM) {
+ l2cap_sock_clear_timer(sk);
+ if (l2cap_check_security(sk)) {
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ }
+ } else
+ l2cap_do_start(sk);
+ }
}
err = 0;
@@ -1153,14 +1265,15 @@
int __l2cap_wait_ack(struct sock *sk)
{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
DECLARE_WAITQUEUE(wait, current);
int err = 0;
int timeo = HZ/5;
add_wait_queue(sk_sleep(sk), &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- while (chan->unacked_frames > 0 && chan->conn) {
+ while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
+ atomic_read(&l2cap_pi(sk)->ertm_queued)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
if (!timeo)
timeo = HZ/5;
@@ -1172,7 +1285,6 @@
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
@@ -1183,287 +1295,322 @@
return err;
}
-static void l2cap_monitor_timeout(unsigned long arg)
+static void l2cap_ertm_tx_worker(struct work_struct *work)
{
- struct l2cap_chan *chan = (void *) arg;
- struct sock *sk = chan->sk;
+ struct l2cap_pinfo *pi =
+ container_of(work, struct l2cap_pinfo, tx_work);
+ struct sock *sk = (struct sock *)pi;
+ BT_DBG("%p", pi);
- BT_DBG("chan %p", chan);
-
- bh_lock_sock(sk);
- if (chan->retry_count >= chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- bh_unlock_sock(sk);
- return;
- }
-
- chan->retry_count++;
- __set_monitor_timer(chan);
-
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- bh_unlock_sock(sk);
+ lock_sock(sk);
+ l2cap_ertm_send(sk);
+ release_sock(sk);
}
-static void l2cap_retrans_timeout(unsigned long arg)
+static void l2cap_skb_destructor(struct sk_buff *skb)
{
- struct l2cap_chan *chan = (void *) arg;
- struct sock *sk = chan->sk;
+ struct sock *sk = skb->sk;
+ int queued;
- BT_DBG("chan %p", chan);
-
- bh_lock_sock(sk);
- chan->retry_count = 1;
- __set_monitor_timer(chan);
-
- set_bit(CONN_WAIT_F, &chan->conn_state);
-
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- bh_unlock_sock(sk);
+ queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
+ if (queued < L2CAP_MIN_ERTM_QUEUED)
+ queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
}
-static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
+void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
- struct sk_buff *skb;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
- while ((skb = skb_peek(&chan->tx_q)) &&
- chan->unacked_frames) {
- if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
- break;
+ BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
- skb = skb_dequeue(&chan->tx_q);
- kfree_skb(skb);
+ if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
+ pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
+ BT_DBG("Sending on AMP connection %p %p",
+ pi->ampcon, pi->ampchan);
+ if (pi->ampchan)
+ hci_send_acl(pi->ampcon, pi->ampchan, skb,
+ ACL_COMPLETE);
+ else
+ kfree_skb(skb);
+ } else {
+ u16 flags;
- chan->unacked_frames--;
- }
+ bt_cb(skb)->force_active = pi->force_active;
+ BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);
- if (!chan->unacked_frames)
- __clear_retrans_timer(chan);
-}
+ if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
+ !l2cap_pi(sk)->flushable)
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
-void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
-{
- struct hci_conn *hcon = chan->conn->hcon;
- u16 flags;
-
- BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
-
- if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = chan->force_active;
- hci_send_acl(hcon, skb, flags);
-}
-
-void l2cap_streaming_send(struct l2cap_chan *chan)
-{
- struct sk_buff *skb;
- u16 control, fcs;
-
- while ((skb = skb_dequeue(&chan->tx_q))) {
- control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
- control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
- put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
-
- if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
- put_unaligned_le16(fcs, skb->data + skb->len - 2);
- }
-
- l2cap_do_send(chan, skb);
-
- chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+ hci_send_acl(pi->conn->hcon, NULL, skb, flags);
}
}
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
+int l2cap_ertm_send(struct sock *sk)
{
struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct bt_l2cap_control *control;
+ int sent = 0;
- skb = skb_peek(&chan->tx_q);
- if (!skb)
- return;
+ BT_DBG("sk %p", sk);
- do {
- if (bt_cb(skb)->tx_seq == tx_seq)
- break;
-
- if (skb_queue_is_last(&chan->tx_q, skb))
- return;
-
- } while ((skb = skb_queue_next(&chan->tx_q, skb)));
-
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- return;
- }
-
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
-
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
-
- control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
-
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
-
- if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
- }
-
- l2cap_do_send(chan, tx_skb);
-}
-
-int l2cap_ertm_send(struct l2cap_chan *chan)
-{
- struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
- int nsent = 0;
-
- if (chan->state != BT_CONNECTED)
+ if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
- while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
+ if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+ return 0;
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- break;
+ if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
+ return 0;
+
+ while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
+ atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
+ (pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {
+
+ skb = sk->sk_send_head;
+
+ bt_cb(skb)->retries = 1;
+ control = &bt_cb(skb)->control;
+
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ control->final = 1;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
+ control->reqseq = pi->buffer_seq;
+ pi->last_acked_seq = pi->buffer_seq;
+ control->txseq = pi->next_tx_seq;
+
+ if (pi->extended_control) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
}
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ apply_fcs(skb);
+
+ /* Clone after data has been modified. Data is assumed to be
+ read-only (for locking purposes) on cloned sk_buffs.
+ */
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ if (!tx_skb)
+ break;
+
+ tx_skb->sk = sk;
+ tx_skb->destructor = l2cap_skb_destructor;
+ atomic_inc(&pi->ertm_queued);
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
+ l2cap_do_send(sk, tx_skb);
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ BT_DBG("Sent txseq %d", (int)control->txseq);
- control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ l2cap_ertm_start_retrans_timer(pi);
+ pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
+ pi->unacked_frames += 1;
+ pi->frames_sent += 1;
+ sent += 1;
- if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
+ if (skb_queue_is_last(TX_QUEUE(sk), skb))
+ sk->sk_send_head = NULL;
+ else
+ sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+ }
+
+ BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
+ (int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
+ atomic_read(&pi->ertm_queued));
+
+ return sent;
+}
+
+int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
+{
+ struct sk_buff *skb;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct bt_l2cap_control *control;
+ int sent = 0;
+
+ BT_DBG("sk %p, skbs %p", sk, skbs);
+
+ if (sk->sk_state != BT_CONNECTED)
+ return -ENOTCONN;
+
+ if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
+ return 0;
+
+ skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
+
+ BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
+ while (!skb_queue_empty(TX_QUEUE(sk))) {
+
+ skb = skb_dequeue(TX_QUEUE(sk));
+
+ BT_DBG("skb %p", skb);
+
+ bt_cb(skb)->retries = 1;
+ control = &bt_cb(skb)->control;
+
+ BT_DBG("control %p", control);
+
+ control->reqseq = 0;
+ control->txseq = pi->next_tx_seq;
+
+ if (pi->extended_control) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
}
- l2cap_do_send(chan, tx_skb);
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ apply_fcs(skb);
- __set_retrans_timer(chan);
+ l2cap_do_send(sk, skb);
- bt_cb(skb)->tx_seq = chan->next_tx_seq;
- chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+ BT_DBG("Sent txseq %d", (int)control->txseq);
- if (bt_cb(skb)->retries == 1)
- chan->unacked_frames++;
-
- chan->frames_sent++;
-
- if (skb_queue_is_last(&chan->tx_q, skb))
- chan->tx_send_head = NULL;
- else
- chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
-
- nsent++;
+ pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
+ pi->frames_sent += 1;
+ sent += 1;
}
- return nsent;
+ BT_DBG("Sent %d", sent);
+
+ return 0;
}
-static int l2cap_retransmit_frames(struct l2cap_chan *chan)
+static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
{
- int ret;
-
- if (!skb_queue_empty(&chan->tx_q))
- chan->tx_send_head = chan->tx_q.next;
-
- chan->next_tx_seq = chan->expected_ack_seq;
- ret = l2cap_ertm_send(chan);
- return ret;
-}
-
-static void l2cap_send_ack(struct l2cap_chan *chan)
-{
- u16 control = 0;
-
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
- set_bit(CONN_RNR_SENT, &chan->conn_state);
- l2cap_send_sframe(chan, control);
- return;
+ while (len > 0) {
+ if (iv->iov_len) {
+ int copy = min_t(unsigned int, len, iv->iov_len);
+ memcpy(kdata, iv->iov_base, copy);
+ len -= copy;
+ kdata += copy;
+ iv->iov_base += copy;
+ iv->iov_len -= copy;
+ }
+ iv++;
}
- if (l2cap_ertm_send(chan) > 0)
- return;
-
- control |= L2CAP_SUPER_RCV_READY;
- l2cap_send_sframe(chan, control);
+ return 0;
}
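
memcpy_fromkvec() consumes the kvec array in place, advancing iov_base and iov_len as it copies, so repeated partial copies resume where the previous one stopped; the caller must guarantee the vector holds at least len bytes. A userspace sketch of the same contract (struct iovec standing in for kvec; names and the demo values are illustrative, not part of the patch):

	#include <stdio.h>
	#include <string.h>
	#include <sys/uio.h>

	/* Copy len bytes out of the vector, mutating it in place so a
	 * later call continues from the same position. The caller must
	 * ensure the vector holds at least len bytes.
	 */
	static void copy_from_iovec_inplace(unsigned char *dst,
					    struct iovec *iv, size_t len)
	{
		while (len > 0) {
			if (iv->iov_len) {
				size_t copy = len < iv->iov_len ? len : iv->iov_len;

				memcpy(dst, iv->iov_base, copy);
				dst += copy;
				len -= copy;
				iv->iov_base = (char *)iv->iov_base + copy;
				iv->iov_len -= copy;
			}
			iv++;
		}
	}

	int main(void)
	{
		char a[] = "hell", b[] = "o world";
		struct iovec iv[2] = {
			{ .iov_base = a, .iov_len = 4 },
			{ .iov_base = b, .iov_len = 7 },
		};
		unsigned char out[12] = { 0 };

		copy_from_iovec_inplace(out, iv, 6);	/* "hello " */
		copy_from_iovec_inplace(out + 6, iv, 5);	/* "world" */
		printf("%s\n", out);			/* hello world */
		return 0;
	}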
-static void l2cap_send_srejtail(struct l2cap_chan *chan)
+static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
+ int len, int count, struct sk_buff *skb,
+ int reseg)
{
- struct srej_list *tail;
- u16 control;
-
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= L2CAP_CTRL_FINAL;
-
- tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
- control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
- l2cap_send_sframe(chan, control);
-}
-
-static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff **frag;
+ struct sk_buff *final;
int err, sent = 0;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
+ BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
+ msg, (int)len, (int)count, skb);
+
+ if (!conn)
+ return -ENOTCONN;
+
+ /* When resegmenting, data is copied from kernel space */
+ if (reseg) {
+ err = memcpy_fromkvec(skb_put(skb, count),
+ (struct kvec *) msg->msg_iov, count);
+ } else {
+ err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
+ count);
+ }
+
+ if (err)
return -EFAULT;
sent += count;
len -= count;
+ final = skb;
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
+ int skblen;
count = min_t(unsigned int, conn->mtu, len);
- *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
+ /* Add room for the FCS if it fits */
+ if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
+ len + L2CAP_FCS_SIZE <= conn->mtu)
+ skblen = count + L2CAP_FCS_SIZE;
+ else
+ skblen = count;
+
+ /* Don't use bt_skb_send_alloc() while resegmenting, since
+ * it is not ok to block.
+ */
+ if (reseg) {
+ *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
+ if (*frag)
+ skb_set_owner_w(*frag, sk);
+ } else {
+ *frag = bt_skb_send_alloc(sk, skblen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ }
+
if (!*frag)
- return err;
- if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
+ return -EFAULT;
+
+ /* When resegmenting, data is copied from kernel space */
+ if (reseg) {
+ err = memcpy_fromkvec(skb_put(*frag, count),
+ (struct kvec *) msg->msg_iov,
+ count);
+ } else {
+ err = memcpy_fromiovec(skb_put(*frag, count),
+ msg->msg_iov, count);
+ }
+
+ if (err)
return -EFAULT;
sent += count;
len -= count;
+ final = *frag;
+
frag = &(*frag)->next;
}
+ if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
+ if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
+ if (reseg) {
+ *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
+ GFP_ATOMIC);
+ if (*frag)
+ skb_set_owner_w(*frag, sk);
+ } else {
+ *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
+ msg->msg_flags & MSG_DONTWAIT,
+ &err);
+ }
+
+ if (!*frag)
+ return -EFAULT;
+
+ final = *frag;
+ }
+
+ skb_put(final, L2CAP_FCS_SIZE);
+ }
+
return sent;
}
-struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
- struct sock *sk = chan->sk;
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE + 2;
struct l2cap_hdr *lh;
@@ -1478,11 +1625,11 @@
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(chan->dcid);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(chan->psm, skb_put(skb, 2));
+ put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
if (unlikely(err < 0)) {
kfree_skb(skb);
return ERR_PTR(err);
@@ -1490,10 +1637,9 @@
return skb;
}
-struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
- struct sock *sk = chan->sk;
- struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE;
struct l2cap_hdr *lh;
@@ -1508,10 +1654,10 @@
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(chan->dcid);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
if (unlikely(err < 0)) {
kfree_skb(skb);
return ERR_PTR(err);
@@ -1519,188 +1665,1028 @@
return skb;
}
-struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
+ struct msghdr *msg, size_t len,
+ u16 sdulen, int reseg)
{
- struct sock *sk = chan->sk;
- struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ int err, count, hlen;
+ int reserve = 0;
struct l2cap_hdr *lh;
+ u8 fcs = l2cap_pi(sk)->fcs;
- BT_DBG("sk %p len %d", sk, (int)len);
-
- if (!conn)
- return ERR_PTR(-ENOTCONN);
+ if (l2cap_pi(sk)->extended_control)
+ hlen = L2CAP_EXTENDED_HDR_SIZE;
+ else
+ hlen = L2CAP_ENHANCED_HDR_SIZE;
if (sdulen)
- hlen += 2;
+ hlen += L2CAP_SDULEN_SIZE;
- if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
+ if (fcs == L2CAP_FCS_CRC16)
+ hlen += L2CAP_FCS_SIZE;
- count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
+ sk, msg, (int)len, (int)sdulen, hlen);
+
+ count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
+
+ /* Allocate extra headroom for Qualcomm PAL. This is only
+ * necessary in two places (here and when creating sframes)
+ * because only unfragmented iframes and sframes are sent
+ * using AMP controllers.
+ */
+ if (l2cap_pi(sk)->ampcon &&
+ l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
+ reserve = BT_SKB_RESERVE_80211;
+
+ /* Don't use bt_skb_send_alloc() while resegmenting, since
+ * it is not ok to block.
+ */
+ if (reseg) {
+ skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
+ if (skb)
+ skb_set_owner_w(skb, sk);
+ } else {
+ skb = bt_skb_send_alloc(sk, count + hlen + reserve,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ }
if (!skb)
return ERR_PTR(err);
+ if (reserve)
+ skb_reserve(skb, reserve);
+
+ bt_cb(skb)->control.fcs = fcs;
+
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(control, skb_put(skb, 2));
- if (sdulen)
- put_unaligned_le16(sdulen, skb_put(skb, 2));
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ /* Control header is populated later */
+ if (l2cap_pi(sk)->extended_control)
+ put_unaligned_le32(0, skb_put(skb, 4));
+ else
+ put_unaligned_le16(0, skb_put(skb, 2));
+
+ if (sdulen)
+ put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
+
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
if (unlikely(err < 0)) {
+ BT_DBG("err %d", err);
kfree_skb(skb);
return ERR_PTR(err);
}
- if (chan->fcs == L2CAP_FCS_CRC16)
- put_unaligned_le16(0, skb_put(skb, 2));
-
bt_cb(skb)->retries = 0;
return skb;
}
-int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
{
- struct sk_buff *skb;
- struct sk_buff_head sar_queue;
- u16 control;
- size_t size = 0;
+ struct l2cap_pinfo *pi;
+ struct sk_buff *acked_skb;
+ u16 ackseq;
- skb_queue_head_init(&sar_queue);
- control = L2CAP_SDU_START;
- skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
- __skb_queue_tail(&sar_queue, skb);
- len -= chan->remote_mps;
- size += chan->remote_mps;
+ pi = l2cap_pi(sk);
- while (len > 0) {
- size_t buflen;
+ if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
+ return;
- if (len > chan->remote_mps) {
- control = L2CAP_SDU_CONTINUE;
- buflen = chan->remote_mps;
- } else {
- control = L2CAP_SDU_END;
- buflen = len;
+ BT_DBG("expected_ack_seq %d, unacked_frames %d",
+ (int) pi->expected_ack_seq, (int) pi->unacked_frames);
+
+ for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
+ ackseq = __next_seq(ackseq, pi)) {
+
+ acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
+ if (acked_skb) {
+ skb_unlink(acked_skb, TX_QUEUE(sk));
+ kfree_skb(acked_skb);
+ pi->unacked_frames--;
}
-
- skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
- if (IS_ERR(skb)) {
- skb_queue_purge(&sar_queue);
- return PTR_ERR(skb);
- }
-
- __skb_queue_tail(&sar_queue, skb);
- len -= buflen;
- size += buflen;
}
- skb_queue_splice_tail(&sar_queue, &chan->tx_q);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = sar_queue.next;
- return size;
+ pi->expected_ack_seq = reqseq;
+
+ if (pi->unacked_frames == 0)
+ l2cap_ertm_stop_retrans_timer(pi);
+
+ BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
}
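
The walk above releases one transmit-queue entry per acknowledged sequence number, relying purely on modular arithmetic so that wraparound needs no special case. A standalone model (SEQ_MOD assumes the 14-bit extended sequence space; the enhanced control field would use 64):

	/* Toy model of the ack walk: step the modular sequence space from
	 * expected_ack_seq up to (but not including) reqseq, dropping one
	 * queued frame per step.
	 */
	#include <stdio.h>

	#define SEQ_MOD 0x4000	/* 2^14, extended control field */

	static unsigned next_seq(unsigned seq)
	{
		return (seq + 1) % SEQ_MOD;
	}

	int main(void)
	{
		unsigned expected_ack_seq = 0x3ffe;	/* about to wrap */
		unsigned reqseq = 0x0002;
		unsigned ackseq;

		for (ackseq = expected_ack_seq; ackseq != reqseq;
							ackseq = next_seq(ackseq))
			printf("ack frame with txseq %#x\n", ackseq);

		/* prints 0x3ffe, 0x3fff, 0, 0x1 */
		return 0;
	}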
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
{
struct sk_buff *skb;
- u16 control;
- int err;
+ int len;
+ int reserve = 0;
+ struct l2cap_hdr *lh;
- /* Connectionless channel */
- if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
- skb = l2cap_create_connless_pdu(chan, msg, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (l2cap_pi(sk)->extended_control)
+ len = L2CAP_EXTENDED_HDR_SIZE;
+ else
+ len = L2CAP_ENHANCED_HDR_SIZE;
- l2cap_do_send(chan, skb);
- return len;
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+ len += L2CAP_FCS_SIZE;
+
+ /* Allocate extra headroom for Qualcomm PAL */
+ if (l2cap_pi(sk)->ampcon &&
+ l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
+ reserve = BT_SKB_RESERVE_80211;
+
+ skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
+
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (reserve)
+ skb_reserve(skb, reserve);
+
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
+
+ if (l2cap_pi(sk)->extended_control)
+ put_unaligned_le32(control, skb_put(skb, 4));
+ else
+ put_unaligned_le16(control, skb_put(skb, 2));
+
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
- switch (chan->mode) {
- case L2CAP_MODE_BASIC:
- /* Check outgoing MTU */
- if (len > chan->omtu)
- return -EMSGSIZE;
+ return skb;
+}
- /* Create a basic PDU */
- skb = l2cap_create_basic_pdu(chan, msg, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+static void l2cap_ertm_send_sframe(struct sock *sk,
+ struct bt_l2cap_control *control)
+{
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
+ u32 control_field;
- l2cap_do_send(chan, skb);
- err = len;
- break;
+ BT_DBG("sk %p, control %p", sk, control);
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= chan->remote_mps) {
- control = L2CAP_SDU_UNSEGMENTED;
- skb = l2cap_create_iframe_pdu(chan, msg, len, control,
- 0);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (control->frame_type != 's')
+ return;
- __skb_queue_tail(&chan->tx_q, skb);
+ pi = l2cap_pi(sk);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = skb;
+ if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
+ BT_DBG("AMP error - attempted S-Frame send during AMP move");
+ return;
+ }
- } else {
- /* Segment SDU into multiples PDUs */
- err = l2cap_sar_segment_sdu(chan, msg, len);
- if (err < 0)
- return err;
+ if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
+ control->final = 1;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ }
+
+ if (control->super == L2CAP_SFRAME_RR)
+ pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
+ else if (control->super == L2CAP_SFRAME_RNR)
+ pi->conn_state |= L2CAP_CONN_SENT_RNR;
+
+ if (control->super != L2CAP_SFRAME_SREJ) {
+ pi->last_acked_seq = control->reqseq;
+ l2cap_ertm_stop_ack_timer(pi);
+ }
+
+ BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
+ (int) control->final, (int) control->poll,
+ (int) control->super);
+
+ if (pi->extended_control)
+ control_field = __pack_extended_control(control);
+ else
+ control_field = __pack_enhanced_control(control);
+
+ skb = l2cap_create_sframe_pdu(sk, control_field);
+ if (!IS_ERR(skb))
+ l2cap_do_send(sk, skb);
+}
+
+static void l2cap_ertm_send_ack(struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct bt_l2cap_control control;
+ u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
+ int threshold;
+
+ BT_DBG("sk %p", sk);
+ BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
+ (int)pi->buffer_seq);
+
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+
+ if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+ pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
+ l2cap_ertm_stop_ack_timer(pi);
+ control.super = L2CAP_SFRAME_RNR;
+ control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &control);
+ } else {
+ if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
+ l2cap_ertm_send(sk);
+ /* If any i-frames were sent, they included an ack */
+ if (pi->buffer_seq == pi->last_acked_seq)
+ frames_to_ack = 0;
}
- if (chan->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(chan);
- err = len;
+ /* Ack now if the tx window is 3/4ths full.
+ * Calculate without mul or div
+ */
+ threshold = pi->tx_win;
+ threshold += threshold << 1;
+ threshold >>= 2;
+
+ BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
+ threshold);
+
+ if (frames_to_ack >= threshold) {
+ l2cap_ertm_stop_ack_timer(pi);
+ control.super = L2CAP_SFRAME_RR;
+ control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &control);
+ frames_to_ack = 0;
+ }
+
+ if (frames_to_ack)
+ l2cap_ertm_start_ack_timer(pi);
+ }
+}
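
The shift form above computes the three-quarters mark exactly: t += t << 1 triples the window, and t >>= 2 then divides by four with truncation. A quick standalone check of the identity for every legal window size:

	/* Verify the mul/div-free 3/4 threshold used above. */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned w;

		for (w = 1; w <= 0x3fff; w++) {	/* up to max extended tx_win */
			unsigned t = w;

			t += t << 1;	/* t = 3 * w */
			t >>= 2;	/* t = (3 * w) / 4, truncated */
			assert(t == (3 * w) / 4);
		}
		printf("threshold identity holds for all window sizes\n");
		return 0;
	}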
+
+static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
+{
+ struct l2cap_pinfo *pi;
+ struct bt_l2cap_control control;
+
+ BT_DBG("sk %p, poll %d", sk, (int) poll);
+
+ pi = l2cap_pi(sk);
+
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.poll = poll;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+ control.super = L2CAP_SFRAME_RNR;
+ else
+ control.super = L2CAP_SFRAME_RR;
+
+ control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &control);
+}
+
+static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ struct bt_l2cap_control control;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.final = 1;
+ control.reqseq = pi->buffer_seq;
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ control.super = L2CAP_SFRAME_RNR;
+ l2cap_ertm_send_sframe(sk, &control);
+ }
+
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->unacked_frames > 0))
+ l2cap_ertm_start_retrans_timer(pi);
+
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ /* Send pending iframes */
+ l2cap_ertm_send(sk);
+
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ /* F-bit wasn't sent in an s-frame or i-frame yet, so
+ * send it now.
+ */
+ control.super = L2CAP_SFRAME_RR;
+ l2cap_ertm_send_sframe(sk, &control);
+ }
+}
+
+static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
+{
+ struct bt_l2cap_control control;
+ struct l2cap_pinfo *pi;
+ u16 seq;
+
+ BT_DBG("sk %p, txseq %d", sk, (int)txseq);
+
+ pi = l2cap_pi(sk);
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.super = L2CAP_SFRAME_SREJ;
+
+ for (seq = pi->expected_tx_seq; seq != txseq;
+ seq = __next_seq(seq, pi)) {
+ if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
+ control.reqseq = seq;
+ l2cap_ertm_send_sframe(sk, &control);
+ l2cap_seq_list_append(&pi->srej_list, seq);
+ }
+ }
+
+ pi->expected_tx_seq = __next_seq(txseq, pi);
+}
+
+static void l2cap_ertm_send_srej_tail(struct sock *sk)
+{
+ struct bt_l2cap_control control;
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.super = L2CAP_SFRAME_SREJ;
+ control.reqseq = pi->srej_list.tail;
+ l2cap_ertm_send_sframe(sk, &control);
+}
+
+static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
+{
+ struct bt_l2cap_control control;
+ struct l2cap_pinfo *pi;
+ u16 initial_head;
+ u16 seq;
+
+ BT_DBG("sk %p, txseq %d", sk, (int) txseq);
+
+ pi = l2cap_pi(sk);
+ memset(&control, 0, sizeof(control));
+ control.frame_type = 's';
+ control.super = L2CAP_SFRAME_SREJ;
+
+ /* Capture initial list head to allow only one pass through the list. */
+ initial_head = pi->srej_list.head;
+
+ do {
+ seq = l2cap_seq_list_pop(&pi->srej_list);
+ if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
+ break;
+
+ control.reqseq = seq;
+ l2cap_ertm_send_sframe(sk, &control);
+ l2cap_seq_list_append(&pi->srej_list, seq);
+ } while (pi->srej_list.head != initial_head);
+}
+
+static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ BT_DBG("sk %p", sk);
+
+ pi->expected_tx_seq = pi->buffer_seq;
+ l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
+ skb_queue_purge(SREJ_QUEUE(sk));
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+}
+
+static int l2cap_ertm_tx_state_xmit(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_DATA_REQUEST:
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb_peek(skbs);
+
+ skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
+ l2cap_ertm_send(sk);
+ break;
+ case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+
+ if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_ertm_abort_rx_srej_sent(sk);
+ }
+
+ l2cap_ertm_send_ack(sk);
+
+ break;
+ case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+
+ if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(pi->conn, pi,
+ pi->scid,
+ L2CAP_MOVE_CHAN_CONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ } else if (pi->amp_move_role ==
+ L2CAP_AMP_MOVE_RESPONDER) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident,
+ pi->dcid,
+ L2CAP_MOVE_CHAN_SUCCESS);
+ }
break;
}
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- err = len;
- break;
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
+ (pi->conn_state & L2CAP_CONN_SENT_RNR)) {
+ struct bt_l2cap_control local_control;
+
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.frame_type = 's';
+ local_control.super = L2CAP_SFRAME_RR;
+ local_control.poll = 1;
+ local_control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &local_control);
+
+ pi->retry_count = 1;
+ l2cap_ertm_start_monitor_timer(pi);
+ pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
}
-
- err = l2cap_ertm_send(chan);
- if (err >= 0)
- err = len;
-
break;
-
+ case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
+ l2cap_ertm_process_reqseq(sk, control->reqseq);
+ break;
+ case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
+ l2cap_ertm_send_rr_or_rnr(sk, 1);
+ pi->retry_count = 1;
+ l2cap_ertm_start_monitor_timer(pi);
+ l2cap_ertm_stop_ack_timer(pi);
+ pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
+ l2cap_ertm_send_rr_or_rnr(sk, 1);
+ pi->retry_count = 1;
+ l2cap_ertm_start_monitor_timer(pi);
+ pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_ERTM_EVENT_RECV_FBIT:
+ /* Nothing to process */
+ break;
default:
- BT_DBG("bad state %1.1x", chan->mode);
- err = -EBADFD;
+ break;
}
return err;
}
+static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_DATA_REQUEST:
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb_peek(skbs);
+ /* Queue data, but don't send. */
+ skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
+ break;
+ case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+
+ if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_ertm_abort_rx_srej_sent(sk);
+ }
+
+ l2cap_ertm_send_ack(sk);
+
+ break;
+ case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+
+ if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
+ struct bt_l2cap_control local_control;
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.frame_type = 's';
+ local_control.super = L2CAP_SFRAME_RR;
+ local_control.poll = 1;
+ local_control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &local_control);
+
+ pi->retry_count = 1;
+ l2cap_ertm_start_monitor_timer(pi);
+ pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
+ l2cap_ertm_process_reqseq(sk, control->reqseq);
+
+ /* Fall through */
+
+ case L2CAP_ERTM_EVENT_RECV_FBIT:
+ if (control && control->final) {
+ l2cap_ertm_stop_monitor_timer(pi);
+ if (pi->unacked_frames > 0)
+ l2cap_ertm_start_retrans_timer(pi);
+ pi->retry_count = 0;
+ pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
+ BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
+ }
+ break;
+ case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
+ /* Ignore */
+ break;
+ case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
+ if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
+ l2cap_ertm_send_rr_or_rnr(sk, 1);
+ l2cap_ertm_start_monitor_timer(pi);
+ pi->retry_count += 1;
+ } else
+ l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
+ sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
+
+ pi = l2cap_pi(sk);
+
+ switch (pi->tx_state) {
+ case L2CAP_ERTM_TX_STATE_XMIT:
+ err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
+ break;
+ case L2CAP_ERTM_TX_STATE_WAIT_F:
+ err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
+ break;
+ default:
+ /* Ignore event */
+ break;
+ }
+
+ return err;
+}
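
Only two transmit states are dispatched here: XMIT, where queued I-frames flow out, and WAIT_F, entered after sending a poll and left when a frame carrying the F-bit arrives. A toy model of that dispatch (simplified states and events, not the patch's full event set):

	#include <stdio.h>

	enum tx_state { TX_XMIT, TX_WAIT_F };
	enum tx_event { EV_DATA, EV_POLL_SENT, EV_RECV_FBIT };

	static enum tx_state step(enum tx_state s, enum tx_event e)
	{
		switch (s) {
		case TX_XMIT:
			if (e == EV_POLL_SENT)
				return TX_WAIT_F;	/* P-bit sent, await F-bit */
			return TX_XMIT;
		case TX_WAIT_F:
			if (e == EV_RECV_FBIT)
				return TX_XMIT;		/* poll answered, resume */
			return TX_WAIT_F;		/* data is only queued here */
		}
		return s;
	}

	int main(void)
	{
		enum tx_state s = TX_XMIT;

		s = step(s, EV_DATA);		/* still XMIT */
		s = step(s, EV_POLL_SENT);	/* -> WAIT_F */
		s = step(s, EV_DATA);		/* queued, stays WAIT_F */
		s = step(s, EV_RECV_FBIT);	/* -> XMIT */
		printf("final state %s\n", s == TX_XMIT ? "XMIT" : "WAIT_F");
		return 0;
	}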
+
+int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
+ struct msghdr *msg, size_t len, int reseg)
+{
+ struct sk_buff *skb;
+ u16 sdu_len;
+ size_t pdu_len;
+ int err = 0;
+ u8 sar;
+
+ BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);
+
+ /* It is critical that ERTM PDUs fit in a single HCI fragment,
+ * so fragmented skbs are not used. The HCI layer's handling
+ * of fragmented skbs is not compatible with ERTM's queueing.
+ */
+
+ /* PDU size is derived from the HCI MTU */
+ pdu_len = l2cap_pi(sk)->conn->mtu;
+
+ /* Constrain BR/EDR PDU size to fit within the largest radio packet */
+ if (!l2cap_pi(sk)->ampcon)
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+ /* Adjust for largest possible L2CAP overhead. */
+ pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;
+
+ /* Remote device may have requested smaller PDUs */
+ pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_UNSEGMENTED;
+ sdu_len = 0;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_START;
+ sdu_len = len;
+ pdu_len -= L2CAP_SDULEN_SIZE;
+ }
+
+ while (len) {
+ skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);
+
+ BT_DBG("iframe skb %p", skb);
+
+ if (IS_ERR(skb)) {
+ __skb_queue_purge(seg_queue);
+ return PTR_ERR(skb);
+ }
+
+ bt_cb(skb)->control.sar = sar;
+ __skb_queue_tail(seg_queue, skb);
+
+ len -= pdu_len;
+ if (sdu_len) {
+ sdu_len = 0;
+ pdu_len += L2CAP_SDULEN_SIZE;
+ }
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_END;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_CONTINUE;
+ }
+ }
+
+ return err;
+}
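
Only the START PDU of a segmented SDU carries the 2-byte SDU-length field, so its payload capacity is L2CAP_SDULEN_SIZE smaller than that of the CONTINUE and END PDUs; the loop restores pdu_len after the first pass. A standalone model of the resulting PDU count (values are illustrative):

	#include <stdio.h>

	#define SDULEN_SIZE 2

	static unsigned num_pdus(unsigned sdu_len, unsigned pdu_len)
	{
		unsigned n = 0;
		unsigned chunk;

		if (sdu_len <= pdu_len)
			return 1;			/* unsegmented */

		chunk = pdu_len - SDULEN_SIZE;		/* START carries sdulen */
		while (sdu_len) {
			n++;
			sdu_len -= (sdu_len < chunk) ? sdu_len : chunk;
			chunk = pdu_len;		/* later PDUs do not */
		}
		return n;
	}

	int main(void)
	{
		/* A 1000-byte SDU over 672-byte PDUs: START(670) + END(330) */
		printf("%u PDUs\n", num_pdus(1000, 672));
		return 0;
	}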
+
+static inline int is_initial_frame(u8 sar)
+{
+ return (sar == L2CAP_SAR_UNSEGMENTED ||
+ sar == L2CAP_SAR_START);
+}
+
+static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
+ size_t veclen)
+{
+ struct sk_buff *frag_iter;
+
+ BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
+
+ if (iv->iov_len + skb->len > veclen)
+ return -ENOMEM;
+
+ memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
+ iv->iov_len += skb->len;
+
+ skb_walk_frags(skb, frag_iter) {
+ if (iv->iov_len + frag_iter->len > veclen)
+ return -ENOMEM;
+
+ BT_DBG("Copying %d bytes", (int)frag_iter->len);
+ memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
+ frag_iter->len);
+ iv->iov_len += frag_iter->len;
+ }
+
+ return 0;
+}
+
+int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
+{
+ void *buf;
+ int buflen;
+ int err = 0;
+ struct sk_buff *skb;
+ struct msghdr msg;
+ struct kvec iv;
+ struct sk_buff_head old_frames;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+ BT_DBG("sk %p", sk);
+
+ if (skb_queue_empty(queue))
+ return 0;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_iov = (struct iovec *) &iv;
+
+ buflen = pi->omtu + L2CAP_FCS_SIZE;
+ buf = kzalloc(buflen, GFP_TEMPORARY);
+
+ if (!buf) {
+ BT_DBG("Could not allocate resegmentation buffer");
+ return -ENOMEM;
+ }
+
+ /* Move current frames off the original queue */
+ __skb_queue_head_init(&old_frames);
+ skb_queue_splice_tail_init(queue, &old_frames);
+
+ while (!skb_queue_empty(&old_frames)) {
+ struct sk_buff_head current_sdu;
+ u8 original_sar;
+
+ /* Reassemble each SDU from one or more PDUs */
+
+ iv.iov_base = buf;
+ iv.iov_len = 0;
+
+ skb = skb_peek(&old_frames);
+ original_sar = bt_cb(skb)->control.sar;
+
+ __skb_unlink(skb, &old_frames);
+
+ /* Append data to SDU */
+ if (pi->extended_control)
+ skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
+ else
+ skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);
+
+ if (original_sar == L2CAP_SAR_START)
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+
+ err = l2cap_skbuff_to_kvec(skb, &iv, buflen);
+
+ if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
+ iv.iov_len -= L2CAP_FCS_SIZE;
+
+ /* Free skb */
+ kfree_skb(skb);
+
+ if (err)
+ break;
+
+ while (!skb_queue_empty(&old_frames) && !err) {
+ /* Check next frame */
+ skb = skb_peek(&old_frames);
+
+ if (is_initial_frame(bt_cb(skb)->control.sar))
+ break;
+
+ __skb_unlink(skb, &old_frames);
+
+ /* Append data to SDU */
+ if (pi->extended_control)
+ skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
+ else
+ skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);
+
+ if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+
+ err = l2cap_skbuff_to_kvec(skb, &iv, buflen);
+
+ if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
+ iv.iov_len -= L2CAP_FCS_SIZE;
+
+ /* Free skb */
+ kfree_skb(skb);
+ }
+
+ if (err)
+ break;
+
+ /* Segment data */
+
+ __skb_queue_head_init(&current_sdu);
+
+ /* skbs for the SDU were just freed, but the
+ * resegmenting process could produce more, smaller
+ * skbs due to smaller PDUs and reduced HCI MTU. The
+ * overhead from the sk_buff structs could put us over
+ * the sk_sndbuf limit.
+ *
+ * Since this code is running in response to a
+ * received poll/final packet, it cannot block.
+ * Therefore, memory allocation needs to be allowed by
+ * falling back to bt_skb_alloc() (with
+ * skb_set_owner_w() to maintain sk_wmem_alloc
+ * correctly).
+ */
+ msg.msg_iovlen = iv.iov_len;
+ err = l2cap_segment_sdu(sk, &current_sdu, &msg,
+ msg.msg_iovlen, 1);
+
+ if (err || skb_queue_empty(&current_sdu)) {
+ BT_DBG("Error %d resegmenting data for socket %p",
+ err, sk);
+ __skb_queue_purge(&current_sdu);
+ break;
+ }
+
+ /* Fix up first PDU SAR bits */
+ if (!is_initial_frame(original_sar)) {
+ BT_DBG("Changing SAR bits, %d PDUs",
+ skb_queue_len(&current_sdu));
+ skb = skb_peek(&current_sdu);
+
+ if (skb_queue_len(&current_sdu) == 1) {
+ /* Change SAR from 'unsegmented' to 'end' */
+ bt_cb(skb)->control.sar = L2CAP_SAR_END;
+ } else {
+ struct l2cap_hdr *lh;
+ size_t hdrlen;
+
+ /* Change SAR from 'start' to 'continue' */
+ bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;
+
+ /* Start frames contain 2 bytes for
+ * sdulen and continue frames don't.
+ * Must rewrite header to eliminate
+ * sdulen and then adjust l2cap frame
+ * length.
+ */
+ if (pi->extended_control)
+ hdrlen = L2CAP_EXTENDED_HDR_SIZE;
+ else
+ hdrlen = L2CAP_ENHANCED_HDR_SIZE;
+
+ memmove(skb->data + L2CAP_SDULEN_SIZE,
+ skb->data, hdrlen);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+ lh = (struct l2cap_hdr *)skb->data;
+ lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
+ L2CAP_SDULEN_SIZE);
+ }
+ }
+
+ /* Add to queue */
+ skb_queue_splice_tail(&current_sdu, queue);
+ }
+
+ __skb_queue_purge(&old_frames);
+ if (err)
+ __skb_queue_purge(queue);
+
+ kfree(buf);
+
+ BT_DBG("Queue resegmented, err=%d", err);
+ return err;
+}
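
Rewriting a START frame as a CONTINUE frame, as done near the end of the loop above, slides the L2CAP header forward over the 2-byte SDU-length field and shrinks the recorded frame length; the SAR bits themselves are flipped separately in the skb control block. A byte-level sketch under the enhanced (2-byte) control format, with illustrative field values:

	#include <stdio.h>
	#include <string.h>

	#define ENH_HDR_SIZE	6	/* len(2) + cid(2) + control(2) */
	#define SDULEN_SIZE	2

	int main(void)
	{
		unsigned char pdu[] = {
			0x08, 0x00,		/* l2cap len: ctrl+sdulen+payload */
			0x40, 0x00,		/* cid */
			0x00, 0x40,		/* control, SAR = start */
			0x04, 0x00,		/* sdulen */
			'd', 'a', 't', 'a',	/* payload */
		};
		unsigned char *frame = pdu;
		unsigned len;

		memmove(frame + SDULEN_SIZE, frame, ENH_HDR_SIZE);
		frame += SDULEN_SIZE;			/* like skb_pull() */

		len = frame[0] | (frame[1] << 8);	/* little-endian */
		len -= SDULEN_SIZE;
		frame[0] = len & 0xff;
		frame[1] = len >> 8;

		printf("new l2cap len %u, payload '%.4s'\n",
		       len, (char *)(frame + ENH_HDR_SIZE));
		return 0;
	}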
+
+static void l2cap_resegment_worker(struct work_struct *work)
+{
+ int err = 0;
+ struct l2cap_resegment_work *seg_work =
+ container_of(work, struct l2cap_resegment_work, work);
+ struct sock *sk = seg_work->sk;
+
+ kfree(seg_work);
+
+ BT_DBG("sk %p", sk);
+ lock_sock(sk);
+
+ if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
+ release_sock(sk);
+ return;
+ }
+
+ err = l2cap_resegment_queue(sk, TX_QUEUE(sk));
+
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
+
+ if (skb_queue_empty(TX_QUEUE(sk)))
+ sk->sk_send_head = NULL;
+ else
+ sk->sk_send_head = skb_peek(TX_QUEUE(sk));
+
+ if (err)
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
+ else
+ l2cap_ertm_send(sk);
+
+ release_sock(sk);
+}
+
+static int l2cap_setup_resegment(struct sock *sk)
+{
+ struct l2cap_resegment_work *seg_work;
+
+ BT_DBG("sk %p", sk);
+
+ if (skb_queue_empty(TX_QUEUE(sk)))
+ return 0;
+
+ seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
+ if (!seg_work)
+ return -ENOMEM;
+
+ INIT_WORK(&seg_work->work, l2cap_resegment_worker);
+ seg_work->sk = sk;
+
+ if (!queue_work(_l2cap_wq, &seg_work->work)) {
+ kfree(seg_work);
+ return -ENOMEM;
+ }
+
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
+
+ return 0;
+}
+
+static inline int l2cap_rmem_available(struct sock *sk)
+{
+ BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
+ atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
+ return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
+}
+
+static inline int l2cap_rmem_full(struct sock *sk)
+{
+ BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
+ atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
+ return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
+}
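
l2cap_rmem_available() and l2cap_rmem_full() define a hysteresis band on the
receive buffer: local busy is asserted once sk_rmem_alloc climbs past 2/3 of
sk_rcvbuf and only cleared once it falls below 1/3, so flow control cannot
oscillate around a single threshold. A stand-alone sketch of the same policy
(hypothetical names, plain C):

	#include <stdbool.h>

	struct busy_state { bool busy; };

	static void busy_update(struct busy_state *s, int alloc, int rcvbuf)
	{
		if (!s->busy && alloc > (2 * rcvbuf) / 3)
			s->busy = true;		/* crossed upper threshold */
		else if (s->busy && alloc < rcvbuf / 3)
			s->busy = false;	/* crossed lower threshold */
		/* between the thresholds, the previous state is kept */
	}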
+
+void l2cap_amp_move_init(struct sock *sk)
+{
+ BT_DBG("sk %p", sk);
+
+ if (!l2cap_pi(sk)->conn)
+ return;
+
+ if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
+ return;
+
+ if (l2cap_pi(sk)->amp_id == 0) {
+ if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
+ return;
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
+ amp_create_physical(l2cap_pi(sk)->conn, sk);
+ } else {
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
+ l2cap_pi(sk)->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
+ l2cap_pi(sk)->amp_move_id = 0;
+ l2cap_amp_move_setup(sk);
+ l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
+ l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ }
+}
+
+static void l2cap_chan_ready(struct sock *sk)
+{
+ struct sock *parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ l2cap_pi(sk)->conf_state = 0;
+ l2cap_sock_clear_timer(sk);
+
+ if (!parent) {
+ /* Outgoing channel.
+ * Wake up socket sleeping on connect.
+ */
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+ } else {
+ /* Incoming channel.
+ * Wake up socket sleeping on accept.
+ */
+ parent->sk_data_ready(parent, 0);
+ }
+}
+
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
+ struct l2cap_chan_list *l = &conn->chan_list;
struct sk_buff *nskb;
- struct l2cap_chan *chan;
+ struct sock *sk;
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
- list_for_each_entry(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
- if (chan->chan_type != L2CAP_CHAN_RAW)
+ read_lock(&l->lock);
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ if (sk->sk_type != SOCK_RAW)
continue;
/* Don't send frame to the socket it came from */
@@ -1710,10 +2696,10 @@
if (!nskb)
continue;
- if (chan->ops->recv(chan->data, nskb))
+ if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
}
- read_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
}
/* ---- L2CAP signalling commands ---- */
@@ -1724,12 +2710,13 @@
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
int len, count;
+ unsigned int mtu = conn->hcon->hdev->acl_mtu;
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
- count = min_t(unsigned int, conn->mtu, len);
+ count = min_t(unsigned int, mtu, len);
skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
@@ -1759,7 +2746,7 @@
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
- count = min_t(unsigned int, conn->mtu, len);
+ count = min_t(unsigned int, mtu, len);
*frag = bt_skb_alloc(count, GFP_ATOMIC);
if (!*frag)
@@ -1843,37 +2830,157 @@
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
-static void l2cap_ack_timeout(unsigned long arg)
+static void l2cap_ertm_ack_timeout(struct work_struct *work)
{
- struct l2cap_chan *chan = (void *) arg;
+ struct delayed_work *delayed =
+ container_of(work, struct delayed_work, work);
+ struct l2cap_pinfo *pi =
+ container_of(delayed, struct l2cap_pinfo, ack_work);
+ struct sock *sk = (struct sock *)pi;
+ u16 frames_to_ack;
- bh_lock_sock(chan->sk);
- l2cap_send_ack(chan);
- bh_unlock_sock(chan->sk);
+ BT_DBG("sk %p", sk);
+
+ if (!sk)
+ return;
+
+ lock_sock(sk);
+
+ if (!l2cap_pi(sk)->conn) {
+ release_sock(sk);
+ return;
+ }
+
+ frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
+ l2cap_pi(sk)->last_acked_seq,
+ l2cap_pi(sk));
+
+ if (frames_to_ack)
+ l2cap_ertm_send_rr_or_rnr(sk, 0);
+
+ release_sock(sk);
}
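
__delta_seq() is defined elsewhere in this patch; it is assumed here to return
the forward distance between two sequence numbers modulo the channel's
sequence space, so the pending-ack count stays correct across wraparound. A
sketch under that assumption:

	#include <stdint.h>

	/* forward distance from seq2 to seq1; exact for the power-of-two
	 * spaces used here (64 enhanced, 16384 extended), both of which
	 * divide 2^16 */
	static uint16_t delta_seq(uint16_t seq1, uint16_t seq2,
				  uint16_t seq_space)
	{
		return (uint16_t)(seq1 - seq2) % seq_space;
	}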
-static inline void l2cap_ertm_init(struct l2cap_chan *chan)
+static void l2cap_ertm_retrans_timeout(struct work_struct *work)
{
- struct sock *sk = chan->sk;
+ struct delayed_work *delayed =
+ container_of(work, struct delayed_work, work);
+ struct l2cap_pinfo *pi =
+ container_of(delayed, struct l2cap_pinfo, retrans_work);
+ struct sock *sk = (struct sock *)pi;
- chan->expected_ack_seq = 0;
- chan->unacked_frames = 0;
- chan->buffer_seq = 0;
- chan->num_acked = 0;
- chan->frames_sent = 0;
+ BT_DBG("sk %p", sk);
- setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
- (unsigned long) chan);
- setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
- (unsigned long) chan);
- setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
+ if (!sk)
+ return;
- skb_queue_head_init(&chan->srej_q);
+ lock_sock(sk);
- INIT_LIST_HEAD(&chan->srej_l);
+ if (!l2cap_pi(sk)->conn) {
+ release_sock(sk);
+ return;
+ }
+ l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
+ release_sock(sk);
+}
- sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
+static void l2cap_ertm_monitor_timeout(struct work_struct *work)
+{
+ struct delayed_work *delayed =
+ container_of(work, struct delayed_work, work);
+ struct l2cap_pinfo *pi =
+ container_of(delayed, struct l2cap_pinfo, monitor_work);
+ struct sock *sk = (struct sock *)pi;
+
+ BT_DBG("sk %p", sk);
+
+ if (!sk)
+ return;
+
+ lock_sock(sk);
+
+ if (!l2cap_pi(sk)->conn) {
+ release_sock(sk);
+ return;
+ }
+
+ l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
+
+ release_sock(sk);
+}
+
+static inline void l2cap_ertm_init(struct sock *sk)
+{
+ l2cap_pi(sk)->next_tx_seq = 0;
+ l2cap_pi(sk)->expected_tx_seq = 0;
+ l2cap_pi(sk)->expected_ack_seq = 0;
+ l2cap_pi(sk)->unacked_frames = 0;
+ l2cap_pi(sk)->buffer_seq = 0;
+ l2cap_pi(sk)->frames_sent = 0;
+ l2cap_pi(sk)->last_acked_seq = 0;
+ l2cap_pi(sk)->sdu = NULL;
+ l2cap_pi(sk)->sdu_last_frag = NULL;
+ l2cap_pi(sk)->sdu_len = 0;
+ atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
+
+ l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
+
+ BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
+ l2cap_pi(sk)->rx_state);
+
+ l2cap_pi(sk)->amp_id = 0;
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ l2cap_pi(sk)->amp_move_reqseq = 0;
+ l2cap_pi(sk)->amp_move_event = 0;
+
+ INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
+ INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
+ l2cap_ertm_retrans_timeout);
+ INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
+ l2cap_ertm_monitor_timeout);
+ INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
+ skb_queue_head_init(SREJ_QUEUE(sk));
+ skb_queue_head_init(TX_QUEUE(sk));
+
+ l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
+ l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
+ l2cap_pi(sk)->remote_tx_win);
+}
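
The ack, retransmit and monitor timers initialised above follow the usual
delayed-work pattern; the l2cap_ertm_stop_*_timer() helpers used below are
assumed to cancel the pending work. Re-arming one of these timers on the
module workqueue would look like this sketch (assumes _l2cap_wq, the
workqueue used elsewhere in this patch):

	#include <linux/workqueue.h>

	static void ertm_rearm_timer(struct delayed_work *dw,
				     unsigned long timeout)
	{
		cancel_delayed_work(dw);	/* drop any pending expiry */
		queue_delayed_work(_l2cap_wq, dw, timeout);
	}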
+
+void l2cap_ertm_destruct(struct sock *sk)
+{
+ l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
+ l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
+}
+
+void l2cap_ertm_shutdown(struct sock *sk)
+{
+ l2cap_ertm_stop_ack_timer(l2cap_pi(sk));
+ l2cap_ertm_stop_retrans_timer(l2cap_pi(sk));
+ l2cap_ertm_stop_monitor_timer(l2cap_pi(sk));
+}
+
+void l2cap_ertm_recv_done(struct sock *sk)
+{
+ lock_sock(sk);
+
+ if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) {
+ release_sock(sk);
+ return;
+ }
+
+ /* Consume any queued incoming frames and update local busy status */
+ if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
+ l2cap_ertm_rx_queued_iframes(sk))
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
+ else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+ l2cap_rmem_available(sk))
+ l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
+
+ release_sock(sk);
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -1889,40 +2996,183 @@
}
}
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
{
- struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = chan->mode };
- void *ptr = req->data;
+ if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
+ (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
+ pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
+ pi->extended_control = 1;
+ } else {
+ if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
+ pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
+
+ pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
+ pi->extended_control = 0;
+ }
+}
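
The two window ceilings above correspond to the control-field formats:
enhanced control fields carry a 6-bit sequence number and extended control
fields a 14-bit one, so the usable windows are 2^6 - 1 = 63 and
2^14 - 1 = 16383. Illustrative defines (the patch's own constants are
L2CAP_TX_WIN_MAX_ENHANCED and L2CAP_TX_WIN_MAX_EXTENDED):

	#define SEQ_MAX_ENHANCED	((1 << 6) - 1)	/* 63 */
	#define SEQ_MAX_EXTENDED	((1 << 14) - 1)	/* 16383 */

Extended control is therefore selected only when the peer advertises
L2CAP_FEAT_EXT_WINDOW and the requested window does not fit in 6 bits.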
+
+static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
+ struct hci_ext_fs *new,
+ struct hci_ext_fs *agg)
+{
+ *agg = *cur;
+ if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
+ /* current flow spec has known rate */
+ if ((new->max_sdu == 0xFFFF) ||
+ (new->sdu_arr_time == 0xFFFFFFFF)) {
+ /* new fs has unknown rate, so aggregate is unknown */
+ agg->max_sdu = 0xFFFF;
+ agg->sdu_arr_time = 0xFFFFFFFF;
+ } else {
+ /* new fs has known rate, so aggregate is known */
+ u64 cur_rate;
+ u64 new_rate;
+ cur_rate = cur->max_sdu * 1000000ULL;
+ if (cur->sdu_arr_time)
+ cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
+ new_rate = new->max_sdu * 1000000ULL;
+ if (new->sdu_arr_time)
+ new_rate = div_u64(new_rate, new->sdu_arr_time);
+ cur_rate = cur_rate + new_rate;
+ agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
+ cur_rate);
+ }
+ }
+}
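
l2cap_aggregate_fs() works in octets per second, with max_sdu == 0xFFFF or
sdu_arr_time == 0xFFFFFFFF meaning "rate unknown"; sdu_arr_time is assumed to
be in microseconds, hence the 10^6 factor. The aggregate inter-arrival time is
recomputed so that the aggregate max_sdu at the summed rate describes the same
load:

	rate     = max_sdu * 1000000 / sdu_arr_time          (octets/s)
	agg_time = agg->max_sdu * 1000000 / (rate_cur + rate_new)

For example, 1000 octets every 10000 us (100000 octets/s) aggregated with
500 octets every 10000 us (50000 octets/s) gives
1000 * 1000000 / 150000 = 6666 us between maximum-size SDUs.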
+
+static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
+{
+ struct hci_ext_fs tx_fs;
+ struct hci_ext_fs rx_fs;
BT_DBG("chan %p", chan);
- if (chan->num_conf_req || chan->num_conf_rsp)
+ if (((chan->tx_fs.max_sdu == 0xFFFF) ||
+ (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
+ ((chan->rx_fs.max_sdu == 0xFFFF) ||
+ (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
+ return 0;
+
+ l2cap_aggregate_fs(&chan->tx_fs,
+ (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
+ l2cap_aggregate_fs(&chan->rx_fs,
+ (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
+ hci_chan_modify(chan, &tx_fs, &rx_fs);
+ return 1;
+}
+
+static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
+ struct hci_ext_fs *old,
+ struct hci_ext_fs *agg)
+{
+ *agg = *cur;
+ if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
+ u64 cur_rate;
+ u64 old_rate;
+ cur_rate = cur->max_sdu * 1000000ULL;
+ if (cur->sdu_arr_time)
+ cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
+ old_rate = old->max_sdu * 1000000ULL;
+ if (old->sdu_arr_time)
+ old_rate = div_u64(old_rate, old->sdu_arr_time);
+ cur_rate = cur_rate - old_rate;
+ agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
+ cur_rate);
+ }
+}
+
+static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
+{
+ struct hci_ext_fs tx_fs;
+ struct hci_ext_fs rx_fs;
+
+ BT_DBG("chan %p", chan);
+
+ if (((chan->tx_fs.max_sdu == 0xFFFF) ||
+ (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
+ ((chan->rx_fs.max_sdu == 0xFFFF) ||
+ (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
+ return 0;
+
+ l2cap_deaggregate_fs(&chan->tx_fs,
+ (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
+ l2cap_deaggregate_fs(&chan->rx_fs,
+ (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
+ hci_chan_modify(chan, &tx_fs, &rx_fs);
+ return 1;
+}
+
+static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct l2cap_pinfo *pi)
+{
+ struct hci_dev *hdev;
+ struct hci_conn *hcon;
+ struct hci_chan *chan;
+
+ hdev = hci_dev_get(A2MP_HCI_ID(amp_id));
+ if (!hdev)
+ return NULL;
+
+ BT_DBG("hdev %s", hdev->name);
+
+ hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
+ if (!hcon) {
+ /* drop the reference taken by hci_dev_get() above */
+ hci_dev_put(hdev);
+ return NULL;
+ }
+
+ chan = hci_chan_list_lookup_id(hdev, hcon->handle);
+ if (chan) {
+ l2cap_aggregate(chan, pi);
+ hci_chan_hold(chan);
+ return chan;
+ }
+
+ if (bt_sk(pi)->parent) {
+ /* Incoming connection */
+ chan = hci_chan_accept(hcon,
+ (struct hci_ext_fs *) &pi->local_fs,
+ (struct hci_ext_fs *) &pi->remote_fs);
+ } else {
+ /* Outgoing connection */
+ chan = hci_chan_create(hcon,
+ (struct hci_ext_fs *) &pi->local_fs,
+ (struct hci_ext_fs *) &pi->remote_fs);
+ }
+ return chan;
+}
+
+int l2cap_build_conf_req(struct sock *sk, void *data)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = pi->mode };
+ void *ptr = req->data;
+
+ BT_DBG("sk %p", sk);
+
+ if (pi->num_conf_req || pi->num_conf_rsp)
goto done;
- switch (chan->mode) {
+ switch (pi->mode) {
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
- if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
+ if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
break;
/* fall through */
default:
- chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
+ pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
break;
}
done:
- if (chan->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ if (pi->imtu != L2CAP_DEFAULT_MTU)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
- switch (chan->mode) {
+ switch (pi->mode) {
case L2CAP_MODE_BASIC:
- if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
- !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
+ if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
+ !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
break;
- rfc.mode = L2CAP_MODE_BASIC;
rfc.txwin_size = 0;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
@@ -1934,71 +3184,157 @@
break;
case L2CAP_MODE_ERTM:
- rfc.mode = L2CAP_MODE_ERTM;
- rfc.txwin_size = chan->tx_win;
- rfc.max_transmit = chan->max_tx;
- rfc.retrans_timeout = 0;
- rfc.monitor_timeout = 0;
+ l2cap_setup_txwin(pi);
+ if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
+ rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
+ else
+ rfc.txwin_size = pi->tx_win;
+ rfc.max_transmit = pi->max_tx;
+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
+ rfc.max_pdu_size = cpu_to_le16(pi->imtu);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
- if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
+ if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
+ pi->extended_control) {
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
+ pi->tx_win);
+ }
+
+ if (pi->amp_id) {
+ /* default best effort extended flow spec */
+ struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
+ sizeof(fs), (unsigned long) &fs);
+ }
+
+ if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
break;
- if (chan->fcs == L2CAP_FCS_NONE ||
- test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
- chan->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
+ if (pi->fcs == L2CAP_FCS_NONE ||
+ pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+ pi->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
}
break;
case L2CAP_MODE_STREAMING:
- rfc.mode = L2CAP_MODE_STREAMING;
rfc.txwin_size = 0;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
+ rfc.max_pdu_size = cpu_to_le16(pi->imtu);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
- if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
+ if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
+ pi->extended_control) {
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
+ }
+
+ if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
break;
- if (chan->fcs == L2CAP_FCS_NONE ||
- test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
- chan->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
+ if (pi->fcs == L2CAP_FCS_NONE ||
+ pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+ pi->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
}
break;
}
- req->dcid = cpu_to_le16(chan->dcid);
+ req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0);
return ptr - data;
}
-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+
+static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = pi->mode };
+ void *ptr = req->data;
+ u32 be_flush_to;
+
+ BT_DBG("sk %p", sk);
+
+ /* convert to milliseconds, round up */
+ be_flush_to = (pi->conn->hcon->hdev->amp_be_flush_to + 999) / 1000;
+
+ switch (pi->mode) {
+ case L2CAP_MODE_ERTM:
+ rfc.mode = L2CAP_MODE_ERTM;
+ rfc.txwin_size = pi->tx_win;
+ rfc.max_transmit = pi->max_tx;
+ if (pi->amp_move_id) {
+ rfc.retrans_timeout =
+ cpu_to_le16((3 * be_flush_to) + 500);
+ rfc.monitor_timeout =
+ cpu_to_le16((3 * be_flush_to) + 500);
+ } else {
+ rfc.retrans_timeout =
+ cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout =
+ cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ }
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+ if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
+ rfc.max_pdu_size = cpu_to_le16(pi->imtu);
+
+ break;
+
+ default:
+ return -ECONNREFUSED;
+ }
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
+
+ if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {
+ /* TODO assign fcs for br/edr based on socket config option */
+ if (pi->amp_move_id)
+ pi->local_conf.fcs = L2CAP_FCS_NONE;
+ else
+ pi->local_conf.fcs = L2CAP_FCS_CRC16;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+ pi->local_conf.fcs);
+
+ pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
+ }
+
+ req->dcid = cpu_to_le16(pi->dcid);
+ req->flags = cpu_to_le16(0);
+
+ return ptr - data;
+}
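
The ERTM timeouts above are derived from the controller's best-effort flush
timeout, assumed (consistent with the conversion comment) to be reported in
microseconds: it is rounded up to whole milliseconds, then padded so a flushed
PDU can still be retransmitted before the timer fires. As a hypothetical
helper:

	/* sketch of the same derivation */
	static u16 amp_ertm_timeout(u32 amp_be_flush_to_us)
	{
		u32 ms = (amp_be_flush_to_us + 999) / 1000;	/* round up */

		return 3 * ms + 500;	/* e.g. 10000 us -> 530 ms */
	}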
+
+static int l2cap_parse_conf_req(struct sock *sk, void *data)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
- void *req = chan->conf_req;
- int len = chan->conf_len;
+ void *req = pi->conf_req;
+ int len = pi->conf_len;
int type, hint, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_ext_fs fs;
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
- BT_DBG("chan %p", chan);
+ BT_DBG("sk %p", sk);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@ -2012,10 +3348,16 @@
break;
case L2CAP_CONF_FLUSH_TO:
- chan->flush_to = val;
+ pi->flush_to = val;
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
+ result = L2CAP_CONF_UNACCEPT;
+ else
+ pi->remote_conf.flush_to = val;
break;
case L2CAP_CONF_QOS:
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
+ result = L2CAP_CONF_UNACCEPT;
break;
case L2CAP_CONF_RFC:
@@ -2025,8 +3367,42 @@
case L2CAP_CONF_FCS:
if (val == L2CAP_FCS_NONE)
- set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
+ pi->remote_conf.fcs = val;
+ break;
+ case L2CAP_CONF_EXT_FS:
+ if (olen == sizeof(fs)) {
+ pi->conf_state |= L2CAP_CONF_EFS_RECV;
+ if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
+ result = L2CAP_CONF_UNACCEPT;
+ break;
+ }
+ memcpy(&fs, (void *) val, olen);
+ if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
+ result = L2CAP_CONF_FLOW_SPEC_REJECT;
+ break;
+ }
+ pi->remote_conf.flush_to =
+ le32_to_cpu(fs.flush_to);
+ pi->remote_fs.id = fs.id;
+ pi->remote_fs.type = fs.type;
+ pi->remote_fs.max_sdu =
+ le16_to_cpu(fs.max_sdu);
+ pi->remote_fs.sdu_arr_time =
+ le32_to_cpu(fs.sdu_arr_time);
+ pi->remote_fs.acc_latency =
+ le32_to_cpu(fs.acc_latency);
+ pi->remote_fs.flush_to =
+ le32_to_cpu(fs.flush_to);
+ }
+ break;
+
+ case L2CAP_CONF_EXT_WINDOW:
+ pi->extended_control = 1;
+ pi->remote_tx_win = val;
+ pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
+ pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
break;
default:
@@ -2039,30 +3415,30 @@
}
}
- if (chan->num_conf_rsp || chan->num_conf_req > 1)
+ if (pi->num_conf_rsp || pi->num_conf_req > 1)
goto done;
- switch (chan->mode) {
+ switch (pi->mode) {
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
- if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
- chan->mode = l2cap_select_mode(rfc.mode,
- chan->conn->feat_mask);
+ if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
+ pi->mode = l2cap_select_mode(rfc.mode,
+ pi->conn->feat_mask);
break;
}
- if (chan->mode != rfc.mode)
+ if (pi->mode != rfc.mode)
return -ECONNREFUSED;
break;
}
done:
- if (chan->mode != rfc.mode) {
+ if (pi->mode != rfc.mode) {
result = L2CAP_CONF_UNACCEPT;
- rfc.mode = chan->mode;
+ rfc.mode = pi->mode;
- if (chan->num_conf_rsp == 1)
+ if (pi->num_conf_rsp == 1)
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
@@ -2070,52 +3446,58 @@
}
+ if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
+ !(pi->conf_state & L2CAP_CONF_EFS_RECV))
+ return -ECONNREFUSED;
+
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
- if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ if (mtu < L2CAP_DEFAULT_MIN_MTU) {
result = L2CAP_CONF_UNACCEPT;
- else {
- chan->omtu = mtu;
- set_bit(CONF_MTU_DONE, &chan->conf_state);
+ pi->omtu = L2CAP_DEFAULT_MIN_MTU;
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ else {
+ pi->omtu = mtu;
+ pi->conf_state |= L2CAP_CONF_MTU_DONE;
+ }
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
switch (rfc.mode) {
case L2CAP_MODE_BASIC:
- chan->fcs = L2CAP_FCS_NONE;
- set_bit(CONF_MODE_DONE, &chan->conf_state);
+ pi->fcs = L2CAP_FCS_NONE;
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
break;
case L2CAP_MODE_ERTM:
- chan->remote_tx_win = rfc.txwin_size;
- chan->remote_max_tx = rfc.max_transmit;
+ if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
+ pi->remote_tx_win = rfc.txwin_size;
- if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+ pi->remote_max_tx = rfc.max_transmit;
- chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
rfc.retrans_timeout =
- le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
+ cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout =
- le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
+ cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- set_bit(CONF_MODE_DONE, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
+ sizeof(fs), (unsigned long) &fs);
+
break;
case L2CAP_MODE_STREAMING:
- if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+ pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
- chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
-
- set_bit(CONF_MODE_DONE, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
@@ -2126,28 +3508,190 @@
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
- rfc.mode = chan->mode;
+ rfc.mode = pi->mode;
+ }
+
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
+ !(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
+ pi->conf_state |= L2CAP_CONF_PEND_SENT;
+ result = L2CAP_CONF_PENDING;
+
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
+ pi->amp_id) {
+ struct hci_chan *chan;
+ /* Trigger logical link creation only on AMP */
+
+ chan = l2cap_chan_admit(pi->amp_id, pi);
+ if (!chan)
+ return -ECONNREFUSED;
+
+ chan->l2cap_sk = sk;
+ if (chan->state == BT_CONNECTED)
+ l2cap_create_cfm(chan, 0);
+ }
}
if (result == L2CAP_CONF_SUCCESS)
- set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
}
- rsp->scid = cpu_to_le16(chan->dcid);
+ rsp->scid = cpu_to_le16(pi->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(0x0000);
return ptr - data;
}
-static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
+static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_conf_rsp *rsp = data;
+ void *ptr = rsp->data;
+ void *req = pi->conf_req;
+ int len = pi->conf_len;
+ int type, hint, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_ext_fs fs;
+ u16 mtu = pi->omtu;
+ u16 tx_win = pi->remote_tx_win;
+ u16 result = L2CAP_CONF_SUCCESS;
+
+ BT_DBG("sk %p", sk);
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
+
+ hint = type & L2CAP_CONF_HINT;
+ type &= L2CAP_CONF_MASK;
+
+ switch (type) {
+ case L2CAP_CONF_MTU:
+ mtu = val;
+ break;
+
+ case L2CAP_CONF_FLUSH_TO:
+ if (pi->amp_move_id)
+ result = L2CAP_CONF_UNACCEPT;
+ else
+ pi->remote_conf.flush_to = val;
+ break;
+
+ case L2CAP_CONF_QOS:
+ if (pi->amp_move_id)
+ result = L2CAP_CONF_UNACCEPT;
+ break;
+
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *) val, olen);
+ if (pi->mode != rfc.mode ||
+ rfc.mode == L2CAP_MODE_BASIC)
+ result = L2CAP_CONF_UNACCEPT;
+ break;
+
+ case L2CAP_CONF_FCS:
+ pi->remote_conf.fcs = val;
+ break;
+
+ case L2CAP_CONF_EXT_FS:
+ if (olen == sizeof(fs)) {
+ memcpy(&fs, (void *) val, olen);
+ if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
+ result = L2CAP_CONF_FLOW_SPEC_REJECT;
+ else {
+ pi->remote_conf.flush_to =
+ le32_to_cpu(fs.flush_to);
+ }
+ }
+ break;
+
+ case L2CAP_CONF_EXT_WINDOW:
+ tx_win = val;
+ break;
+
+ default:
+ if (hint)
+ break;
+
+ result = L2CAP_CONF_UNKNOWN;
+ *((u8 *) ptr++) = type;
+ break;
+ }
+ }
+
+ BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
+ result, pi->mode, rfc.mode);
+
+ if (result == L2CAP_CONF_SUCCESS) {
+ /* Configure output options and let the other side know
+ * which ones we don't like. */
+
+ /* Don't allow mtu to decrease. */
+ if (mtu < pi->omtu)
+ result = L2CAP_CONF_UNACCEPT;
+
+ BT_DBG("mtu %d omtu %d", mtu, pi->omtu);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+
+ /* Don't allow extended transmit window to change. */
+ if (tx_win != pi->remote_tx_win) {
+ result = L2CAP_CONF_UNACCEPT;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
+ pi->remote_tx_win);
+ }
+
+ if (rfc.mode == L2CAP_MODE_ERTM) {
+ pi->remote_conf.retrans_timeout =
+ le16_to_cpu(rfc.retrans_timeout);
+ pi->remote_conf.monitor_timeout =
+ le16_to_cpu(rfc.monitor_timeout);
+
+ BT_DBG("remote conf monitor timeout %d",
+ pi->remote_conf.monitor_timeout);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+ }
+ }
+
+ if (result != L2CAP_CONF_SUCCESS)
+ goto done;
+
+ pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs;
+
+ if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG) {
+ pi->flush_to = pi->remote_conf.flush_to;
+ pi->retrans_timeout = pi->remote_conf.retrans_timeout;
+
+ if (pi->amp_move_id)
+ pi->monitor_timeout = pi->remote_conf.monitor_timeout;
+ else
+ pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+ BT_DBG("mode %d monitor timeout %d",
+ pi->mode, pi->monitor_timeout);
+
+ }
+
+done:
+ rsp->scid = cpu_to_le16(pi->dcid);
+ rsp->result = cpu_to_le16(result);
+ rsp->flags = cpu_to_le16(0x0000);
+
+ return ptr - data;
+}
+
+static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
void *ptr = req->data;
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc;
- BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
+ BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
@@ -2156,101 +3700,94 @@
case L2CAP_CONF_MTU:
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
- chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ pi->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
- chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ pi->imtu = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
break;
case L2CAP_CONF_FLUSH_TO:
- chan->flush_to = val;
+ pi->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to);
+ 2, pi->flush_to);
break;
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
- if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
- rfc.mode != chan->mode)
+ if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
+ rfc.mode != pi->mode)
return -ECONNREFUSED;
- chan->fcs = 0;
+ pi->fcs = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
break;
+
+ case L2CAP_CONF_EXT_WINDOW:
+ pi->tx_win = val;
+
+ if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
+ pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
+ 2, pi->tx_win);
+ break;
+
+ default:
+ break;
}
}
- if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
+ if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
return -ECONNREFUSED;
- chan->mode = rfc.mode;
+ pi->mode = rfc.mode;
if (*result == L2CAP_CONF_SUCCESS) {
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
- chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
break;
case L2CAP_MODE_STREAMING:
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
}
}
- req->dcid = cpu_to_le16(chan->dcid);
+ req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0x0000);
return ptr - data;
}
-static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
+static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
- BT_DBG("chan %p", chan);
+ BT_DBG("sk %p", sk);
- rsp->scid = cpu_to_le16(chan->dcid);
+ rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(flags);
return ptr - data;
}
-void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
{
- struct l2cap_conn_rsp rsp;
- struct l2cap_conn *conn = chan->conn;
- u8 buf[128];
-
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, chan->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
- return;
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
- chan->num_conf_req++;
-}
-
-static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
-{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc;
- BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
+ BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
- if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
+ if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
return;
while (len >= L2CAP_CONF_OPT_SIZE) {
@@ -2267,15 +3804,125 @@
done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
- chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
break;
case L2CAP_MODE_STREAMING:
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ pi->mps = le16_to_cpu(rfc.max_pdu_size);
}
}
+static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int type, olen;
+ unsigned long val;
+ struct l2cap_conf_ext_fs fs;
+
+ BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if ((type == L2CAP_CONF_EXT_FS) &&
+ (olen == sizeof(struct l2cap_conf_ext_fs))) {
+ memcpy(&fs, (void *)val, olen);
+ pi->local_fs.id = fs.id;
+ pi->local_fs.type = fs.type;
+ pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
+ pi->local_fs.sdu_arr_time =
+ le32_to_cpu(fs.sdu_arr_time);
+ pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
+ pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
+ break;
+ }
+ }
+
+}
+
+static int l2cap_finish_amp_move(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ int err;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+
+ if (pi->ampcon)
+ pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
+ else
+ pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
+
+ err = l2cap_setup_resegment(sk);
+
+ return err;
+}
+
+static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
+ u16 result)
+{
+ int err = 0;
+ struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+
+ BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);
+
+ if (pi->reconf_state == L2CAP_RECONF_NONE)
+ return -ECONNREFUSED;
+
+ if (result == L2CAP_CONF_SUCCESS) {
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ int type, olen;
+ unsigned long val;
+
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+ if (type == L2CAP_CONF_RFC) {
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
+ if (rfc.mode != pi->mode &&
+ rfc.mode != L2CAP_MODE_ERTM) {
+ err = -ECONNREFUSED;
+ goto done;
+ }
+ break;
+ }
+ }
+ }
+
+done:
+ l2cap_ertm_stop_ack_timer(pi);
+ l2cap_ertm_stop_retrans_timer(pi);
+ l2cap_ertm_stop_monitor_timer(pi);
+
+ if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
+ l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;
+
+ /* Respond to poll */
+ err = l2cap_answer_move_poll(sk);
+
+ } else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {
+
+ /* If moving to BR/EDR, use default timeout defined by
+ * the spec */
+ if (pi->amp_move_id == 0)
+ pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+ if (pi->mode == L2CAP_MODE_ERTM) {
+ l2cap_ertm_tx(sk, NULL, NULL,
+ L2CAP_ERTM_EVENT_EXPLICIT_POLL);
+ pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
+ }
+ }
+
+ return err;
+}
+
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
@@ -2296,11 +3943,14 @@
return 0;
}
-static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u8 *data, u8 rsp_code,
+ u8 amp_id)
{
+ struct l2cap_chan_list *list = &conn->chan_list;
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
- struct l2cap_chan *chan = NULL, *pchan;
struct sock *parent, *sk = NULL;
int result, status = L2CAP_CS_NO_INFO;
@@ -2310,14 +3960,12 @@
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
- pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
- if (!pchan) {
+ parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
+ if (!parent) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
}
- parent = pchan->sk;
-
bh_lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
@@ -2336,63 +3984,71 @@
goto response;
}
- chan = pchan->ops->new_connection(pchan->data);
- if (!chan)
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
+ if (!sk)
goto response;
- sk = chan->sk;
-
- write_lock_bh(&conn->chan_lock);
+ write_lock_bh(&list->lock);
/* Check if we already have channel with that dcid */
- if (__l2cap_get_chan_by_dcid(conn, scid)) {
- write_unlock_bh(&conn->chan_lock);
+ if (__l2cap_get_chan_by_dcid(list, scid)) {
+ write_unlock_bh(&list->lock);
sock_set_flag(sk, SOCK_ZAPPED);
- chan->ops->close(chan->data);
+ l2cap_sock_kill(sk);
+ sk = NULL;
goto response;
}
hci_conn_hold(conn->hcon);
+ l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
- chan->psm = psm;
- chan->dcid = scid;
+ l2cap_pi(sk)->psm = psm;
+ l2cap_pi(sk)->dcid = scid;
bt_accept_enqueue(parent, sk);
- __l2cap_chan_add(conn, chan);
+ __l2cap_chan_add(conn, sk);
+ dcid = l2cap_pi(sk)->scid;
+ l2cap_pi(sk)->amp_id = amp_id;
- dcid = chan->scid;
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
- __set_chan_timer(chan, sk->sk_sndtimeo);
-
- chan->ident = cmd->ident;
+ l2cap_pi(sk)->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
- if (l2cap_check_security(chan)) {
+ if (l2cap_check_security(sk)) {
if (bt_sk(sk)->defer_setup) {
- l2cap_state_change(chan, BT_CONNECT2);
+ sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
parent->sk_data_ready(parent, 0);
} else {
- l2cap_state_change(chan, BT_CONFIG);
- result = L2CAP_CR_SUCCESS;
+ /* Force pending result for AMP controllers.
+ * The connection will succeed after the
+ * physical link is up. */
+ if (amp_id) {
+ sk->sk_state = BT_CONNECT2;
+ result = L2CAP_CR_PEND;
+ } else {
+ sk->sk_state = BT_CONFIG;
+ result = L2CAP_CR_SUCCESS;
+ }
status = L2CAP_CS_NO_INFO;
}
} else {
- l2cap_state_change(chan, BT_CONNECT2);
+ sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
}
} else {
- l2cap_state_change(chan, BT_CONNECT2);
+ sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
}
- write_unlock_bh(&conn->chan_lock);
+ write_unlock_bh(&list->lock);
response:
bh_unlock_sock(parent);
@@ -2402,9 +4058,9 @@
rsp.dcid = cpu_to_le16(dcid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(status);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+ l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
- if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
+ if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
struct l2cap_info_req info;
info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
@@ -2418,15 +4074,22 @@
L2CAP_INFO_REQ, sizeof(info), &info);
}
- if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+ if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
result == L2CAP_CR_SUCCESS) {
u8 buf[128];
- set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
- chan->num_conf_req++;
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
}
+ return sk;
+}
+
+static inline int l2cap_connect_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
return 0;
}
@@ -2434,7 +4097,6 @@
{
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
- struct l2cap_chan *chan;
struct sock *sk;
u8 req[128];
@@ -2446,46 +4108,46 @@
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
if (scid) {
- chan = l2cap_get_chan_by_scid(conn, scid);
- if (!chan)
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+ if (!sk)
return -EFAULT;
} else {
- chan = l2cap_get_chan_by_ident(conn, cmd->ident);
- if (!chan)
+ sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
+ if (!sk)
return -EFAULT;
}
- sk = chan->sk;
-
switch (result) {
case L2CAP_CR_SUCCESS:
- l2cap_state_change(chan, BT_CONFIG);
- chan->ident = 0;
- chan->dcid = dcid;
- clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
+ sk->sk_state = BT_CONFIG;
+ l2cap_pi(sk)->ident = 0;
+ l2cap_pi(sk)->dcid = dcid;
+ l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
- if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
break;
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, req), req);
- chan->num_conf_req++;
+ l2cap_build_conf_req(sk, req), req);
+ l2cap_pi(sk)->num_conf_req++;
break;
case L2CAP_CR_PEND:
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
break;
default:
/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan, BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, HZ / 5);
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
break;
}
- l2cap_chan_del(chan, ECONNREFUSED);
+ l2cap_chan_del(sk, ECONNREFUSED);
break;
}
@@ -2493,38 +4155,54 @@
return 0;
}
-static inline void set_default_fcs(struct l2cap_chan *chan)
+static inline void set_default_fcs(struct l2cap_pinfo *pi)
{
/* FCS is enabled only in ERTM or streaming mode, if one or both
* sides request it.
*/
- if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
- chan->fcs = L2CAP_FCS_NONE;
- else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
- chan->fcs = L2CAP_FCS_CRC16;
+ if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
+ pi->fcs = L2CAP_FCS_NONE;
+ else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
+ pi->fcs = L2CAP_FCS_CRC16;
}
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
u16 dcid, flags;
- u8 rsp[64];
- struct l2cap_chan *chan;
+ u8 rspbuf[64];
+ struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
struct sock *sk;
int len;
+ u8 amp_move_reconf = 0;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
- chan = l2cap_get_chan_by_scid(conn, dcid);
- if (!chan)
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
+ if (!sk)
return -ENOENT;
- sk = chan->sk;
+ BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
+ "reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
+ sk->sk_state, l2cap_pi(sk)->rx_state,
+ l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
+ l2cap_pi(sk)->amp_move_id);
- if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
+ /* Detect a reconfig request due to channel move between
+ * BR/EDR and AMP
+ */
+ if (sk->sk_state == BT_CONNECTED &&
+ l2cap_pi(sk)->rx_state ==
+ L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
+ l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;
+
+ if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
+ amp_move_reconf = 1;
+
+ if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
struct l2cap_cmd_rej rej;
rej.reason = cpu_to_le16(0x0002);
@@ -2535,61 +4213,80 @@
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
- if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
+ if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_REJECT, flags), rsp);
+ l2cap_build_conf_rsp(sk, rspbuf,
+ L2CAP_CONF_REJECT, flags), rspbuf);
goto unlock;
}
/* Store config. */
- memcpy(chan->conf_req + chan->conf_len, req->data, len);
- chan->conf_len += len;
+ memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
+ l2cap_pi(sk)->conf_len += len;
if (flags & 0x0001) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_SUCCESS, 0x0001), rsp);
+ l2cap_build_conf_rsp(sk, rspbuf,
+ L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
goto unlock;
}
/* Complete config. */
- len = l2cap_parse_conf_req(chan, rsp);
+ if (!amp_move_reconf)
+ len = l2cap_parse_conf_req(sk, rspbuf);
+ else
+ len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);
+
if (len < 0) {
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
goto unlock;
}
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
- chan->num_conf_rsp++;
+ l2cap_pi(sk)->conf_ident = cmd->ident;
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
+ rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
+ !l2cap_pi(sk)->amp_id) {
+ /* Send success response right after pending if using
+ * lockstep config on BR/EDR
+ */
+ rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
+ }
/* Reset config buffer. */
- chan->conf_len = 0;
+ l2cap_pi(sk)->conf_len = 0;
- if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
+ if (amp_move_reconf)
goto unlock;
- if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
- set_default_fcs(chan);
+ l2cap_pi(sk)->num_conf_rsp++;
- l2cap_state_change(chan, BT_CONNECTED);
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
+ goto unlock;
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
+ set_default_fcs(l2cap_pi(sk));
+
+ sk->sk_state = BT_CONNECTED;
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
+ l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
+ l2cap_ertm_init(sk);
l2cap_chan_ready(sk);
goto unlock;
}
- if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
u8 buf[64];
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
- chan->num_conf_req++;
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
}
unlock:
@@ -2601,8 +4298,8 @@
{
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
- struct l2cap_chan *chan;
struct sock *sk;
+ struct l2cap_pinfo *pi;
int len = cmd->len - sizeof(*rsp);
scid = __le16_to_cpu(rsp->scid);
@@ -2612,38 +4309,84 @@
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
scid, flags, result);
- chan = l2cap_get_chan_by_scid(conn, scid);
- if (!chan)
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+ if (!sk)
return 0;
- sk = chan->sk;
+ pi = l2cap_pi(sk);
+
+ if (pi->reconf_state != L2CAP_RECONF_NONE) {
+ l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
+ goto done;
+ }
switch (result) {
case L2CAP_CONF_SUCCESS:
- l2cap_conf_rfc_get(chan, rsp->data, len);
+ if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
+ !(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
+ /* Lockstep procedure requires a pending response
+ * before success.
+ */
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
+ goto done;
+ }
+
+ l2cap_conf_rfc_get(sk, rsp->data, len);
break;
+ case L2CAP_CONF_PENDING:
+ if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
+ goto done;
+ }
+
+ l2cap_conf_rfc_get(sk, rsp->data, len);
+
+ pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;
+
+ l2cap_conf_ext_fs_get(sk, rsp->data, len);
+
+ if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
+ struct hci_chan *chan;
+
+ /* Already sent a 'pending' response, so set up
+ * the logical link now
+ */
+ chan = l2cap_chan_admit(pi->amp_id, pi);
+ if (!chan) {
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
+ goto done;
+ }
+
+ chan->l2cap_sk = sk;
+ if (chan->state == BT_CONNECTED)
+ l2cap_create_cfm(chan, 0);
+ }
+
+ goto done;
+
case L2CAP_CONF_UNACCEPT:
- if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
+ if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
char req[64];
if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
goto done;
}
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
- len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- req, &result);
+ len = l2cap_parse_conf_rsp(sk, rsp->data,
+ len, req, &result);
if (len < 0) {
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
goto done;
}
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ, len, req);
- chan->num_conf_req++;
+ pi->num_conf_req++;
if (result != L2CAP_CONF_SUCCESS)
goto done;
break;
@@ -2651,25 +4394,24 @@
default:
sk->sk_err = ECONNRESET;
- __set_chan_timer(chan, HZ * 5);
- l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ l2cap_sock_set_timer(sk, HZ * 5);
+ l2cap_send_disconn_req(conn, sk, ECONNRESET);
goto done;
}
if (flags & 0x01)
goto done;
- set_bit(CONF_INPUT_DONE, &chan->conf_state);
+ pi->conf_state |= L2CAP_CONF_INPUT_DONE;
- if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
- set_default_fcs(chan);
+ if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
+ set_default_fcs(pi);
- l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ sk->sk_state = BT_CONNECTED;
+
+ if (pi->mode == L2CAP_MODE_ERTM ||
+ pi->mode == L2CAP_MODE_STREAMING)
+ l2cap_ertm_init(sk);
l2cap_chan_ready(sk);
}
@@ -2684,7 +4426,6 @@
struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
struct l2cap_disconn_rsp rsp;
u16 dcid, scid;
- struct l2cap_chan *chan;
struct sock *sk;
scid = __le16_to_cpu(req->scid);
@@ -2692,31 +4433,43 @@
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
- chan = l2cap_get_chan_by_scid(conn, dcid);
- if (!chan)
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
+ if (!sk)
return 0;
- sk = chan->sk;
-
- rsp.dcid = cpu_to_le16(chan->scid);
- rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
- sk->sk_shutdown = SHUTDOWN_MASK;
+ /* Only do cleanup if a disconnect request was not sent already */
+ if (sk->sk_state != BT_DISCONN) {
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ skb_queue_purge(TX_QUEUE(sk));
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+ skb_queue_purge(SREJ_QUEUE(sk));
+
+ __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
+ __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
+ }
+ }
/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan, BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, HZ / 5);
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
bh_unlock_sock(sk);
return 0;
}
- l2cap_chan_del(chan, ECONNRESET);
+ l2cap_chan_del(sk, ECONNRESET);
+
bh_unlock_sock(sk);
- chan->ops->close(chan->data);
+ l2cap_sock_kill(sk);
return 0;
}
@@ -2724,7 +4477,6 @@
{
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
u16 dcid, scid;
- struct l2cap_chan *chan;
struct sock *sk;
scid = __le16_to_cpu(rsp->scid);
@@ -2732,25 +4484,23 @@
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
- chan = l2cap_get_chan_by_scid(conn, scid);
- if (!chan)
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+ if (!sk)
return 0;
- sk = chan->sk;
-
/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan,BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, HZ / 5);
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
bh_unlock_sock(sk);
return 0;
}
- l2cap_chan_del(chan, 0);
+ l2cap_chan_del(sk, 0);
bh_unlock_sock(sk);
- chan->ops->close(chan->data);
+ l2cap_sock_kill(sk);
return 0;
}
@@ -2771,7 +4521,7 @@
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
- | L2CAP_FEAT_FCS;
+ | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
@@ -2838,6 +4588,7 @@
l2cap_conn_start(conn);
}
} else if (type == L2CAP_IT_FIXED_CHAN) {
+ conn->fc_mask = rsp->data[0];
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -2847,6 +4598,878 @@
return 0;
}
+static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
+ struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
+{
+ struct l2cap_move_chan_req req;
+ u8 ident;
+
+ BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
+ (int) dest_amp_id);
+
+ ident = l2cap_get_ident(conn);
+ if (pi)
+ pi->ident = ident;
+
+ req.icid = cpu_to_le16(icid);
+ req.dest_amp_id = dest_amp_id;
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
+}
+
+static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid, u16 result)
+{
+ struct l2cap_move_chan_rsp rsp;
+
+ BT_DBG("icid %d, result %d", (int) icid, (int) result);
+
+ rsp.icid = cpu_to_le16(icid);
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
+}
+
+static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
+ struct l2cap_pinfo *pi, u16 icid, u16 result)
+{
+ struct l2cap_move_chan_cfm cfm;
+ u8 ident;
+
+ BT_DBG("icid %d, result %d", (int) icid, (int) result);
+
+ ident = l2cap_get_ident(conn);
+ if (pi)
+ pi->ident = ident;
+
+ cfm.icid = cpu_to_le16(icid);
+ cfm.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
+}
+
+static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid)
+{
+ struct l2cap_move_chan_cfm_rsp rsp;
+
+ BT_DBG("icid %d", (int) icid);
+
+ rsp.icid = cpu_to_le16(icid);
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
+}
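
Taken together, the four senders above implement the channel-move signalling
consumed by the handlers that follow. A successful initiator-driven move
exchanges, in order:

	initiator                          responder
	MOVE_CHANNEL_REQ (icid, dest)  -->
	                               <-- MOVE_CHANNEL_RSP (success/pending)
	MOVE_CHANNEL_CFM (confirmed)   -->
	                               <-- MOVE_CHANNEL_CFM_RSP (icid)

with the initiator-side steps guarded by L2CAP_MOVE_TIMEOUT and
L2CAP_MOVE_ERTX_TIMEOUT, as set in the handlers below.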
+
+static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ struct l2cap_create_chan_req *req =
+ (struct l2cap_create_chan_req *) data;
+ struct sock *sk;
+ u16 psm, scid;
+
+ psm = le16_to_cpu(req->psm);
+ scid = le16_to_cpu(req->scid);
+
+ BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
+ (int) req->amp_id);
+
+ if (req->amp_id) {
+ struct hci_dev *hdev;
+
+ /* Validate AMP controller id */
+ hdev = hci_dev_get(A2MP_HCI_ID(req->amp_id));
+ if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
+ struct l2cap_create_chan_rsp rsp;
+
+ rsp.dcid = 0;
+ rsp.scid = cpu_to_le16(scid);
+ rsp.result = cpu_to_le16(L2CAP_CREATE_CHAN_REFUSED_CONTROLLER);
+ rsp.status = cpu_to_le16(L2CAP_CREATE_CHAN_STATUS_NONE);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (hdev)
+ hci_dev_put(hdev);
+
+ return 0;
+ }
+
+ hci_dev_put(hdev);
+ }
+
+ sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+ req->amp_id);
+
+ /* l2cap_create_connect() may fail and return NULL */
+ if (sk)
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
+
+ if (sk && req->amp_id)
+ amp_accept_physical(conn, req->amp_id, sk);
+
+ return 0;
+}
+
+static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ BT_DBG("conn %p", conn);
+
+ return l2cap_connect_rsp(conn, cmd, data);
+}
+
+static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
+ u16 icid = 0;
+ u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
+
+ icid = le16_to_cpu(req->icid);
+
+ BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);
+
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk)
+ goto send_move_response;
+
+ lock_sock(sk);
+ pi = l2cap_pi(sk);
+
+ if (pi->scid < L2CAP_CID_DYN_START ||
+ (pi->mode != L2CAP_MODE_ERTM &&
+ pi->mode != L2CAP_MODE_STREAMING)) {
+ goto send_move_response;
+ }
+
+ if (pi->amp_id == req->dest_amp_id) {
+ result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
+ goto send_move_response;
+ }
+
+ if (req->dest_amp_id) {
+ struct hci_dev *hdev;
+ hdev = hci_dev_get(A2MP_HCI_ID(req->dest_amp_id));
+ if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
+ if (hdev)
+ hci_dev_put(hdev);
+
+ result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
+ goto send_move_response;
+ }
+ }
+
+ if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
+ pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
+ bacmp(conn->src, conn->dst) > 0) {
+ result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
+ goto send_move_response;
+ }
+
+ if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
+ result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
+ goto send_move_response;
+ }
+
+ pi->amp_move_cmd_ident = cmd->ident;
+ pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
+ l2cap_amp_move_setup(sk);
+ pi->amp_move_id = req->dest_amp_id;
+ icid = pi->dcid;
+
+ if (req->dest_amp_id == 0) {
+ /* Moving to BR/EDR */
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
+ result = L2CAP_MOVE_CHAN_PENDING;
+ } else {
+ pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
+ result = L2CAP_MOVE_CHAN_SUCCESS;
+ }
+ } else {
+ pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
+ amp_accept_physical(pi->conn, req->dest_amp_id, sk);
+ result = L2CAP_MOVE_CHAN_PENDING;
+ }
+
+send_move_response:
+ l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+
+ if (sk)
+ release_sock(sk);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
+ struct sock *sk;
+ struct l2cap_pinfo *pi;
+ u16 icid, result;
+
+ icid = le16_to_cpu(rsp->icid);
+ result = le16_to_cpu(rsp->result);
+
+ BT_DBG("icid %d, result %d", (int) icid, (int) result);
+
+ switch (result) {
+ case L2CAP_MOVE_CHAN_SUCCESS:
+ case L2CAP_MOVE_CHAN_PENDING:
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk) {
+ l2cap_send_move_chan_cfm(conn, NULL, icid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ break;
+ }
+
+ lock_sock(sk);
+ pi = l2cap_pi(sk);
+
+ l2cap_sock_clear_timer(sk);
+ if (result == L2CAP_MOVE_CHAN_PENDING)
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);
+
+ if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
+ /* Move confirm will be sent when logical link
+ * is complete.
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
+ } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
+ pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
+ /* Logical link is up or moving to BR/EDR,
+ * proceed with move */
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
+ } else {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_CONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ }
+ } else if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
+ struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
+ struct hci_chan *chan;
+ /* Moving to AMP */
+ if (result == L2CAP_MOVE_CHAN_SUCCESS) {
+ /* Remote is ready, send confirm immediately
+ * after logical link is ready
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
+ } else {
+ /* Both logical link and move success
+ * are required to confirm
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
+ }
+ pi->remote_fs = default_fs;
+ pi->local_fs = default_fs;
+ chan = l2cap_chan_admit(pi->amp_move_id, pi);
+ if (!chan) {
+ /* Logical link not available */
+ l2cap_send_move_chan_cfm(conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ break;
+ }
+ if (chan->state == BT_CONNECTED) {
+ /* Logical link is already ready to go */
+ pi->ampchan = chan;
+ pi->ampcon = chan->conn;
+ pi->ampcon->l2cap_data = pi->conn;
+ if (result == L2CAP_MOVE_CHAN_SUCCESS) {
+ /* Can confirm now */
+ l2cap_send_move_chan_cfm(conn, pi,
+ pi->scid,
+ L2CAP_MOVE_CHAN_CONFIRMED);
+ } else {
+ /* Now only need move success
+ * required to confirm
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
+ }
+ } else
+ chan->l2cap_sk = sk;
+ } else {
+ /* Any other amp move state means the move failed. */
+ l2cap_send_move_chan_cfm(conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ }
+ break;
+ default:
+ /* Failed (including collision case) */
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk) {
+ /* Could not locate channel, icid is best guess */
+ l2cap_send_move_chan_cfm(conn, NULL, icid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ break;
+ }
+
+ lock_sock(sk);
+ pi = l2cap_pi(sk);
+
+ l2cap_sock_clear_timer(sk);
+
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
+ pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
+ else {
+ /* Cleanup - cancel move */
+ pi->amp_move_id = pi->amp_id;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ l2cap_amp_move_revert(sk);
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ }
+ } else {
+ /* State is STABLE so the confirm response is
+ * ignored.
+ */
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ }
+
+ l2cap_send_move_chan_cfm(conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ break;
+ }
+
+ if (sk)
+ release_sock(sk);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
+ struct sock *sk;
+ u16 icid, result;
+
+ icid = le16_to_cpu(cfm->icid);
+ result = le16_to_cpu(cfm->result);
+
+ BT_DBG("icid %d, result %d", (int) icid, (int) result);
+
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk) {
+ BT_DBG("Bad channel (%d)", (int) icid);
+ goto send_move_confirm_response;
+ }
+
+ lock_sock(sk);
+
+ if (l2cap_pi(sk)->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
+ l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
+ if ((!l2cap_pi(sk)->amp_id) &&
+ (l2cap_pi(sk)->ampchan)) {
+ /* Have moved off of AMP, free the channel */
+ hci_chan_put(l2cap_pi(sk)->ampchan);
+ if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
+ l2cap_deaggregate(l2cap_pi(sk)->ampchan,
+ l2cap_pi(sk));
+ l2cap_pi(sk)->ampchan = NULL;
+ l2cap_pi(sk)->ampcon = NULL;
+ }
+ l2cap_amp_move_success(sk);
+ } else {
+ l2cap_pi(sk)->amp_move_id = l2cap_pi(sk)->amp_id;
+ l2cap_amp_move_revert(sk);
+ }
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ } else if (l2cap_pi(sk)->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
+ BT_DBG("Bad AMP_MOVE_STATE (%d)", l2cap_pi(sk)->amp_move_state);
+ }
+
+send_move_confirm_response:
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+ if (sk)
+ release_sock(sk);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u8 *data)
+{
+ struct l2cap_move_chan_cfm_rsp *rsp =
+ (struct l2cap_move_chan_cfm_rsp *) data;
+ struct sock *sk;
+ u16 icid;
+
+ icid = le16_to_cpu(rsp->icid);
+
+ BT_DBG("icid %d", (int) icid);
+
+ read_lock(&conn->chan_list.lock);
+ sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
+ read_unlock(&conn->chan_list.lock);
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+
+ l2cap_sock_clear_timer(sk);
+
+ if (l2cap_pi(sk)->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
+ l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
+
+ if (!l2cap_pi(sk)->amp_id) {
+ /* Have moved off of AMP, free the channel */
+ l2cap_pi(sk)->ampcon = NULL;
+ if (l2cap_pi(sk)->ampchan) {
+ hci_chan_put(l2cap_pi(sk)->ampchan);
+ if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
+ l2cap_deaggregate(l2cap_pi(sk)->ampchan,
+ l2cap_pi(sk));
+ }
+ l2cap_pi(sk)->ampchan = NULL;
+ }
+
+ l2cap_amp_move_success(sk);
+
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ }
+
+ release_sock(sk);
+
+ return 0;
+}
+
+static void l2cap_amp_signal_worker(struct work_struct *work)
+{
+ int err = 0;
+ struct l2cap_amp_signal_work *ampwork =
+ container_of(work, struct l2cap_amp_signal_work, work);
+
+ switch (ampwork->cmd.code) {
+ case L2CAP_MOVE_CHAN_REQ:
+		err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
+ ampwork->data);
+ break;
+
+ case L2CAP_MOVE_CHAN_RSP:
+		err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
+ ampwork->data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM:
+		err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
+ ampwork->data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM_RSP:
+ err = l2cap_move_channel_confirm_rsp(ampwork->conn,
+			&ampwork->cmd, ampwork->data);
+ break;
+
+ default:
+ BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err) {
+ struct l2cap_cmd_rej rej;
+ BT_DBG("error %d", err);
+
+ /* In this context, commands are only rejected with
+ * "command not understood", code 0.
+ */
+ rej.reason = cpu_to_le16(0);
+ l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
+ L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+ }
+
+ kfree_skb(ampwork->skb);
+ kfree(ampwork);
+}
+
+void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
+ struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
+ (int) local_id, (int) remote_id, sk);
+
+ lock_sock(sk);
+
+ if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
+ release_sock(sk);
+ return;
+ }
+
+ pi = l2cap_pi(sk);
+
+ if (sk->sk_state != BT_CONNECTED) {
+ if (bt_sk(sk)->parent) {
+ struct l2cap_conn_rsp rsp;
+ char buf[128];
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+
+ /* Incoming channel on AMP */
+ if (result == L2CAP_CREATE_CHAN_SUCCESS) {
+ /* Send successful response */
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ } else {
+ /* Send negative response */
+ rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ }
+
+ l2cap_send_cmd(pi->conn, pi->ident,
+ L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (result == L2CAP_CREATE_CHAN_SUCCESS) {
+ sk->sk_state = BT_CONFIG;
+ pi->conf_state |= L2CAP_CONF_REQ_SENT;
+ l2cap_send_cmd(pi->conn,
+ l2cap_get_ident(pi->conn),
+ L2CAP_CONF_REQ,
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
+ }
+ } else {
+ /* Outgoing channel on AMP */
+ if (result != L2CAP_CREATE_CHAN_SUCCESS) {
+ /* Revert to BR/EDR connect */
+ l2cap_send_conn_req(sk);
+ } else {
+ pi->amp_id = local_id;
+ l2cap_send_create_chan_req(sk, remote_id);
+ }
+ }
+ } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
+ pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ l2cap_amp_move_setup(sk);
+ pi->amp_move_id = local_id;
+ pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;
+
+ l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ } else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
+ pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ struct hci_chan *chan;
+ struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
+ pi->remote_fs = default_fs;
+ pi->local_fs = default_fs;
+ chan = l2cap_chan_admit(local_id, pi);
+ if (chan) {
+ if (chan->state == BT_CONNECTED) {
+ /* Logical link is ready to go */
+ pi->ampchan = chan;
+ pi->ampcon = chan->conn;
+ pi->ampcon->l2cap_data = pi->conn;
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_SUCCESS);
+ } else {
+ /* Wait for logical link to be ready */
+ chan->l2cap_sk = sk;
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
+ }
+ } else {
+ /* Logical link not available */
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
+ }
+ } else {
+ BT_DBG("result %d, role %d, local_busy %d", result,
+ (int) pi->amp_move_role,
+ (int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));
+
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ if (result == -EINVAL)
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
+ else
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
+ }
+
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+
+ if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+ l2cap_rmem_available(sk))
+ l2cap_ertm_tx(sk, 0, 0,
+ L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
+
+ /* Restart data transmission */
+ l2cap_ertm_send(sk);
+ }
+
+ release_sock(sk);
+}
+
+int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
+{
+ struct l2cap_pinfo *pi;
+ struct sock *sk;
+
+ BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);
+
+ sk = chan->l2cap_sk;
+
+ BT_DBG("sk %p", sk);
+
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
+ release_sock(sk);
+ return 0;
+ }
+
+ pi = l2cap_pi(sk);
+
+ if ((!status) && (chan != NULL)) {
+ pi->ampchan = chan;
+ pi->ampcon = chan->conn;
+ pi->ampcon->l2cap_data = pi->conn;
+
+ if (sk->sk_state != BT_CONNECTED) {
+ struct l2cap_conf_rsp rsp;
+
+ /* Must use spinlock to prevent concurrent
+ * execution of l2cap_config_rsp()
+ */
+ bh_lock_sock(sk);
+ l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(sk, &rsp,
+ L2CAP_CONF_SUCCESS, 0), &rsp);
+ pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
+ set_default_fcs(l2cap_pi(sk));
+
+ sk->sk_state = BT_CONNECTED;
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
+ l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
+ l2cap_ertm_init(sk);
+
+ l2cap_chan_ready(sk);
+ }
+ bh_unlock_sock(sk);
+ } else if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
+ /* Move confirm will be sent after a success
+ * response is received
+ */
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
+ } else if (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
+ else if (pi->amp_move_role ==
+ L2CAP_AMP_MOVE_INITIATOR) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_SUCCESS);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ } else if (pi->amp_move_role ==
+ L2CAP_AMP_MOVE_RESPONDER) {
+ pi->amp_move_state =
+ L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_SUCCESS);
+ }
+ } else {
+ /* Move was not in expected state, free the
+ * logical link
+ */
+ hci_chan_put(pi->ampchan);
+ pi->ampcon = NULL;
+ pi->ampchan = NULL;
+ }
+ } else {
+ /* Logical link setup failed. */
+
+ if (sk->sk_state != BT_CONNECTED)
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ l2cap_amp_move_revert(sk);
+ l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ l2cap_send_move_chan_rsp(pi->conn,
+ pi->amp_move_cmd_ident, pi->dcid,
+ L2CAP_MOVE_CHAN_REFUSED_CONFIG);
+ } else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ if ((pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
+ (pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
+ /* Remote has only sent pending or
+ * success responses, clean up
+ */
+ l2cap_amp_move_revert(sk);
+ l2cap_pi(sk)->amp_move_role =
+ L2CAP_AMP_MOVE_NONE;
+ pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
+ }
+
+ /* Other amp move states imply that the move
+ * has already aborted
+ */
+ l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
+ L2CAP_MOVE_CHAN_UNCONFIRMED);
+ l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
+ }
+
+ pi->ampcon = NULL;
+ pi->ampchan = NULL;
+ }
+
+ release_sock(sk);
+ return 0;
+}
+
+static void l2cap_logical_link_worker(struct work_struct *work)
+{
+ struct l2cap_logical_link_work *log_link_work =
+ container_of(work, struct l2cap_logical_link_work, work);
+
+ l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
+ kfree(log_link_work);
+}
+
+static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
+{
+ struct l2cap_logical_link_work *amp_work;
+
+ amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
+ if (!amp_work)
+ return -ENOMEM;
+
+	INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
+ amp_work->chan = chan;
+ amp_work->status = status;
+	if (!queue_work(_l2cap_wq, &amp_work->work)) {
+ kfree(amp_work);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
+{
+ struct l2cap_conn *conn = chan->conn->l2cap_data;
+
+ BT_DBG("chan %p conn %p status %d", chan, conn, status);
+
+ /* TODO: if failed status restore previous fs */
+ return 0;
+}
+
+int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
+{
+ struct l2cap_chan_list *l;
+ struct l2cap_conn *conn = chan->conn->l2cap_data;
+ struct sock *sk;
+
+ BT_DBG("chan %p conn %p", chan, conn);
+
+ if (!conn)
+ return 0;
+
+ l = &conn->chan_list;
+
+ read_lock(&l->lock);
+
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+ bh_lock_sock(sk);
+ /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
+ if (l2cap_pi(sk)->ampchan == chan) {
+ l2cap_pi(sk)->ampchan = NULL;
+ l2cap_amp_move_init(sk);
+ }
+ bh_unlock_sock(sk);
+ }
+
+ read_unlock(&l->lock);
+
+	return 0;
+}
+
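+/* AMP move signaling is deferred to the l2cap workqueue rather than
+ * handled directly in the receive context, so the handlers are free
+ * to call lock_sock() and other blocking operations.
+ */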
+static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ u8 *data, struct sk_buff *skb)
+{
+ struct l2cap_amp_signal_work *amp_work;
+
+ amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
+ if (!amp_work)
+ return -ENOMEM;
+
+	INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
+ amp_work->conn = conn;
+ amp_work->cmd = *cmd;
+ amp_work->data = data;
+ amp_work->skb = skb_clone(skb, GFP_ATOMIC);
+ if (!amp_work->skb) {
+ kfree(amp_work);
+ return -ENOMEM;
+ }
+
+	if (!queue_work(_l2cap_wq, &amp_work->work)) {
+ kfree_skb(amp_work->skb);
+ kfree(amp_work);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
u16 to_multiplier)
{
@@ -2911,7 +5534,8 @@
}
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
+ struct sk_buff *skb)
{
int err = 0;
@@ -2959,6 +5583,20 @@
err = l2cap_information_rsp(conn, cmd, data);
break;
+ case L2CAP_CREATE_CHAN_REQ:
+ err = l2cap_create_channel_req(conn, cmd, data);
+ break;
+
+ case L2CAP_CREATE_CHAN_RSP:
+ err = l2cap_create_channel_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_REQ:
+ case L2CAP_MOVE_CHAN_RSP:
+ case L2CAP_MOVE_CHAN_CFM:
+ case L2CAP_MOVE_CHAN_CFM_RSP:
+ err = l2cap_sig_amp(conn, cmd, data, skb);
+ break;
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -3015,7 +5653,8 @@
if (conn->hcon->type == LE_LINK)
err = l2cap_le_sig_cmd(conn, &cmd, data);
else
- err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
+ err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
+ data, skb);
if (err) {
struct l2cap_cmd_rej rej;
@@ -3034,833 +5673,1331 @@
kfree_skb(skb);
}
-static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
+static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
u16 our_fcs, rcv_fcs;
- int hdr_size = L2CAP_HDR_SIZE + 2;
+ int hdr_size;
- if (chan->fcs == L2CAP_FCS_CRC16) {
- skb_trim(skb, skb->len - 2);
+ if (pi->extended_control)
+ hdr_size = L2CAP_EXTENDED_HDR_SIZE;
+ else
+ hdr_size = L2CAP_ENHANCED_HDR_SIZE;
+
+ if (pi->fcs == L2CAP_FCS_CRC16) {
+ skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
rcv_fcs = get_unaligned_le16(skb->data + skb->len);
our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
- if (our_fcs != rcv_fcs)
+ if (our_fcs != rcv_fcs) {
+ BT_DBG("Bad FCS");
return -EBADMSG;
+ }
}
return 0;
}
-static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
+static void l2cap_ertm_pass_to_tx(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- u16 control = 0;
-
- chan->frames_sent = 0;
-
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
- l2cap_send_sframe(chan, control);
- set_bit(CONN_RNR_SENT, &chan->conn_state);
- }
-
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
- l2cap_retransmit_frames(chan);
-
- l2cap_ertm_send(chan);
-
- if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
- chan->frames_sent == 0) {
- control |= L2CAP_SUPER_RCV_READY;
- l2cap_send_sframe(chan, control);
- }
+ BT_DBG("sk %p, control %p", sk, control);
+ l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
}
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- struct sk_buff *next_skb;
- int tx_seq_offset, next_tx_seq_offset;
+ BT_DBG("sk %p, control %p", sk, control);
+ l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
+}
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+static void l2cap_ertm_resend(struct sock *sk)
+{
+ struct bt_l2cap_control control;
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
+ struct sk_buff *tx_skb;
+ u16 seq;
- next_skb = skb_peek(&chan->srej_q);
- if (!next_skb) {
- __skb_queue_tail(&chan->srej_q, skb);
- return 0;
- }
+ BT_DBG("sk %p", sk);
- tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
+ pi = l2cap_pi(sk);
- do {
- if (bt_cb(next_skb)->tx_seq == tx_seq)
- return -EINVAL;
+ if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+ return;
- next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
- chan->buffer_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
+ if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
+ pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
+ return;
- if (next_tx_seq_offset > tx_seq_offset) {
- __skb_queue_before(&chan->srej_q, next_skb, skb);
- return 0;
+ while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
+ seq = l2cap_seq_list_pop(&pi->retrans_list);
+
+ skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
+ if (!skb) {
+ BT_DBG("Error: Can't retransmit seq %d, frame missing",
+ (int) seq);
+ continue;
}
- if (skb_queue_is_last(&chan->srej_q, next_skb))
+ bt_cb(skb)->retries += 1;
+ control = bt_cb(skb)->control;
+
+ if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
+ BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ l2cap_seq_list_clear(&pi->retrans_list);
break;
-
- } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
-
- __skb_queue_tail(&chan->srej_q, skb);
-
- return 0;
-}
-
-static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
-{
- struct sk_buff *_skb;
- int err;
-
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto drop;
-
- return chan->ops->recv(chan->data, skb);
-
- case L2CAP_SDU_START:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto drop;
-
- chan->sdu_len = get_unaligned_le16(skb->data);
-
- if (chan->sdu_len > chan->imtu)
- goto disconnect;
-
- chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
- if (!chan->sdu)
- return -ENOMEM;
-
- /* pull sdu_len bytes only after alloc, because of Local Busy
- * condition we have to be sure that this will be executed
- * only once, i.e., when alloc does not fail */
- skb_pull(skb, 2);
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- set_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len = skb->len;
- break;
-
- case L2CAP_SDU_CONTINUE:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto disconnect;
-
- if (!chan->sdu)
- goto disconnect;
-
- chan->partial_sdu_len += skb->len;
- if (chan->partial_sdu_len > chan->sdu_len)
- goto drop;
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- break;
-
- case L2CAP_SDU_END:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto disconnect;
-
- if (!chan->sdu)
- goto disconnect;
-
- chan->partial_sdu_len += skb->len;
-
- if (chan->partial_sdu_len > chan->imtu)
- goto drop;
-
- if (chan->partial_sdu_len != chan->sdu_len)
- goto drop;
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- _skb = skb_clone(chan->sdu, GFP_ATOMIC);
- if (!_skb) {
- return -ENOMEM;
}
- err = chan->ops->recv(chan->data, _skb);
- if (err < 0) {
- kfree_skb(_skb);
- return err;
+ control.reqseq = pi->buffer_seq;
+ if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+ control.final = 1;
+ pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+ } else {
+ control.final = 0;
}
- clear_bit(CONN_SAR_SDU, &chan->conn_state);
+		if (skb_cloned(skb)) {
+			/* Cloned sk_buffs are read-only, so we need a
+			 * writeable copy
+			 */
+			tx_skb = skb_copy(skb, GFP_ATOMIC);
+		} else {
+			tx_skb = skb_clone(skb, GFP_ATOMIC);
+		}
+
+		if (!tx_skb) {
+			/* Allocation failed; drop this round of
+			 * retransmissions and let a later REJ, SREJ or
+			 * poll trigger another attempt.
+			 */
+			l2cap_seq_list_clear(&pi->retrans_list);
+			break;
+		}
- kfree_skb(chan->sdu);
- break;
- }
+ /* Update skb contents */
+ if (pi->extended_control) {
+ put_unaligned_le32(__pack_extended_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ }
- kfree_skb(skb);
- return 0;
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ apply_fcs(tx_skb);
-drop:
- kfree_skb(chan->sdu);
- chan->sdu = NULL;
+ tx_skb->sk = sk;
+ tx_skb->destructor = l2cap_skb_destructor;
+ atomic_inc(&pi->ertm_queued);
-disconnect:
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- kfree_skb(skb);
- return 0;
-}
+ l2cap_do_send(sk, tx_skb);
-static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
-{
- u16 control;
+ BT_DBG("Resent txseq %d", (int)control.txseq);
- BT_DBG("chan %p, Enter local busy", chan);
-
- set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
-
- control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_NOT_READY;
- l2cap_send_sframe(chan, control);
-
- set_bit(CONN_RNR_SENT, &chan->conn_state);
-
- __clear_ack_timer(chan);
-}
-
-static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
-{
- u16 control;
-
- if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
- goto done;
-
- control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
- l2cap_send_sframe(chan, control);
- chan->retry_count = 1;
-
- __clear_retrans_timer(chan);
- __set_monitor_timer(chan);
-
- set_bit(CONN_WAIT_F, &chan->conn_state);
-
-done:
- clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- clear_bit(CONN_RNR_SENT, &chan->conn_state);
-
- BT_DBG("chan %p, Exit local busy", chan);
-}
-
-void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
-{
- if (chan->mode == L2CAP_MODE_ERTM) {
- if (busy)
- l2cap_ertm_enter_local_busy(chan);
- else
- l2cap_ertm_exit_local_busy(chan);
+ pi->last_acked_seq = pi->buffer_seq;
}
}
-static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+static inline void l2cap_ertm_retransmit(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- struct sk_buff *_skb;
+ BT_DBG("sk %p, control %p", sk, control);
+
+ l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
+ l2cap_ertm_resend(sk);
+}
+
+static void l2cap_ertm_retransmit_all(struct sock *sk,
+ struct bt_l2cap_control *control)
+{
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
+
+ BT_DBG("sk %p, control %p", sk, control);
+
+ pi = l2cap_pi(sk);
+
+ if (control->poll)
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+
+ l2cap_seq_list_clear(&pi->retrans_list);
+
+ if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+ return;
+
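+	/* Find the frame at reqseq (the first unacked frame) in the
+	 * tx queue, then queue the txseq of every sent frame from
+	 * there up to, but not including, sk_send_head for
+	 * retransmission.
+	 */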
+ if (pi->unacked_frames) {
+ skb_queue_walk(TX_QUEUE(sk), skb) {
+ if ((bt_cb(skb)->control.txseq == control->reqseq) ||
+ skb == sk->sk_send_head)
+ break;
+ }
+
+ skb_queue_walk_from(TX_QUEUE(sk), skb) {
+ if (skb == sk->sk_send_head)
+ break;
+
+ l2cap_seq_list_append(&pi->retrans_list,
+ bt_cb(skb)->control.txseq);
+ }
+
+ l2cap_ertm_resend(sk);
+ }
+}
+
+static inline void append_skb_frag(struct sk_buff *skb,
+ struct sk_buff *new_frag, struct sk_buff **last_frag)
+{
+	/* skb->len reflects data in skb as well as all fragments;
+	 * skb->data_len reflects only data in fragments.
+	 */
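+	/* For example, appending a 20-byte fragment to an skb with
+	 * len 50 and data_len 30 (so 20 bytes in the head) yields
+	 * len 70 and data_len 50; the head is untouched.
+	 */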
+ BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);
+
+ if (!skb_has_frag_list(skb))
+ skb_shinfo(skb)->frag_list = new_frag;
+
+ new_frag->next = NULL;
+
+ (*last_frag)->next = new_frag;
+ *last_frag = new_frag;
+
+ skb->len += new_frag->len;
+ skb->data_len += new_frag->len;
+ skb->truesize += new_frag->truesize;
+}
+
+static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
+ struct bt_l2cap_control *control, struct sk_buff *skb)
+{
+ struct l2cap_pinfo *pi;
int err = -EINVAL;
- /*
- * TODO: We have to notify the userland if some data is lost with the
- * Streaming Mode.
- */
+ BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
+ skb, skb->len, skb->truesize);
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
- kfree_skb(chan->sdu);
- break;
+ if (!control)
+ return err;
+
+ pi = l2cap_pi(sk);
+
+ BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
+ control->frame_type, control->sar, control->txseq,
+ control->reqseq, control->final);
+
+ switch (control->sar) {
+ case L2CAP_SAR_UNSEGMENTED:
+ if (pi->sdu) {
+ BT_DBG("Unexpected unsegmented PDU during reassembly");
+ kfree_skb(pi->sdu);
+ pi->sdu = NULL;
+ pi->sdu_last_frag = NULL;
+ pi->sdu_len = 0;
}
- err = chan->ops->recv(chan->data, skb);
- if (!err)
- return 0;
-
+ BT_DBG("Unsegmented");
+ err = sock_queue_rcv_skb(sk, skb);
break;
- case L2CAP_SDU_START:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
- kfree_skb(chan->sdu);
- break;
+ case L2CAP_SAR_START:
+ if (pi->sdu) {
+ BT_DBG("Unexpected start PDU during reassembly");
+			kfree_skb(pi->sdu);
+			pi->sdu = NULL;
+			pi->sdu_last_frag = NULL;
+			pi->sdu_len = 0;
}
- chan->sdu_len = get_unaligned_le16(skb->data);
+ pi->sdu_len = get_unaligned_le16(skb->data);
skb_pull(skb, 2);
- if (chan->sdu_len > chan->imtu) {
+ if (pi->sdu_len > pi->imtu) {
err = -EMSGSIZE;
break;
}
- chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
- if (!chan->sdu) {
- err = -ENOMEM;
+ if (skb->len >= pi->sdu_len)
break;
- }
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ pi->sdu = skb;
+ pi->sdu_last_frag = skb;
- set_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len = skb->len;
+ BT_DBG("Start");
+
+ skb = NULL;
err = 0;
break;
- case L2CAP_SDU_CONTINUE:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
+ case L2CAP_SAR_CONTINUE:
+ if (!pi->sdu)
break;
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ append_skb_frag(pi->sdu, skb,
+ &pi->sdu_last_frag);
+ skb = NULL;
- chan->partial_sdu_len += skb->len;
- if (chan->partial_sdu_len > chan->sdu_len)
- kfree_skb(chan->sdu);
- else
- err = 0;
+ if (pi->sdu->len >= pi->sdu_len)
+ break;
+ BT_DBG("Continue, reassembled %d", pi->sdu->len);
+
+ err = 0;
break;
- case L2CAP_SDU_END:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
+ case L2CAP_SAR_END:
+ if (!pi->sdu)
break;
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ append_skb_frag(pi->sdu, skb,
+ &pi->sdu_last_frag);
+ skb = NULL;
- clear_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len += skb->len;
+ if (pi->sdu->len != pi->sdu_len)
+ break;
- if (chan->partial_sdu_len > chan->imtu)
- goto drop;
+ BT_DBG("End, reassembled %d", pi->sdu->len);
+ /* If the sender used tiny PDUs, the rcv queuing could fail.
+ * Applications that have issues here should use a larger
+ * sk_rcvbuf.
+ */
+ err = sock_queue_rcv_skb(sk, pi->sdu);
- if (chan->partial_sdu_len == chan->sdu_len) {
- _skb = skb_clone(chan->sdu, GFP_ATOMIC);
- err = chan->ops->recv(chan->data, _skb);
- if (err < 0)
- kfree_skb(_skb);
+ if (!err) {
+ /* Reassembly complete */
+ pi->sdu = NULL;
+ pi->sdu_last_frag = NULL;
+ pi->sdu_len = 0;
}
- err = 0;
+ break;
-drop:
- kfree_skb(chan->sdu);
+ default:
+ BT_DBG("Bad SAR value");
break;
}
- kfree_skb(skb);
+ if (err) {
+ BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
+ err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
+ if (pi->sdu) {
+ kfree_skb(pi->sdu);
+ pi->sdu = NULL;
+ }
+ pi->sdu_last_frag = NULL;
+ pi->sdu_len = 0;
+ if (skb)
+ kfree_skb(skb);
+ }
+
+ /* Update local busy state */
+ if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
+ l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);
+
return err;
}
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
+static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
{
- struct sk_buff *skb;
- u16 control;
-
- while ((skb = skb_peek(&chan->srej_q)) &&
- !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- int err;
-
- if (bt_cb(skb)->tx_seq != tx_seq)
- break;
-
- skb = skb_dequeue(&chan->srej_q);
- control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
- err = l2cap_ertm_reassembly_sdu(chan, skb, control);
-
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- break;
- }
-
- chan->buffer_seq_srej =
- (chan->buffer_seq_srej + 1) % 64;
- tx_seq = (tx_seq + 1) % 64;
- }
-}
-
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
-{
- struct srej_list *l, *tmp;
- u16 control;
-
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
- if (l->tx_seq == tx_seq) {
- list_del(&l->list);
- kfree(l);
- return;
- }
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- l2cap_send_sframe(chan, control);
- list_del(&l->list);
- list_add_tail(&l->list, &chan->srej_l);
- }
-}
-
-static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
-{
- struct srej_list *new;
- u16 control;
-
- while (tx_seq != chan->expected_tx_seq) {
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- l2cap_send_sframe(chan, control);
-
- new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
- new->tx_seq = chan->expected_tx_seq;
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
- list_add_tail(&new->list, &chan->srej_l);
- }
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
-}
-
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
-{
- u8 tx_seq = __get_txseq(rx_control);
- u8 req_seq = __get_reqseq(rx_control);
- u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
- int tx_seq_offset, expected_tx_seq_offset;
- int num_to_ack = (chan->tx_win/6) + 1;
int err = 0;
+ /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
+ * until a gap is encountered.
+ */
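+	/* For example, if the SREJ queue holds txseqs 5, 6 and 8 and
+	 * buffer_seq is 5, frames 5 and 6 are delivered and the loop
+	 * stops at the gap left by missing frame 7.
+	 */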
- BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
- tx_seq, rx_control);
+ struct l2cap_pinfo *pi;
- if (L2CAP_CTRL_FINAL & rx_control &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- __clear_monitor_timer(chan);
- if (chan->unacked_frames > 0)
- __set_retrans_timer(chan);
- clear_bit(CONN_WAIT_F, &chan->conn_state);
+ BT_DBG("sk %p", sk);
+ pi = l2cap_pi(sk);
+
+ while (l2cap_rmem_available(sk)) {
+ struct sk_buff *skb;
+ BT_DBG("Searching for skb with txseq %d (queue len %d)",
+ (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
+
+ skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
+
+ if (!skb)
+ break;
+
+ skb_unlink(skb, SREJ_QUEUE(sk));
+ pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
+ err = l2cap_ertm_rx_expected_iframe(sk,
+ &bt_cb(skb)->control, skb);
+ if (err)
+ break;
}
- chan->expected_ack_seq = req_seq;
- l2cap_drop_acked_frames(chan);
-
- tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
-
- /* invalid tx_seq */
- if (tx_seq_offset >= chan->tx_win) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
+ if (skb_queue_empty(SREJ_QUEUE(sk))) {
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ l2cap_ertm_send_ack(sk);
}
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
- goto drop;
-
- if (tx_seq == chan->expected_tx_seq)
- goto expected;
-
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- struct srej_list *first;
-
- first = list_first_entry(&chan->srej_l,
- struct srej_list, list);
- if (tx_seq == first->tx_seq) {
- l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
- l2cap_check_srej_gap(chan, tx_seq);
-
- list_del(&first->list);
- kfree(first);
-
- if (list_empty(&chan->srej_l)) {
- chan->buffer_seq = chan->buffer_seq_srej;
- clear_bit(CONN_SREJ_SENT, &chan->conn_state);
- l2cap_send_ack(chan);
- BT_DBG("chan %p, Exit SREJ_SENT", chan);
- }
- } else {
- struct srej_list *l;
-
- /* duplicated tx_seq */
- if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
- goto drop;
-
- list_for_each_entry(l, &chan->srej_l, list) {
- if (l->tx_seq == tx_seq) {
- l2cap_resend_srejframe(chan, tx_seq);
- return 0;
- }
- }
- l2cap_send_srejframe(chan, tx_seq);
- }
- } else {
- expected_tx_seq_offset =
- (chan->expected_tx_seq - chan->buffer_seq) % 64;
- if (expected_tx_seq_offset < 0)
- expected_tx_seq_offset += 64;
-
- /* duplicated tx_seq */
- if (tx_seq_offset < expected_tx_seq_offset)
- goto drop;
-
- set_bit(CONN_SREJ_SENT, &chan->conn_state);
-
- BT_DBG("chan %p, Enter SREJ", chan);
-
- INIT_LIST_HEAD(&chan->srej_l);
- chan->buffer_seq_srej = chan->buffer_seq;
-
- __skb_queue_head_init(&chan->srej_q);
- l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
-
- set_bit(CONN_SEND_PBIT, &chan->conn_state);
-
- l2cap_send_srejframe(chan, tx_seq);
-
- __clear_ack_timer(chan);
- }
- return 0;
-
-expected:
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
-
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
- __skb_queue_tail(&chan->srej_q, skb);
- return 0;
- }
-
- err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
- chan->buffer_seq = (chan->buffer_seq + 1) % 64;
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- return err;
- }
-
- if (rx_control & L2CAP_CTRL_FINAL) {
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
- }
-
- __set_ack_timer(chan);
-
- chan->num_acked = (chan->num_acked + 1) % num_to_ack;
- if (chan->num_acked == num_to_ack - 1)
- l2cap_send_ack(chan);
-
- return 0;
-
-drop:
- kfree_skb(skb);
- return 0;
+ return err;
}
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
+static void l2cap_ertm_handle_srej(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
- rx_control);
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
- chan->expected_ack_seq = __get_reqseq(rx_control);
- l2cap_drop_acked_frames(chan);
+ BT_DBG("sk %p, control %p", sk, control);
- if (rx_control & L2CAP_CTRL_POLL) {
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- (chan->unacked_frames > 0))
- __set_retrans_timer(chan);
+ pi = l2cap_pi(sk);
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- l2cap_send_srejtail(chan);
- } else {
- l2cap_send_i_or_rr_or_rnr(chan);
- }
-
- } else if (rx_control & L2CAP_CTRL_FINAL) {
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
-
- } else {
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- (chan->unacked_frames > 0))
- __set_retrans_timer(chan);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
- l2cap_send_ack(chan);
- else
- l2cap_ertm_send(chan);
- }
-}
-
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
-{
- u8 tx_seq = __get_reqseq(rx_control);
-
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
-
- if (rx_control & L2CAP_CTRL_FINAL) {
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
- } else {
- l2cap_retransmit_frames(chan);
-
- if (test_bit(CONN_WAIT_F, &chan->conn_state))
- set_bit(CONN_REJ_ACT, &chan->conn_state);
- }
-}
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
-{
- u8 tx_seq = __get_reqseq(rx_control);
-
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- if (rx_control & L2CAP_CTRL_POLL) {
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
-
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
- l2cap_retransmit_one_frame(chan, tx_seq);
-
- l2cap_ertm_send(chan);
-
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
- chan->srej_save_reqseq = tx_seq;
- set_bit(CONN_SREJ_ACT, &chan->conn_state);
- }
- } else if (rx_control & L2CAP_CTRL_FINAL) {
- if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
- chan->srej_save_reqseq == tx_seq)
- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
- else
- l2cap_retransmit_one_frame(chan, tx_seq);
- } else {
- l2cap_retransmit_one_frame(chan, tx_seq);
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
- chan->srej_save_reqseq = tx_seq;
- set_bit(CONN_SREJ_ACT, &chan->conn_state);
- }
- }
-}
-
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
-{
- u8 tx_seq = __get_reqseq(rx_control);
-
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
-
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
-
- if (rx_control & L2CAP_CTRL_POLL)
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
-
- if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- __clear_retrans_timer(chan);
- if (rx_control & L2CAP_CTRL_POLL)
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
+ if (control->reqseq == pi->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting",
+ (int) control->reqseq);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
return;
}
- if (rx_control & L2CAP_CTRL_POLL)
- l2cap_send_srejtail(chan);
- else
- l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
-}
+ skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
-{
- BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
-
- if (L2CAP_CTRL_FINAL & rx_control &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- __clear_monitor_timer(chan);
- if (chan->unacked_frames > 0)
- __set_retrans_timer(chan);
- clear_bit(CONN_WAIT_F, &chan->conn_state);
+ if (skb == NULL) {
+ BT_DBG("Seq %d not available for retransmission",
+ (int) control->reqseq);
+ return;
}
- switch (rx_control & L2CAP_CTRL_SUPERVISE) {
- case L2CAP_SUPER_RCV_READY:
- l2cap_data_channel_rrframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_REJECT:
- l2cap_data_channel_rejframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_SELECT_REJECT:
- l2cap_data_channel_srejframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_RCV_NOT_READY:
- l2cap_data_channel_rnrframe(chan, rx_control);
- break;
+ if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
+ BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ return;
}
- kfree_skb(skb);
- return 0;
-}
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
-{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- u16 control;
- u8 req_seq;
- int len, next_tx_seq_offset, req_seq_offset;
+ if (control->poll) {
+ l2cap_ertm_pass_to_tx(sk, control);
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
- len = skb->len;
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+ l2cap_ertm_retransmit(sk, control);
+ l2cap_ertm_send(sk);
- /*
- * We can just drop the corrupted I-frame here.
- * Receiver will miss it and start proper recovery
- * procedures and ask retransmission.
- */
- if (l2cap_check_fcs(chan, skb))
- goto drop;
-
- if (__is_sar_start(control) && __is_iframe(control))
- len -= 2;
-
- if (chan->fcs == L2CAP_FCS_CRC16)
- len -= 2;
-
- if (len > chan->mps) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
- }
-
- req_seq = __get_reqseq(control);
- req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
- if (req_seq_offset < 0)
- req_seq_offset += 64;
-
- next_tx_seq_offset =
- (chan->next_tx_seq - chan->expected_ack_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
-
- /* check for invalid req-seq */
- if (req_seq_offset > next_tx_seq_offset) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
- }
-
- if (__is_iframe(control)) {
- if (len < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
+ if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+ pi->srej_save_reqseq = control->reqseq;
}
-
- l2cap_data_channel_iframe(chan, control, skb);
} else {
- if (len != 0) {
- BT_ERR("%d", len);
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
+ l2cap_ertm_pass_to_tx_fbit(sk, control);
+
+ if (control->final) {
+ if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
+ (pi->srej_save_reqseq == control->reqseq)) {
+ pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+ } else {
+ l2cap_ertm_retransmit(sk, control);
+ }
+ } else {
+ l2cap_ertm_retransmit(sk, control);
+ if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+ pi->srej_save_reqseq = control->reqseq;
+ }
}
-
- l2cap_data_channel_sframe(chan, control, skb);
}
-
- return 0;
-
-drop:
- kfree_skb(skb);
- return 0;
}
-static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
+static void l2cap_ertm_handle_rej(struct sock *sk,
+ struct bt_l2cap_control *control)
{
- struct l2cap_chan *chan;
- struct sock *sk = NULL;
- u16 control;
- u8 tx_seq;
- int len;
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
- chan = l2cap_get_chan_by_scid(conn, cid);
- if (!chan) {
- BT_DBG("unknown cid 0x%4.4x", cid);
- goto drop;
+ BT_DBG("sk %p, control %p", sk, control);
+
+ pi = l2cap_pi(sk);
+
+ if (control->reqseq == pi->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting",
+ (int) control->reqseq);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ return;
}
- sk = chan->sk;
+ skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
- BT_DBG("chan %p, len %d", chan, skb->len);
+ if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ return;
+ }
- if (chan->state != BT_CONNECTED)
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ if (control->final) {
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else
+ l2cap_ertm_retransmit_all(sk, control);
+ } else {
+ l2cap_ertm_retransmit_all(sk, control);
+ l2cap_ertm_send(sk);
+ if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
+ pi->conn_state |= L2CAP_CONN_REJ_ACT;
+ }
+}
+
+static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
+{
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("sk %p, txseq %d", sk, (int)txseq);
+ pi = l2cap_pi(sk);
+
+ BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
+ (int)pi->expected_tx_seq);
+
+ if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
+ if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ /* See notes below regarding "double poll" and
+ * invalid packets.
+ */
+ if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - txseq outside "
+ "tx window after SREJ sent");
+ return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - bad txseq within tx "
+ "window after SREJ sent");
+ return L2CAP_ERTM_TXSEQ_INVALID;
+ }
+ }
+
+ if (pi->srej_list.head == txseq) {
+ BT_DBG("Expected SREJ");
+ return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
+ }
+
+ if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
+ BT_DBG("Duplicate SREJ - txseq already stored");
+ return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
+ }
+
+ if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
+ BT_DBG("Unexpected SREJ - txseq not requested "
+ "with SREJ");
+ return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
+ }
+ }
+
+ if (pi->expected_tx_seq == txseq) {
+ if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_ERTM_TXSEQ_INVALID;
+ } else {
+ BT_DBG("Expected");
+ return L2CAP_ERTM_TXSEQ_EXPECTED;
+ }
+ }
+
+ if (__delta_seq(txseq, pi->last_acked_seq, pi) <
+ __delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
+ BT_DBG("Duplicate - expected_tx_seq later than txseq");
+ return L2CAP_ERTM_TXSEQ_DUPLICATE;
+ }
+
+ if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
+ /* A source of invalid packets is a "double poll" condition,
+ * where delays cause us to send multiple poll packets. If
+ * the remote stack receives and processes both polls,
+ * sequence numbers can wrap around in such a way that a
+ * resent frame has a sequence number that looks like new data
+ * with a sequence gap. This would trigger an erroneous SREJ
+ * request.
+ *
+	 * Fortunately, this is impossible with a tx window no larger
+	 * than half of the sequence space, which allows invalid
+	 * frames to be safely ignored.
+ *
+ * With tx window sizes greater than half of the tx window
+ * maximum, the frame is invalid and cannot be ignored. This
+ * causes a disconnect.
+ */
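+	/* For example, with the enhanced control field's 6-bit
+	 * sequence space (tx_win_max of 63), any tx_win of 32 or less
+	 * puts a stale retransmission in the safely-ignored zone
+	 * instead of letting it alias into apparently-new data.
+	 */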
+
+ if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - txseq outside tx window");
+ return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_ERTM_TXSEQ_INVALID;
+ }
+ } else {
+ BT_DBG("Unexpected - txseq indicates missing frames");
+ return L2CAP_ERTM_TXSEQ_UNEXPECTED;
+ }
+}
+
+static int l2cap_ertm_rx_state_recv(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff *skb, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+ bool skb_in_use = 0;
+
+ BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_RECV_IFRAME:
+ switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
+ case L2CAP_ERTM_TXSEQ_EXPECTED:
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ BT_DBG("Busy, discarding expected seq %d",
+ control->txseq);
+ break;
+ }
+
+ pi->expected_tx_seq = __next_seq(control->txseq, pi);
+ pi->buffer_seq = pi->expected_tx_seq;
+ skb_in_use = 1;
+
+ err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
+ if (err)
+ break;
+
+ if (control->final) {
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else {
+ control->final = 0;
+ l2cap_ertm_retransmit_all(sk, control);
+ l2cap_ertm_send(sk);
+ }
+ }
+
+ if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
+ l2cap_ertm_send_ack(sk);
+ break;
+ case L2CAP_ERTM_TXSEQ_UNEXPECTED:
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ /* Can't issue SREJ frames in the local busy state.
+ * Drop this frame, it will be seen as missing
+ * when local busy is exited.
+ */
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ BT_DBG("Busy, discarding unexpected seq %d",
+ control->txseq);
+ break;
+ }
+
+ /* There was a gap in the sequence, so an SREJ
+ * must be sent for each missing frame. The
+ * current frame is stored for later use.
+ */
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+ l2cap_seq_list_clear(&pi->srej_list);
+ l2cap_ertm_send_srej(sk, control->txseq);
+
+ pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
+ break;
+ case L2CAP_ERTM_TXSEQ_DUPLICATE:
+ l2cap_ertm_pass_to_tx(sk, control);
+ break;
+ case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_ERTM_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
+ ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RR:
+ l2cap_ertm_pass_to_tx(sk, control);
+ if (control->final) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
+ pi->amp_move_state ==
+ L2CAP_AMP_STATE_WAIT_PREPARE) {
+ control->final = 0;
+ l2cap_ertm_retransmit_all(sk, control);
+ }
+
+ l2cap_ertm_send(sk);
+ } else if (control->poll) {
+ l2cap_ertm_send_i_or_rr_or_rnr(sk);
+ } else {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ pi->unacked_frames)
+ l2cap_ertm_start_retrans_timer(pi);
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_send(sk);
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RNR:
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_pass_to_tx(sk, control);
+ if (control && control->poll) {
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+ l2cap_ertm_send_rr_or_rnr(sk, 0);
+ }
+ l2cap_ertm_stop_retrans_timer(pi);
+ l2cap_seq_list_clear(&pi->retrans_list);
+ break;
+ case L2CAP_ERTM_EVENT_RECV_REJ:
+ l2cap_ertm_handle_rej(sk, control);
+ break;
+ case L2CAP_ERTM_EVENT_RECV_SREJ:
+ l2cap_ertm_handle_srej(sk, control);
+ break;
+ default:
+ break;
+ }
+
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+
+ return err;
+}
+
+static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff *skb, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+ u16 txseq = control->txseq;
+ bool skb_in_use = 0;
+
+ BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_RECV_IFRAME:
+ switch (l2cap_ertm_classify_txseq(sk, txseq)) {
+ case L2CAP_ERTM_TXSEQ_EXPECTED:
+ /* Keep frame for reassembly later */
+ l2cap_ertm_pass_to_tx(sk, control);
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ pi->expected_tx_seq = __next_seq(txseq, pi);
+ break;
+ case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
+ l2cap_seq_list_pop(&pi->srej_list);
+
+ l2cap_ertm_pass_to_tx(sk, control);
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+			err = l2cap_ertm_rx_queued_iframes(sk);
+			break;
+ case L2CAP_ERTM_TXSEQ_UNEXPECTED:
+ /* Got a frame that can't be reassembled yet.
+ * Save it for later, and send SREJs to cover
+ * the missing frames.
+ */
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ l2cap_ertm_pass_to_tx(sk, control);
+ l2cap_ertm_send_srej(sk, control->txseq);
+ break;
+ case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
+ /* This frame was requested with an SREJ, but
+ * some expected retransmitted frames are
+ * missing. Request retransmission of missing
+ * SREJ'd frames.
+ */
+ skb_queue_tail(SREJ_QUEUE(sk), skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(SREJ_QUEUE(sk)));
+
+ l2cap_ertm_pass_to_tx(sk, control);
+ l2cap_ertm_send_srej_list(sk, control->txseq);
+ break;
+ case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
+ /* We've already queued this frame. Drop this copy. */
+ l2cap_ertm_pass_to_tx(sk, control);
+ break;
+ case L2CAP_ERTM_TXSEQ_DUPLICATE:
+ /* Expecting a later sequence number, so this frame
+ * was already received. Ignore it completely.
+ */
+ break;
+ case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_ERTM_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
+ ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RR:
+ l2cap_ertm_pass_to_tx(sk, control);
+ if (control->final) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else {
+ control->final = 0;
+ l2cap_ertm_retransmit_all(sk, control);
+ }
+
+ l2cap_ertm_send(sk);
+ } else if (control->poll) {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ pi->unacked_frames) {
+ l2cap_ertm_start_retrans_timer(pi);
+ }
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+ l2cap_ertm_send_srej_tail(sk);
+ } else {
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ pi->unacked_frames) {
+ l2cap_ertm_start_retrans_timer(pi);
+ }
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_send_ack(sk);
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RNR:
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_pass_to_tx(sk, control);
+ if (control->poll)
+ l2cap_ertm_send_srej_tail(sk);
+ else {
+ struct bt_l2cap_control rr_control;
+ memset(&rr_control, 0, sizeof(rr_control));
+ rr_control.frame_type = 's';
+ rr_control.super = L2CAP_SFRAME_RR;
+ rr_control.reqseq = pi->buffer_seq;
+ l2cap_ertm_send_sframe(sk, &rr_control);
+ }
+
+ break;
+ case L2CAP_ERTM_EVENT_RECV_REJ:
+ l2cap_ertm_handle_rej(sk, control);
+ break;
+ case L2CAP_ERTM_EVENT_RECV_SREJ:
+ l2cap_ertm_handle_srej(sk, control);
+ break;
+ }
+
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+
+ return err;
+}
+
+static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
+ struct bt_l2cap_control *control,
+ struct sk_buff *skb, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+ bool skb_in_use = false;
+
+ BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
+ (int)event);
+ pi = l2cap_pi(sk);
+
+ /* Only handle expected frames, to avoid state changes. */
+
+ switch (event) {
+ case L2CAP_ERTM_EVENT_RECV_IFRAME:
+ if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
+ L2CAP_ERTM_TXSEQ_EXPECTED) {
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+ BT_DBG("Busy, discarding expected seq %d",
+ control->txseq);
+ break;
+ }
+
+ pi->expected_tx_seq = __next_seq(control->txseq, pi);
+ pi->buffer_seq = pi->expected_tx_seq;
+ skb_in_use = true;
+
+ err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
+ if (err)
+ break;
+
+ if (control->final) {
+ if (pi->conn_state & L2CAP_CONN_REJ_ACT)
+ pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+ else
+ control->final = 0;
+ }
+ }
+ break;
+ case L2CAP_ERTM_EVENT_RECV_RR:
+ case L2CAP_ERTM_EVENT_RECV_RNR:
+ case L2CAP_ERTM_EVENT_RECV_REJ:
+ l2cap_ertm_process_reqseq(sk, control->reqseq);
+ break;
+ case L2CAP_ERTM_EVENT_RECV_SREJ:
+ /* Ignore */
+ break;
+ default:
+ break;
+ }
+
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+
+ return err;
+}
+
+static int l2cap_answer_move_poll(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ struct bt_l2cap_control control;
+ int err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);
+
+ if (!skb_queue_empty(TX_QUEUE(sk)))
+ sk->sk_send_head = skb_peek(TX_QUEUE(sk));
+ else
+ sk->sk_send_head = NULL;
+
+ /* Rewind next_tx_seq to the point expected
+ * by the receiver.
+ */
+ pi->next_tx_seq = pi->amp_move_reqseq;
+ pi->unacked_frames = 0;
+
+ err = l2cap_finish_amp_move(sk);
+
+ if (err)
+ return err;
+
+ pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+ l2cap_ertm_send_i_or_rr_or_rnr(sk);
+
+ memset(&control, 0, sizeof(control));
+ control.reqseq = pi->amp_move_reqseq;
+
+ if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
+ err = -EPROTO;
+ else
+ err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
+ pi->amp_move_event);
+
+ return err;
+}
+
+static void l2cap_amp_move_setup(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ struct sk_buff *skb;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ l2cap_ertm_stop_ack_timer(pi);
+ l2cap_ertm_stop_retrans_timer(pi);
+ l2cap_ertm_stop_monitor_timer(pi);
+
+ pi->retry_count = 0;
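+ /* Frames already transmitted at least once count as a
+ * single transmission on the new controller; the walk
+ * stops at the first never-sent frame.
+ */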
+ skb_queue_walk(TX_QUEUE(sk), skb) {
+ if (bt_cb(skb)->retries)
+ bt_cb(skb)->retries = 1;
+ else
+ break;
+ }
+
+ pi->expected_tx_seq = pi->buffer_seq;
+
+ pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
+ l2cap_seq_list_clear(&pi->retrans_list);
+ l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
+ skb_queue_purge(SREJ_QUEUE(sk));
+
+ pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
+ pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
+
+ BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
+ pi->rx_state);
+
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+}
+
+static void l2cap_amp_move_revert(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
+ pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
+ } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
+ }
+}
+
+static int l2cap_amp_move_reconf(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+ u8 buf[64];
+ int err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
+ l2cap_build_amp_reconf_req(sk, buf), buf);
+ return err;
+}
+
+static void l2cap_amp_move_success(struct sock *sk)
+{
+ struct l2cap_pinfo *pi;
+
+ BT_DBG("sk %p", sk);
+
+ pi = l2cap_pi(sk);
+
+ if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
+ int err = 0;
+ /* Send reconfigure request */
+ if (pi->mode == L2CAP_MODE_ERTM) {
+ pi->reconf_state = L2CAP_RECONF_INT;
+ if (enable_reconfig)
+ err = l2cap_amp_move_reconf(sk);
+
+ if (err || !enable_reconfig) {
+ pi->reconf_state = L2CAP_RECONF_NONE;
+ l2cap_ertm_tx(sk, NULL, NULL,
+ L2CAP_ERTM_EVENT_EXPLICIT_POLL);
+ pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
+ }
+ } else {
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ }
+ } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
+ if (pi->mode == L2CAP_MODE_ERTM)
+ pi->rx_state =
+ L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
+ else
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ }
+}
+
+static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
+{
+ /* Make sure reqseq is for a packet that has been sent but not acked */
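+ /* Example, assuming __delta_seq() is a modulo-64 difference:
+ * with next_tx_seq == 5 and expected_ack_seq == 62, unacked
+ * is 7, so reqseq values 62..63 and 0..5 are accepted.
+ */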
+ u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
+ return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
+}
+
+static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
+ struct sk_buff *skb)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skb %p, state %d",
+ sk, control, skb, l2cap_pi(sk)->rx_state);
+
+ pi = l2cap_pi(sk);
+
+ if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
+ L2CAP_ERTM_TXSEQ_EXPECTED) {
+ l2cap_ertm_pass_to_tx(sk, control);
+
+ BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
+ __next_seq(pi->buffer_seq, pi));
+
+ pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
+
+ l2cap_ertm_rx_expected_iframe(sk, control, skb);
+ } else {
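+ /* Streaming mode never retransmits, so an out-of-sequence
+ * frame means data was lost. Discard any partially
+ * reassembled SDU along with this frame.
+ */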
+ if (pi->sdu) {
+ kfree_skb(pi->sdu);
+ pi->sdu = NULL;
+ }
+ pi->sdu_last_frag = NULL;
+ pi->sdu_len = 0;
+
+ if (skb) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+ }
+
+ pi->last_acked_seq = control->txseq;
+ pi->expected_tx_seq = __next_seq(control->txseq, pi);
+
+ return err;
+}
+
+static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
+ struct sk_buff *skb, u8 event)
+{
+ struct l2cap_pinfo *pi;
+ int err = 0;
+
+ BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
+ sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);
+
+ pi = l2cap_pi(sk);
+
+ if (__valid_reqseq(pi, control->reqseq)) {
+ switch (pi->rx_state) {
+ case L2CAP_ERTM_RX_STATE_RECV:
+ err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
+ break;
+ case L2CAP_ERTM_RX_STATE_SREJ_SENT:
+ err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
+ event);
+ break;
+ case L2CAP_ERTM_RX_STATE_AMP_MOVE:
+ err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
+ event);
+ break;
+ case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
+ if (control->final) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
+
+ pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
+ l2cap_ertm_process_reqseq(sk, control->reqseq);
+
+ if (!skb_queue_empty(TX_QUEUE(sk)))
+ sk->sk_send_head =
+ skb_peek(TX_QUEUE(sk));
+ else
+ sk->sk_send_head = NULL;
+
+ /* Rewind next_tx_seq to the point expected
+ * by the receiver.
+ */
+ pi->next_tx_seq = control->reqseq;
+ pi->unacked_frames = 0;
+
+ if (pi->ampcon)
+ pi->conn->mtu =
+ pi->ampcon->hdev->acl_mtu;
+ else
+ pi->conn->mtu =
+ pi->conn->hcon->hdev->acl_mtu;
+
+ err = l2cap_setup_resegment(sk);
+
+ if (err)
+ break;
+
+ err = l2cap_ertm_rx_state_recv(sk, control, skb,
+ event);
+ }
+ break;
+ case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
+ if (control->poll) {
+ pi->amp_move_reqseq = control->reqseq;
+ pi->amp_move_event = event;
+ err = l2cap_answer_move_poll(sk);
+ }
+ break;
+ case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
+ if (control->poll) {
+ pi->amp_move_reqseq = control->reqseq;
+ pi->amp_move_event = event;
+
+ BT_DBG("amp_move_role 0x%2.2x, "
+ "reconf_state 0x%2.2x",
+ pi->amp_move_role, pi->reconf_state);
+
+ if (pi->reconf_state == L2CAP_RECONF_ACC)
+ err = l2cap_amp_move_reconf(sk);
+ else
+ err = l2cap_answer_move_poll(sk);
+ }
+ break;
+ default:
+ /* shut it down */
+ break;
+ }
+ } else {
+ BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
+ control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ }
+
+ return err;
+}
+
+void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
+{
+ lock_sock(sk);
+
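+ /* Fixed channels skip the L2CAP configuration exchange,
+ * so local and remote parameters both come from the
+ * caller-supplied options.
+ */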
+ l2cap_pi(sk)->fixed_channel = 1;
+
+ l2cap_pi(sk)->imtu = opt->imtu;
+ l2cap_pi(sk)->omtu = opt->omtu;
+ l2cap_pi(sk)->remote_mps = opt->omtu;
+ l2cap_pi(sk)->mps = opt->omtu;
+ l2cap_pi(sk)->flush_to = opt->flush_to;
+ l2cap_pi(sk)->mode = opt->mode;
+ l2cap_pi(sk)->fcs = opt->fcs;
+ l2cap_pi(sk)->max_tx = opt->max_tx;
+ l2cap_pi(sk)->remote_max_tx = opt->max_tx;
+ l2cap_pi(sk)->tx_win = opt->txwin_size;
+ l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
+ l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+ if (opt->mode == L2CAP_MODE_ERTM ||
+ opt->mode == L2CAP_MODE_STREAMING)
+ l2cap_ertm_init(sk);
+
+ release_sock(sk);
+}
+
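+/* Indexed by the S-frame function field: RR=0, REJ=1, RNR=2, SREJ=3 */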
+static const u8 l2cap_ertm_rx_func_to_event[4] = {
+ L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
+ L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
+};
+
+int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
+{
+ struct l2cap_pinfo *pi;
+ struct bt_l2cap_control *control;
+ u16 len;
+ u8 event;
+ pi = l2cap_pi(sk);
+
+ BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);
+
+ if (sk->sk_state != BT_CONNECTED)
goto drop;
- switch (chan->mode) {
+ switch (pi->mode) {
case L2CAP_MODE_BASIC:
/* If socket recv buffers overflows we drop data here
* which is *bad* because L2CAP has to be reliable.
* But we don't have any other choice. L2CAP doesn't
* provide flow control mechanism. */
- if (chan->imtu < skb->len)
+ if (pi->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
+ if (!sock_queue_rcv_skb(sk, skb))
goto done;
break;
case L2CAP_MODE_ERTM:
- if (!sock_owned_by_user(sk)) {
- l2cap_ertm_data_rcv(sk, skb);
+ case L2CAP_MODE_STREAMING:
+ control = &bt_cb(skb)->control;
+ if (pi->extended_control) {
+ __get_extended_control(get_unaligned_le32(skb->data),
+ control);
+ skb_pull(skb, 4);
} else {
- if (sk_add_backlog(sk, skb))
+ __get_enhanced_control(get_unaligned_le16(skb->data),
+ control);
+ skb_pull(skb, 2);
+ }
+
+ len = skb->len;
+
+ if (l2cap_check_fcs(pi, skb))
+ goto drop;
+
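+ /* A start-of-SDU I-frame carries a 2-byte SDU length field,
+ * and CRC16 FCS adds a 2-byte trailer; neither counts toward
+ * the payload length checked against MPS.
+ */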
+ if ((control->frame_type == 'i') &&
+ (control->sar == L2CAP_SAR_START))
+ len -= 2;
+
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ len -= 2;
+
+ /*
+ * Frames larger than the negotiated MPS violate the channel
+ * configuration, so the connection is dropped. Corrupted
+ * I-frames were already dropped at the FCS check above; the
+ * receiver recovers by requesting retransmission.
+ */
+ if (len > pi->mps) {
+ l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+ goto drop;
+ }
+
+ if (control->frame_type == 'i') {
+ int err;
+
+ BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
+ control->sar, control->reqseq, control->final,
+ control->txseq);
+
+ /* Validate F-bit - F=0 always valid, F=1 only
+ * valid in TX WAIT_F
+ */
+ if (control->final && (pi->tx_state !=
+ L2CAP_ERTM_TX_STATE_WAIT_F))
goto drop;
+
+ if (pi->mode != L2CAP_MODE_STREAMING) {
+ event = L2CAP_ERTM_EVENT_RECV_IFRAME;
+ err = l2cap_ertm_rx(sk, control, skb, event);
+ } else {
+ err = l2cap_strm_rx(sk, control, skb);
+ }
+ if (err)
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
+ } else {
+ /* Only I-frames are expected in streaming mode */
+ if (pi->mode == L2CAP_MODE_STREAMING)
+ goto drop;
+
+ BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
+ control->reqseq, control->final, control->poll,
+ control->super);
+
+ if (len != 0) {
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
+ goto drop;
+ }
+
+ /* Validate F and P bits */
+ if (control->final &&
+ ((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
+ || control->poll))
+ goto drop;
+
+ event = l2cap_ertm_rx_func_to_event[control->super];
+ if (l2cap_ertm_rx(sk, control, skb, event))
+ l2cap_send_disconn_req(pi->conn, sk,
+ ECONNRESET);
}
goto done;
- case L2CAP_MODE_STREAMING:
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
- len = skb->len;
-
- if (l2cap_check_fcs(chan, skb))
- goto drop;
-
- if (__is_sar_start(control))
- len -= 2;
-
- if (chan->fcs == L2CAP_FCS_CRC16)
- len -= 2;
-
- if (len > chan->mps || len < 0 || __is_sframe(control))
- goto drop;
-
- tx_seq = __get_txseq(control);
-
- if (chan->expected_tx_seq == tx_seq)
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
- else
- chan->expected_tx_seq = (tx_seq + 1) % 64;
-
- l2cap_streaming_reassembly_sdu(chan, skb, control);
-
- goto done;
-
default:
- BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
+ BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
break;
}
@@ -3868,34 +7005,35 @@
kfree_skb(skb);
done:
- if (sk)
- bh_unlock_sock(sk);
-
return 0;
}
+void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
+{
+ lock_sock(sk);
+ l2cap_data_channel(sk, skb);
+ release_sock(sk);
+}
+
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
- struct sock *sk = NULL;
- struct l2cap_chan *chan;
+ struct sock *sk;
- chan = l2cap_global_chan_by_psm(0, psm, conn->src);
- if (!chan)
+ sk = l2cap_get_sock_by_psm(0, psm, conn->src);
+ if (!sk)
goto drop;
- sk = chan->sk;
-
bh_lock_sock(sk);
BT_DBG("sk %p, len %d", sk, skb->len);
- if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
goto drop;
- if (chan->imtu < skb->len)
+ if (l2cap_pi(sk)->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
+ if (!sock_queue_rcv_skb(sk, skb))
goto done;
drop:
@@ -3909,26 +7047,23 @@
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
- struct sock *sk = NULL;
- struct l2cap_chan *chan;
+ struct sock *sk;
- chan = l2cap_global_chan_by_scid(0, cid, conn->src);
- if (!chan)
+ sk = l2cap_get_sock_by_scid(0, cid, conn->src);
+ if (!sk)
goto drop;
- sk = chan->sk;
-
bh_lock_sock(sk);
BT_DBG("sk %p, len %d", sk, skb->len);
- if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
goto drop;
- if (chan->imtu < skb->len)
+ if (l2cap_pi(sk)->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
+ if (!sock_queue_rcv_skb(sk, skb))
goto done;
drop:
@@ -3943,6 +7078,7 @@
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct l2cap_hdr *lh = (void *) skb->data;
+ struct sock *sk;
u16 cid, len;
__le16 psm;
@@ -3979,7 +7115,24 @@
break;
default:
- l2cap_data_channel(conn, cid, skb);
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
+ if (sk) {
+ if (sock_owned_by_user(sk)) {
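+ /* The socket is in use by a process; defer the frame
+ * to the socket backlog, which sk_backlog_rcv drains
+ * when the socket is released.
+ */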
+ BT_DBG("backlog sk %p", sk);
+ if (sk_add_backlog(sk, skb))
+ kfree_skb(skb);
+ } else {
+ l2cap_data_channel(sk, skb);
+ }
+
+ bh_unlock_sock(sk);
+ } else if (cid == L2CAP_CID_A2MP) {
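+ /* The A2MP fixed channel has no socket; hand the
+ * frame to the AMP manager.
+ */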
+ BT_DBG("A2MP");
+ amp_conn_ind(conn, skb);
+ } else {
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ kfree_skb(skb);
+ }
+
break;
}
}
@@ -3989,7 +7142,8 @@
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
int exact = 0, lm1 = 0, lm2 = 0;
- struct l2cap_chan *c;
+ struct sock *sk;
+ struct hlist_node *node;
if (type != ACL_LINK)
return -EINVAL;
@@ -3997,25 +7151,23 @@
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets and check their link_mode */
- read_lock(&chan_list_lock);
- list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
-
- if (c->state != BT_LISTEN)
+ read_lock(&l2cap_sk_list.lock);
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
- if (c->role_switch)
+ if (l2cap_pi(sk)->role_switch)
lm1 |= HCI_LM_MASTER;
exact++;
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
- if (c->role_switch)
+ if (l2cap_pi(sk)->role_switch)
lm2 |= HCI_LM_MASTER;
}
}
- read_unlock(&chan_list_lock);
+ read_unlock(&l2cap_sk_list.lock);
return exact ? lm1 : lm2;
}
@@ -4034,7 +7186,7 @@
if (conn)
l2cap_conn_ready(conn);
} else
- l2cap_conn_del(hcon, bt_to_errno(status));
+ l2cap_conn_del(hcon, bt_err(status));
return 0;
}
@@ -4045,7 +7197,7 @@
BT_DBG("hcon %p", hcon);
- if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
+ if (hcon->type != ACL_LINK || !conn)
return 0x13;
return conn->disc_reason;
@@ -4058,50 +7210,51 @@
if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
return -EINVAL;
- l2cap_conn_del(hcon, bt_to_errno(reason));
+ l2cap_conn_del(hcon, bt_err(reason));
return 0;
}
-static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
+static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
return;
if (encrypt == 0x00) {
- if (chan->sec_level == BT_SECURITY_MEDIUM) {
- __clear_chan_timer(chan);
- __set_chan_timer(chan, HZ * 5);
- } else if (chan->sec_level == BT_SECURITY_HIGH)
- l2cap_chan_close(chan, ECONNREFUSED);
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ * 5);
+ } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
+ __l2cap_sock_close(sk, ECONNREFUSED);
} else {
- if (chan->sec_level == BT_SECURITY_MEDIUM)
- __clear_chan_timer(chan);
+ if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
+ l2cap_sock_clear_timer(sk);
}
}
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
+ struct l2cap_chan_list *l;
struct l2cap_conn *conn = hcon->l2cap_data;
- struct l2cap_chan *chan;
+ struct sock *sk;
if (!conn)
return 0;
+ l = &conn->chan_list;
+
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
+ read_lock(&l->lock);
- list_for_each_entry(chan, &conn->chan_l, list) {
- struct sock *sk = chan->sk;
-
+ for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
- BT_DBG("chan->scid %d", chan->scid);
+ BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);
- if (chan->scid == L2CAP_CID_LE_DATA) {
+ if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
- chan->sec_level = hcon->sec_level;
+ l2cap_pi(sk)->sec_level = hcon->sec_level;
del_timer(&conn->security_timer);
l2cap_chan_ready(sk);
smp_distribute_keys(conn, 0);
@@ -4111,68 +7264,64 @@
continue;
}
- if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
bh_unlock_sock(sk);
continue;
}
- if (!status && (chan->state == BT_CONNECTED ||
- chan->state == BT_CONFIG)) {
- l2cap_check_encryption(chan, encrypt);
+ if (!status && (sk->sk_state == BT_CONNECTED ||
+ sk->sk_state == BT_CONFIG)) {
+ l2cap_check_encryption(sk, encrypt);
bh_unlock_sock(sk);
continue;
}
- if (chan->state == BT_CONNECT) {
+ if (sk->sk_state == BT_CONNECT) {
if (!status) {
- struct l2cap_conn_req req;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
-
- chan->ident = l2cap_get_ident(conn);
- set_bit(CONF_CONNECT_PEND, &chan->conf_state);
-
- l2cap_send_cmd(conn, chan->ident,
- L2CAP_CONN_REQ, sizeof(req), &req);
+ l2cap_pi(sk)->conf_state |=
+ L2CAP_CONF_CONNECT_PEND;
+ if (l2cap_pi(sk)->amp_pref ==
+ BT_AMP_POLICY_PREFER_AMP) {
+ amp_create_physical(l2cap_pi(sk)->conn,
+ sk);
+ } else {
+ l2cap_send_conn_req(sk);
+ }
} else {
- __clear_chan_timer(chan);
- __set_chan_timer(chan, HZ / 10);
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 10);
}
- } else if (chan->state == BT_CONNECT2) {
+ } else if (sk->sk_state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
- __u16 res, stat;
+ __u16 result;
if (!status) {
- if (bt_sk(sk)->defer_setup) {
- struct sock *parent = bt_sk(sk)->parent;
- res = L2CAP_CR_PEND;
- stat = L2CAP_CS_AUTHOR_PEND;
- if (parent)
- parent->sk_data_ready(parent, 0);
- } else {
- l2cap_state_change(chan, BT_CONFIG);
- res = L2CAP_CR_SUCCESS;
- stat = L2CAP_CS_NO_INFO;
+ if (l2cap_pi(sk)->amp_id) {
+ amp_accept_physical(conn,
+ l2cap_pi(sk)->amp_id, sk);
+ bh_unlock_sock(sk);
+ continue;
}
+
+ sk->sk_state = BT_CONFIG;
+ result = L2CAP_CR_SUCCESS;
} else {
- l2cap_state_change(chan, BT_DISCONN);
- __set_chan_timer(chan, HZ / 10);
- res = L2CAP_CR_SEC_BLOCK;
- stat = L2CAP_CS_NO_INFO;
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_set_timer(sk, HZ / 10);
+ result = L2CAP_CR_SEC_BLOCK;
}
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
- rsp.result = cpu_to_le16(res);
- rsp.status = cpu_to_le16(stat);
- l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
- sizeof(rsp), &rsp);
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ read_unlock(&l->lock);
return 0;
}
@@ -4181,6 +7330,9 @@
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ if (!conn && hcon->hdev->dev_type != HCI_BREDR)
+ goto drop;
+
if (!conn)
conn = l2cap_conn_add(hcon, 0);
@@ -4189,9 +7341,9 @@
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
- if (!(flags & ACL_CONT)) {
+ if (flags & ACL_START) {
struct l2cap_hdr *hdr;
- struct l2cap_chan *chan;
+ struct sock *sk;
u16 cid;
int len;
@@ -4220,6 +7372,14 @@
return 0;
}
+ if (flags & ACL_CONT) {
+ BT_ERR("Complete frame is incomplete "
+ "(len %d, expected len %d)",
+ skb->len, len);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
BT_DBG("Start: total len %d, frag len %d", len, skb->len);
if (skb->len > len) {
@@ -4229,22 +7389,19 @@
goto drop;
}
- chan = l2cap_get_chan_by_scid(conn, cid);
+ sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
- if (chan && chan->sk) {
- struct sock *sk = chan->sk;
-
- if (chan->imtu < len - L2CAP_HDR_SIZE) {
- BT_ERR("Frame exceeding recv MTU (len %d, "
- "MTU %d)", len,
- chan->imtu);
- bh_unlock_sock(sk);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
+ if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
+ BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
+ len, l2cap_pi(sk)->imtu);
bh_unlock_sock(sk);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
}
+ if (sk)
+ bh_unlock_sock(sk);
+
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!conn->rx_skb)
@@ -4290,22 +7447,24 @@
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
- struct l2cap_chan *c;
+ struct sock *sk;
+ struct hlist_node *node;
- read_lock_bh(&chan_list_lock);
+ read_lock_bh(&l2cap_sk_list.lock);
- list_for_each_entry(c, &chan_list, global_l) {
- struct sock *sk = c->sk;
+ sk_for_each(sk, node, &l2cap_sk_list.head) {
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst),
- c->state, __le16_to_cpu(c->psm),
- c->scid, c->dcid, c->imtu, c->omtu,
- c->sec_level, c->mode);
+ sk->sk_state, __le16_to_cpu(pi->psm),
+ pi->scid, pi->dcid,
+ pi->imtu, pi->omtu, pi->sec_level,
+ pi->mode);
}
- read_unlock_bh(&chan_list_lock);
+ read_unlock_bh(&l2cap_sk_list.lock);
return 0;
}
@@ -4332,7 +7491,10 @@
.disconn_ind = l2cap_disconn_ind,
.disconn_cfm = l2cap_disconn_cfm,
.security_cfm = l2cap_security_cfm,
- .recv_acldata = l2cap_recv_acldata
+ .recv_acldata = l2cap_recv_acldata,
+ .create_cfm = l2cap_create_cfm,
+ .modify_cfm = l2cap_modify_cfm,
+ .destroy_cfm = l2cap_destroy_cfm,
};
int __init l2cap_init(void)
@@ -4343,6 +7505,12 @@
if (err < 0)
return err;
+ _l2cap_wq = create_singlethread_workqueue("l2cap");
+ if (!_l2cap_wq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
err = hci_register_proto(&l2cap_hci_proto);
if (err < 0) {
BT_ERR("L2CAP protocol registration failed");
@@ -4357,17 +7525,28 @@
BT_ERR("Failed to create L2CAP debug file");
}
+ if (amp_init() < 0) {
+ BT_ERR("AMP Manager initialization failed");
+ goto error;
+ }
+
return 0;
error:
+ if (_l2cap_wq)
+ destroy_workqueue(_l2cap_wq);
l2cap_cleanup_sockets();
return err;
}
void l2cap_exit(void)
{
+ amp_exit();
+
debugfs_remove(l2cap_debugfs);
+ flush_workqueue(_l2cap_wq);
+ destroy_workqueue(_l2cap_wq);
+
if (hci_unregister_proto(&l2cap_hci_proto) < 0)
BT_ERR("L2CAP protocol unregistration failed");
@@ -4376,3 +7555,6 @@
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
+
+module_param(enable_reconfig, bool, 0644);
+MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 61f1f62..7694764 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -30,15 +30,69 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
+#include <net/bluetooth/amp.h>
-static const struct proto_ops l2cap_sock_ops;
-static void l2cap_sock_init(struct sock *sk, struct sock *parent);
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
+/* ---- L2CAP timers ---- */
+static void l2cap_sock_timeout(unsigned long arg)
+{
+ struct sock *sk = (struct sock *) arg;
+ int reason;
+
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+
+ bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk)) {
+ /* sk is owned by user. Try again later */
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ return;
+ }
+
+ if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
+ reason = ECONNREFUSED;
+ else if (sk->sk_state == BT_CONNECT &&
+ l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
+ reason = ECONNREFUSED;
+ else
+ reason = ETIMEDOUT;
+
+ __l2cap_sock_close(sk, reason);
+
+ bh_unlock_sock(sk);
+
+ l2cap_sock_kill(sk);
+ sock_put(sk);
+}
+
+void l2cap_sock_set_timer(struct sock *sk, long timeout)
+{
+ BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
+}
+
+void l2cap_sock_clear_timer(struct sock *sk)
+{
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+ sk_stop_timer(sk, &sk->sk_timer);
+}
+
+static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ sk_for_each(sk, node, &l2cap_sk_list.head)
+ if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
+ goto found;
+ sk = NULL;
+found:
+ return sk;
+}
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
@@ -77,22 +131,26 @@
}
}
+ write_lock_bh(&l2cap_sk_list.lock);
+
+ if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
+ err = -EADDRINUSE;
+ } else {
+ /* Save source address */
+ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+ l2cap_pi(sk)->psm = la.l2_psm;
+ l2cap_pi(sk)->sport = la.l2_psm;
+ sk->sk_state = BT_BOUND;
+
+ if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+ __le16_to_cpu(la.l2_psm) == 0x0003)
+ l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+ }
+
if (la.l2_cid)
- err = l2cap_add_scid(chan, la.l2_cid);
- else
- err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
+ l2cap_pi(sk)->scid = la.l2_cid;
- if (err < 0)
- goto done;
-
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
- chan->sec_level = BT_SECURITY_SDP;
-
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
-
- chan->state = BT_BOUND;
- sk->sk_state = BT_BOUND;
+ write_unlock_bh(&l2cap_sk_list.lock);
done:
release_sock(sk);
@@ -102,11 +160,11 @@
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct sockaddr_l2 la;
int len, err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %p type %d mode %d state %d", sk, sk->sk_type,
+ l2cap_pi(sk)->mode, sk->sk_state);
if (!addr || alen < sizeof(addr->sa_family) ||
addr->sa_family != AF_BLUETOOTH)
@@ -121,13 +179,13 @@
lock_sock(sk);
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
- && !(la.l2_psm || la.l2_cid)) {
+ if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
+ && !(la.l2_psm || la.l2_cid || l2cap_pi(sk)->fixed_channel)) {
err = -EINVAL;
goto done;
}
- switch (chan->mode) {
+ switch (l2cap_pi(sk)->mode) {
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_ERTM:
@@ -163,18 +221,20 @@
}
/* PSM must be odd and lsb of upper byte must be 0 */
- if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
- chan->chan_type != L2CAP_CHAN_RAW) {
+ if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
+ !l2cap_pi(sk)->fixed_channel &&
+ sk->sk_type != SOCK_RAW && !la.l2_cid) {
+ BT_DBG("Bad PSM 0x%x", (int)__le16_to_cpu(la.l2_psm));
err = -EINVAL;
goto done;
}
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
- chan->psm = la.l2_psm;
- chan->dcid = la.l2_cid;
+ l2cap_pi(sk)->psm = la.l2_psm;
+ l2cap_pi(sk)->dcid = la.l2_cid;
- err = l2cap_chan_connect(l2cap_pi(sk)->chan);
+ err = l2cap_do_connect(sk);
if (err)
goto done;
@@ -182,6 +242,8 @@
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
+ if (err)
+ BT_ERR("failed %d", err);
release_sock(sk);
return err;
}
@@ -189,7 +251,6 @@
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
@@ -202,7 +263,7 @@
goto done;
}
- switch (chan->mode) {
+ switch (l2cap_pi(sk)->mode) {
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_ERTM:
@@ -215,10 +276,30 @@
goto done;
}
+ if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->scid) {
+ bdaddr_t *src = &bt_sk(sk)->src;
+ u16 psm;
+
+ err = -EINVAL;
+
+ write_lock_bh(&l2cap_sk_list.lock);
+
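+ /* Scan the dynamic PSM range for a free value; stepping
+ * by 2 from 0x1001 keeps the PSM odd, as required.
+ */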
+ for (psm = 0x1001; psm < 0x1100; psm += 2)
+ if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
+ l2cap_pi(sk)->psm = cpu_to_le16(psm);
+ l2cap_pi(sk)->sport = cpu_to_le16(psm);
+ err = 0;
+ break;
+ }
+
+ write_unlock_bh(&l2cap_sk_list.lock);
+
+ if (err < 0)
+ goto done;
+ }
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
-
- chan->state = BT_LISTEN;
sk->sk_state = BT_LISTEN;
done:
@@ -235,26 +316,30 @@
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (1) {
+ while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
- nsk = bt_accept_dequeue(sk, newsock);
- if (nsk)
- break;
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
- if (!timeo) {
- err = -EAGAIN;
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
@@ -262,12 +347,8 @@
err = sock_intr_errno(timeo);
break;
}
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -286,7 +367,6 @@
{
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -294,13 +374,13 @@
*len = sizeof(struct sockaddr_l2);
if (peer) {
- la->l2_psm = chan->psm;
+ la->l2_psm = l2cap_pi(sk)->psm;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
- la->l2_cid = cpu_to_le16(chan->dcid);
+ la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
} else {
- la->l2_psm = chan->sport;
+ la->l2_psm = l2cap_pi(sk)->sport;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
- la->l2_cid = cpu_to_le16(chan->scid);
+ la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
}
return 0;
@@ -309,7 +389,6 @@
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
int len, err = 0;
@@ -325,13 +404,13 @@
switch (optname) {
case L2CAP_OPTIONS:
memset(&opts, 0, sizeof(opts));
- opts.imtu = chan->imtu;
- opts.omtu = chan->omtu;
- opts.flush_to = chan->flush_to;
- opts.mode = chan->mode;
- opts.fcs = chan->fcs;
- opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
+ opts.imtu = l2cap_pi(sk)->imtu;
+ opts.omtu = l2cap_pi(sk)->omtu;
+ opts.flush_to = l2cap_pi(sk)->flush_to;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -340,7 +419,7 @@
break;
case L2CAP_LM:
- switch (chan->sec_level) {
+ switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_LOW:
opt = L2CAP_LM_AUTH;
break;
@@ -356,12 +435,15 @@
break;
}
- if (chan->role_switch)
+ if (l2cap_pi(sk)->role_switch)
opt |= L2CAP_LM_MASTER;
- if (chan->force_reliable)
+ if (l2cap_pi(sk)->force_reliable)
opt |= L2CAP_LM_RELIABLE;
+ if (l2cap_pi(sk)->flushable)
+ opt |= L2CAP_LM_FLUSHABLE;
+
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
@@ -374,9 +456,8 @@
break;
}
- memset(&cinfo, 0, sizeof(cinfo));
- cinfo.hci_handle = chan->conn->hcon->handle;
- memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
@@ -396,7 +477,6 @@
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
int len, err = 0;
@@ -416,17 +496,17 @@
switch (optname) {
case BT_SECURITY:
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
- chan->chan_type != L2CAP_CHAN_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
memset(&sec, 0, sizeof(sec));
- sec.level = chan->sec_level;
+ sec.level = l2cap_pi(sk)->sec_level;
if (sk->sk_state == BT_CONNECTED)
- sec.key_size = chan->conn->hcon->enc_key_size;
+ sec.key_size = l2cap_pi(sk)->conn->hcon->enc_key_size;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
@@ -445,12 +525,6 @@
break;
- case BT_FLUSHABLE:
- if (put_user(chan->flushable, (u32 __user *) optval))
- err = -EFAULT;
-
- break;
-
case BT_POWER:
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
&& sk->sk_type != SOCK_RAW) {
@@ -458,7 +532,7 @@
break;
}
- pwr.force_active = chan->force_active;
+ pwr.force_active = l2cap_pi(sk)->force_active;
len = min_t(unsigned int, len, sizeof(pwr));
if (copy_to_user(optval, (char *) &pwr, len))
@@ -466,6 +540,11 @@
break;
+ case BT_AMP_POLICY:
+ if (put_user(l2cap_pi(sk)->amp_pref, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -478,29 +557,30 @@
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
- int len, err = 0;
+ int len, le_sock, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
+ le_sock = l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA;
+
switch (optname) {
case L2CAP_OPTIONS:
- if (sk->sk_state == BT_CONNECTED) {
+ if (sk->sk_state == BT_CONNECTED && !le_sock) {
err = -EINVAL;
break;
}
- opts.imtu = chan->imtu;
- opts.omtu = chan->omtu;
- opts.flush_to = chan->flush_to;
- opts.mode = chan->mode;
- opts.fcs = chan->fcs;
- opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
+ opts.imtu = l2cap_pi(sk)->imtu;
+ opts.omtu = l2cap_pi(sk)->omtu;
+ opts.flush_to = l2cap_pi(sk)->flush_to;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
+ opts.max_tx = l2cap_pi(sk)->max_tx;
+ opts.txwin_size = l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -508,15 +588,28 @@
break;
}
- if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+ if ((opts.imtu || opts.omtu) && le_sock &&
+ (sk->sk_state == BT_CONNECTED)) {
+ if (opts.imtu >= L2CAP_LE_DEFAULT_MTU)
+ l2cap_pi(sk)->imtu = opts.imtu;
+ if (opts.omtu >= L2CAP_LE_DEFAULT_MTU)
+ l2cap_pi(sk)->omtu = opts.omtu;
+ if (opts.imtu < L2CAP_LE_DEFAULT_MTU ||
+ opts.omtu < L2CAP_LE_DEFAULT_MTU)
+ err = -EINVAL;
+ break;
+ }
+
+ if (opts.txwin_size < 1 ||
+ opts.txwin_size > L2CAP_TX_WIN_MAX_EXTENDED) {
err = -EINVAL;
break;
}
- chan->mode = opts.mode;
- switch (chan->mode) {
+ l2cap_pi(sk)->mode = opts.mode;
+ switch (l2cap_pi(sk)->mode) {
case L2CAP_MODE_BASIC:
- clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
+ l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
@@ -528,11 +621,11 @@
break;
}
- chan->imtu = opts.imtu;
- chan->omtu = opts.omtu;
- chan->fcs = opts.fcs;
- chan->max_tx = opts.max_tx;
- chan->tx_win = (__u8)opts.txwin_size;
+ l2cap_pi(sk)->imtu = opts.imtu;
+ l2cap_pi(sk)->omtu = opts.omtu;
+ l2cap_pi(sk)->fcs = opts.fcs;
+ l2cap_pi(sk)->max_tx = opts.max_tx;
+ l2cap_pi(sk)->tx_win = opts.txwin_size;
break;
case L2CAP_LM:
@@ -542,14 +635,15 @@
}
if (opt & L2CAP_LM_AUTH)
- chan->sec_level = BT_SECURITY_LOW;
+ l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & L2CAP_LM_ENCRYPT)
- chan->sec_level = BT_SECURITY_MEDIUM;
+ l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & L2CAP_LM_SECURE)
- chan->sec_level = BT_SECURITY_HIGH;
+ l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
- chan->role_switch = (opt & L2CAP_LM_MASTER);
- chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
+ l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ l2cap_pi(sk)->flushable = (opt & L2CAP_LM_FLUSHABLE);
break;
default:
@@ -564,7 +658,6 @@
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
struct l2cap_conn *conn;
@@ -583,8 +676,8 @@
switch (optname) {
case BT_SECURITY:
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
- chan->chan_type != L2CAP_CHAN_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
@@ -603,10 +696,10 @@
break;
}
- chan->sec_level = sec.level;
+ l2cap_pi(sk)->sec_level = sec.level;
- conn = chan->conn;
- if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+ conn = l2cap_pi(sk)->conn;
+ if (conn && l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
if (!conn->hcon->out) {
err = -EINVAL;
break;
@@ -634,45 +727,44 @@
bt_sk(sk)->defer_setup = opt;
break;
- case BT_FLUSHABLE:
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- if (opt > BT_FLUSHABLE_ON) {
- err = -EINVAL;
- break;
- }
-
- if (opt == BT_FLUSHABLE_OFF) {
- struct l2cap_conn *conn = chan->conn;
- /* proceed further only when we have l2cap_conn and
- No Flush support in the LM */
- if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
- err = -EINVAL;
- break;
- }
- }
-
- chan->flushable = opt;
- break;
-
case BT_POWER:
- if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
- chan->chan_type != L2CAP_CHAN_RAW) {
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
err = -EINVAL;
break;
}
- pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+ pwr.force_active = 1;
len = min_t(unsigned int, sizeof(pwr), optlen);
if (copy_from_user((char *) &pwr, optval, len)) {
err = -EFAULT;
break;
}
- chan->force_active = pwr.force_active;
+ l2cap_pi(sk)->force_active = pwr.force_active;
+ break;
+
+ case BT_AMP_POLICY:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if ((opt > BT_AMP_POLICY_PREFER_BR_EDR) ||
+ ((l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) &&
+ (l2cap_pi(sk)->mode != L2CAP_MODE_STREAMING))) {
+ err = -EINVAL;
+ break;
+ }
+
+ l2cap_pi(sk)->amp_pref = (u8) opt;
+ BT_DBG("BT_AMP_POLICY now %d", opt);
+
+ if ((sk->sk_state == BT_CONNECTED) &&
+ (l2cap_pi(sk)->amp_move_role == L2CAP_AMP_MOVE_NONE) &&
+ (l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
+ l2cap_amp_move_init(sk);
+
break;
default:
@@ -687,8 +779,11 @@
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *skb;
+ struct sk_buff_head seg_queue;
int err;
+ u8 amp_id;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -702,12 +797,102 @@
lock_sock(sk);
if (sk->sk_state != BT_CONNECTED) {
- release_sock(sk);
- return -ENOTCONN;
+ err = -ENOTCONN;
+ goto done;
}
- err = l2cap_chan_send(chan, msg, len);
+ /* Connectionless channel */
+ if (sk->sk_type == SOCK_DGRAM) {
+ skb = l2cap_create_connless_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ } else {
+ l2cap_do_send(sk, skb);
+ err = len;
+ }
+ goto done;
+ }
+ switch (pi->mode) {
+ case L2CAP_MODE_BASIC:
+ /* Check outgoing MTU */
+ if (len > pi->omtu) {
+ err = -EMSGSIZE;
+ goto done;
+ }
+
+ /* Create a basic PDU */
+ skb = l2cap_create_basic_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+
+ l2cap_do_send(sk, skb);
+ err = len;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+
+ /* Check outgoing MTU */
+ if (len > pi->omtu) {
+ err = -EMSGSIZE;
+ goto done;
+ }
+
+ __skb_queue_head_init(&seg_queue);
+
+ /* Do segmentation before calling in to the state machine,
+ * since it's possible to block while waiting for memory
+ * allocation.
+ */
+ amp_id = pi->amp_id;
+ err = l2cap_segment_sdu(sk, &seg_queue, msg, len, 0);
+
+ /* The socket lock is released while segmenting, so check
+ * that the socket is still connected
+ */
+ if (sk->sk_state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
+ }
+
+ if (err) {
+ BT_DBG("Error %d, sk_sndbuf %d, sk_wmem_alloc %d",
+ err, sk->sk_sndbuf,
+ atomic_read(&sk->sk_wmem_alloc));
+ break;
+ }
+
+ if (pi->amp_id != amp_id) {
+ /* Channel moved while unlocked. Resegment. */
+ err = l2cap_resegment_queue(sk, &seg_queue);
+
+ if (err)
+ break;
+ }
+
+ if (pi->mode != L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_tx(sk, 0, &seg_queue,
+ L2CAP_ERTM_EVENT_DATA_REQUEST);
+ else
+ err = l2cap_strm_tx(sk, &seg_queue);
+ if (!err)
+ err = len;
+
+ /* If the skbs were not queued for sending, they'll still be in
+ * seg_queue and need to be purged.
+ */
+ __skb_queue_purge(&seg_queue);
+ break;
+
+ default:
+ BT_DBG("bad state %1.1x", pi->mode);
+ err = -EBADFD;
+ }
+
+done:
release_sock(sk);
return err;
}
@@ -715,15 +900,43 @@
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
int err;
lock_sock(sk);
if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+ struct l2cap_conn_rsp rsp;
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ u8 buf[128];
+
+ if (l2cap_pi(sk)->amp_id) {
+ /* Physical link must be brought up before connection
+ * completes.
+ */
+ amp_accept_physical(conn, l2cap_pi(sk)->amp_id, sk);
+ release_sock(sk);
+ return 0;
+ }
+
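+ /* Accepting a deferred connection on first read: send the
+ * success response, then start configuration unless a
+ * request is already outstanding.
+ */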
sk->sk_state = BT_CONFIG;
- __l2cap_connect_rsp_defer(pi->chan);
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+ if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
+ release_sock(sk);
+ return 0;
+ }
+
+ l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
+
release_sock(sk);
return 0;
}
@@ -735,39 +948,15 @@
else
err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
- if (pi->chan->mode != L2CAP_MODE_ERTM)
- return err;
+ l2cap_ertm_recv_done(sk);
- /* Attempt to put pending rx data in the socket buffer */
-
- lock_sock(sk);
-
- if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
- goto done;
-
- if (pi->rx_busy_skb) {
- if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
- pi->rx_busy_skb = NULL;
- else
- goto done;
- }
-
- /* Restore data flow when half of the receive buffer is
- * available. This avoids resending large numbers of
- * frames.
- */
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
- l2cap_chan_busy(pi->chan, 0);
-
-done:
- release_sock(sk);
return err;
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
-static void l2cap_sock_kill(struct sock *sk)
+void l2cap_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
@@ -775,16 +964,95 @@
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
-
- l2cap_chan_destroy(l2cap_pi(sk)->chan);
+ bt_sock_unlink(&l2cap_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
+/* Must be called on unlocked socket. */
+static void l2cap_sock_close(struct sock *sk)
+{
+ l2cap_sock_clear_timer(sk);
+ lock_sock(sk);
+ __l2cap_sock_close(sk, ECONNRESET);
+ release_sock(sk);
+ l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_cleanup_listen(struct sock *parent)
+{
+ struct sock *sk;
+
+ BT_DBG("parent %p", parent);
+
+ /* Close not yet accepted channels */
+ while ((sk = bt_accept_dequeue(parent, NULL)))
+ l2cap_sock_close(sk);
+
+ parent->sk_state = BT_CLOSED;
+ sock_set_flag(parent, SOCK_ZAPPED);
+}
+
+void __l2cap_sock_close(struct sock *sk, int reason)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+
+ switch (sk->sk_state) {
+ case BT_LISTEN:
+ l2cap_sock_cleanup_listen(sk);
+ break;
+
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ if ((sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) &&
+ conn->hcon->type == ACL_LINK) {
+ l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+ l2cap_send_disconn_req(conn, sk, reason);
+ } else
+ l2cap_chan_del(sk, reason);
+ break;
+
+ case BT_CONNECT2:
+ if ((sk->sk_type == SOCK_SEQPACKET ||
+ sk->sk_type == SOCK_STREAM) &&
+ conn->hcon->type == ACL_LINK) {
+ struct l2cap_conn_rsp rsp;
+ __u16 result;
+
+ if (bt_sk(sk)->defer_setup)
+ result = L2CAP_CR_SEC_BLOCK;
+ else
+ result = L2CAP_CR_BAD_PSM;
+ sk->sk_state = BT_DISCONN;
+
+ rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+ L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+ }
+
+ l2cap_chan_del(sk, reason);
+ break;
+
+ case BT_CONNECT:
+ case BT_DISCONN:
+ l2cap_chan_del(sk, reason);
+ break;
+
+ default:
+ sock_set_flag(sk, SOCK_ZAPPED);
+ break;
+ }
+}
+
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -794,11 +1062,15 @@
lock_sock(sk);
if (!sk->sk_shutdown) {
- if (chan->mode == L2CAP_MODE_ERTM)
+
+ if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
err = __l2cap_wait_ack(sk);
+ l2cap_ertm_shutdown(sk);
+ }
sk->sk_shutdown = SHUTDOWN_MASK;
- l2cap_chan_close(chan, 0);
+ l2cap_sock_clear_timer(sk);
+ __l2cap_sock_close(sk, 0);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -829,149 +1101,96 @@
return err;
}
-static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
-{
- struct sock *sk, *parent = data;
-
- sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
- GFP_ATOMIC);
- if (!sk)
- return NULL;
-
- l2cap_sock_init(sk, parent);
-
- return l2cap_pi(sk)->chan;
-}
-
-static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
-{
- int err;
- struct sock *sk = data;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- if (pi->rx_busy_skb)
- return -ENOMEM;
-
- err = sock_queue_rcv_skb(sk, skb);
-
- /* For ERTM, handle one skb that doesn't fit into the recv
- * buffer. This is important to do because the data frames
- * have already been acked, so the skb cannot be discarded.
- *
- * Notify the l2cap core that the buffer is full, so the
- * LOCAL_BUSY state is entered and no more frames are
- * acked and reassembled until there is buffer space
- * available.
- */
- if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
- pi->rx_busy_skb = skb;
- l2cap_chan_busy(pi->chan, 1);
- err = 0;
- }
-
- return err;
-}
-
-static void l2cap_sock_close_cb(void *data)
-{
- struct sock *sk = data;
-
- l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_state_change_cb(void *data, int state)
-{
- struct sock *sk = data;
-
- sk->sk_state = state;
-}
-
-static struct l2cap_ops l2cap_chan_ops = {
- .name = "L2CAP Socket Interface",
- .new_connection = l2cap_sock_new_connection_cb,
- .recv = l2cap_sock_recv_cb,
- .close = l2cap_sock_close_cb,
- .state_change = l2cap_sock_state_change_cb,
-};
-
static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
- if (l2cap_pi(sk)->rx_busy_skb) {
- kfree_skb(l2cap_pi(sk)->rx_busy_skb);
- l2cap_pi(sk)->rx_busy_skb = NULL;
- }
-
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
+
+ l2cap_ertm_destruct(sk);
}
-static void l2cap_sock_init(struct sock *sk, struct sock *parent)
+static void set_default_config(struct l2cap_conf_prm *conf_prm)
+{
+ conf_prm->fcs = L2CAP_FCS_CRC16;
+ conf_prm->retrans_timeout = 0;
+ conf_prm->monitor_timeout = 0;
+ conf_prm->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+}
+
+void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_chan *chan = pi->chan;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %p parent %p", sk, parent);
if (parent) {
- struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
-
sk->sk_type = parent->sk_type;
+ sk->sk_rcvbuf = parent->sk_rcvbuf;
+ sk->sk_sndbuf = parent->sk_sndbuf;
bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
- chan->chan_type = pchan->chan_type;
- chan->imtu = pchan->imtu;
- chan->omtu = pchan->omtu;
- chan->conf_state = pchan->conf_state;
- chan->mode = pchan->mode;
- chan->fcs = pchan->fcs;
- chan->max_tx = pchan->max_tx;
- chan->tx_win = pchan->tx_win;
- chan->sec_level = pchan->sec_level;
- chan->role_switch = pchan->role_switch;
- chan->force_reliable = pchan->force_reliable;
- chan->flushable = pchan->flushable;
- chan->force_active = pchan->force_active;
+ pi->imtu = l2cap_pi(parent)->imtu;
+ pi->omtu = l2cap_pi(parent)->omtu;
+ pi->conf_state = l2cap_pi(parent)->conf_state;
+ pi->mode = l2cap_pi(parent)->mode;
+ pi->fcs = l2cap_pi(parent)->fcs;
+ pi->max_tx = l2cap_pi(parent)->max_tx;
+ pi->tx_win = l2cap_pi(parent)->tx_win;
+ pi->sec_level = l2cap_pi(parent)->sec_level;
+ pi->role_switch = l2cap_pi(parent)->role_switch;
+ pi->force_reliable = l2cap_pi(parent)->force_reliable;
+ pi->flushable = l2cap_pi(parent)->flushable;
+ pi->force_active = l2cap_pi(parent)->force_active;
+ pi->amp_pref = l2cap_pi(parent)->amp_pref;
} else {
-
- switch (sk->sk_type) {
- case SOCK_RAW:
- chan->chan_type = L2CAP_CHAN_RAW;
- break;
- case SOCK_DGRAM:
- chan->chan_type = L2CAP_CHAN_CONN_LESS;
- break;
- case SOCK_SEQPACKET:
- case SOCK_STREAM:
- chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
- break;
- }
-
- chan->imtu = L2CAP_DEFAULT_MTU;
- chan->omtu = 0;
+ pi->imtu = L2CAP_DEFAULT_MTU;
+ pi->omtu = 0;
if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
- chan->mode = L2CAP_MODE_ERTM;
- set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
+ pi->mode = L2CAP_MODE_ERTM;
+ pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
} else {
- chan->mode = L2CAP_MODE_BASIC;
+ pi->mode = L2CAP_MODE_BASIC;
}
- chan->max_tx = L2CAP_DEFAULT_MAX_TX;
- chan->fcs = L2CAP_FCS_CRC16;
- chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- chan->sec_level = BT_SECURITY_LOW;
- chan->role_switch = 0;
- chan->force_reliable = 0;
- chan->flushable = BT_FLUSHABLE_OFF;
- chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
-
+ pi->reconf_state = L2CAP_RECONF_NONE;
+ pi->max_tx = L2CAP_DEFAULT_MAX_TX;
+ pi->fcs = L2CAP_FCS_CRC16;
+ pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ pi->sec_level = BT_SECURITY_LOW;
+ pi->role_switch = 0;
+ pi->force_reliable = 0;
+ pi->flushable = 0;
+ pi->force_active = 1;
+ pi->amp_pref = BT_AMP_POLICY_REQUIRE_BR_EDR;
}
/* Default config options */
- chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ sk->sk_backlog_rcv = l2cap_data_channel;
+ pi->ampcon = NULL;
+ pi->ampchan = NULL;
+ pi->conf_len = 0;
+ pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ pi->scid = 0;
+ pi->dcid = 0;
+ pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
+ pi->extended_control = 0;
- chan->data = sk;
- chan->ops = &l2cap_chan_ops;
+ pi->local_conf.fcs = pi->fcs;
+ if (pi->mode == L2CAP_MODE_BASIC) {
+ pi->local_conf.retrans_timeout = 0;
+ pi->local_conf.monitor_timeout = 0;
+ } else {
+ pi->local_conf.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ pi->local_conf.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+ }
+
+ pi->local_conf.flush_to = pi->flush_to;
+
+ set_default_config(&pi->remote_conf);
+
+ skb_queue_head_init(TX_QUEUE(sk));
+ skb_queue_head_init(SREJ_QUEUE(sk));
}
static struct proto l2cap_proto = {
@@ -980,10 +1199,9 @@
.obj_size = sizeof(struct l2cap_pinfo)
};
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
- struct l2cap_chan *chan;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
if (!sk)
@@ -993,21 +1211,16 @@
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
- sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+ sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
- chan = l2cap_chan_create(sk);
- if (!chan) {
- l2cap_sock_kill(sk);
- return NULL;
- }
+ setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
- l2cap_pi(sk)->chan = chan;
-
+ bt_sock_link(&l2cap_sk_list, sk);
return sk;
}
@@ -1037,7 +1250,7 @@
return 0;
}
-static const struct proto_ops l2cap_sock_ops = {
+const struct proto_ops l2cap_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = l2cap_sock_release,
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 86a6bed..b826d1b 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -59,7 +59,7 @@
EXPORT_SYMBOL(batostr);
/* Bluetooth error codes to Unix errno mapping */
-int bt_to_errno(__u16 code)
+int bt_err(__u16 code)
{
switch (code) {
case 0:
@@ -149,23 +149,4 @@
return ENOSYS;
}
}
-EXPORT_SYMBOL(bt_to_errno);
-
-int bt_printk(const char *level, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- r = printk("%sBluetooth: %pV\n", level, &vaf);
-
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(bt_printk);
+EXPORT_SYMBOL(bt_err);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 9832721..6d45a60 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -41,7 +41,7 @@
void *user_data;
};
-static LIST_HEAD(cmd_list);
+LIST_HEAD(cmd_list);
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
@@ -179,7 +179,7 @@
hci_del_off_timer(hdev);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
set_bit(HCI_MGMT, &hdev->flags);
@@ -208,7 +208,7 @@
memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -316,7 +316,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
up = test_bit(HCI_UP, &hdev->flags);
if ((cp->val && up) || (!cp->val && !up)) {
@@ -343,7 +343,7 @@
err = 0;
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@@ -368,7 +368,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
@@ -403,7 +403,7 @@
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -429,7 +429,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
@@ -463,7 +463,7 @@
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -522,7 +522,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (cp->val)
set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -538,7 +538,7 @@
err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -739,7 +739,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
if (!uuid) {
@@ -763,7 +763,7 @@
err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -788,7 +788,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
err = hci_uuids_clear(hdev);
@@ -823,7 +823,7 @@
err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -847,7 +847,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->major_class = cp->major;
hdev->minor_class = cp->minor;
@@ -857,7 +857,7 @@
if (err == 0)
err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -879,7 +879,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
BT_DBG("hci%u enable %d", index, cp->enable);
@@ -897,7 +897,7 @@
err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
0);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -931,7 +931,7 @@
BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
key_count);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_link_keys_clear(hdev);
@@ -950,7 +950,7 @@
i += sizeof(*key) + key->dlen;
- if (key->type == HCI_LK_SMP_LTK) {
+ if (key->type == KEY_TYPE_LTK) {
struct key_master_id *id = (void *) key->data;
if (key->dlen != sizeof(struct key_master_id))
@@ -962,13 +962,13 @@
continue;
}
- hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
+ hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
key->pin_len);
}
err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -990,7 +990,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_remove_link_key(hdev, &cp->bdaddr);
if (err < 0) {
@@ -1009,11 +1009,11 @@
put_unaligned_le16(conn->handle, &dc.handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
- err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
}
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1039,7 +1039,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
@@ -1052,9 +1052,6 @@
}
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (!conn)
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
-
if (!conn) {
err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
goto failed;
@@ -1074,7 +1071,7 @@
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1095,7 +1092,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
count = 0;
list_for_each(p, &hdev->conn_hash.list) {
@@ -1111,6 +1108,8 @@
put_unaligned_le16(count, &rp->conn_count);
+ read_lock(&hci_dev_list_lock);
+
i = 0;
list_for_each(p, &hdev->conn_hash.list) {
struct hci_conn *c = list_entry(p, struct hci_conn, list);
@@ -1118,41 +1117,22 @@
bacpy(&rp->conn[i++], &c->dst);
}
+ read_unlock(&hci_dev_list_lock);
+
err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
unlock:
kfree(rp);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int send_pin_code_neg_reply(struct sock *sk, u16 index,
- struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp)
-{
- struct pending_cmd *cmd;
- int err;
-
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
- sizeof(*cp));
- if (!cmd)
- return -ENOMEM;
-
- err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
- &cp->bdaddr);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
- return err;
-}
-
static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct hci_conn *conn;
struct mgmt_cp_pin_code_reply *cp;
- struct mgmt_cp_pin_code_neg_reply ncp;
struct hci_cp_pin_code_reply reply;
struct pending_cmd *cmd;
int err;
@@ -1168,32 +1148,13 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
goto failed;
}
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
- goto failed;
- }
-
- if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
- bacpy(&ncp.bdaddr, &cp->bdaddr);
-
- BT_ERR("PIN code is not 16 bytes long");
-
- err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
- if (err >= 0)
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
- EINVAL);
-
- goto failed;
- }
-
cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
if (!cmd) {
err = -ENOMEM;
@@ -1202,14 +1163,14 @@
bacpy(&reply.bdaddr, &cp->bdaddr);
reply.pin_len = cp->pin_len;
- memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
+ memcpy(reply.pin_code, cp->pin_code, 16);
err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1220,6 +1181,7 @@
{
struct hci_dev *hdev;
struct mgmt_cp_pin_code_neg_reply *cp;
+ struct pending_cmd *cmd;
int err;
BT_DBG("");
@@ -1235,7 +1197,7 @@
return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1243,10 +1205,20 @@
goto failed;
}
- err = send_pin_code_neg_reply(sk, index, hdev, cp);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
+ data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
+ &cp->bdaddr);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1269,14 +1241,14 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->io_capability = cp->io_capability;
BT_DBG("%s IO capability set to 0x%02x", hdev->name,
hdev->io_capability);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1362,7 +1334,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (cp->io_cap == 0x03) {
sec_level = BT_SECURITY_MEDIUM;
@@ -1372,7 +1344,8 @@
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
}
- conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level, auth_type);
+ conn = hci_connect(hdev, ACL_LINK, 0, &cp->bdaddr, sec_level,
+ auth_type);
if (IS_ERR(conn)) {
err = PTR_ERR(conn);
goto unlock;
@@ -1404,7 +1377,7 @@
err = 0;
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1436,7 +1409,7 @@
if (!hdev)
return cmd_status(sk, index, mgmt_op, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, mgmt_op, ENETDOWN);
@@ -1454,7 +1427,7 @@
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1478,7 +1451,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
if (!cmd) {
@@ -1493,7 +1466,7 @@
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1512,7 +1485,7 @@
return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
@@ -1542,7 +1515,7 @@
mgmt_pending_remove(cmd);
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1566,7 +1539,7 @@
return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
cp->randomizer);
@@ -1576,7 +1549,7 @@
err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
0);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1600,7 +1573,7 @@
return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
ENODEV);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
if (err < 0)
@@ -1610,140 +1583,7 @@
err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
NULL, 0);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int start_discovery(struct sock *sk, u16 index)
-{
- u8 lap[3] = { 0x33, 0x8b, 0x9e };
- struct hci_cp_inquiry cp;
- struct pending_cmd *cmd;
- struct hci_dev *hdev;
- int err;
-
- BT_DBG("hci%u", index);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- memset(&cp, 0, sizeof(cp));
- memcpy(&cp.lap, lap, 3);
- cp.length = 0x08;
- cp.num_rsp = 0x00;
-
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int stop_discovery(struct sock *sk, u16 index)
-{
- struct hci_dev *hdev;
- struct pending_cmd *cmd;
- int err;
-
- BT_DBG("hci%u", index);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int block_device(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_block_device *cp;
- int err;
-
- BT_DBG("hci%u", index);
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- ENODEV);
-
- err = hci_blacklist_add(hdev, &cp->bdaddr);
-
- if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
- else
- err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
- NULL, 0);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_unblock_device *cp;
- int err;
-
- BT_DBG("hci%u", index);
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- ENODEV);
-
- err = hci_blacklist_del(hdev, &cp->bdaddr);
-
- if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
- else
- err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- NULL, 0);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1857,18 +1697,7 @@
err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
len);
break;
- case MGMT_OP_START_DISCOVERY:
- err = start_discovery(sk, index);
- break;
- case MGMT_OP_STOP_DISCOVERY:
- err = stop_discovery(sk, index);
- break;
- case MGMT_OP_BLOCK_DEVICE:
- err = block_device(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_UNBLOCK_DEVICE:
- err = unblock_device(sk, index, buf + sizeof(*hdr), len);
- break;
+
default:
BT_DBG("Unknown op %u", opcode);
err = cmd_status(sk, index, opcode, 0x01);
@@ -1975,7 +1804,7 @@
return ret;
}
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
+int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
{
struct mgmt_ev_new_key *ev;
int err, total;
@@ -1989,8 +1818,8 @@
ev->key.type = key->type;
memcpy(ev->key.val, key->val, 16);
ev->key.pin_len = key->pin_len;
+ ev->old_key_type = old_key_type;
ev->key.dlen = key->dlen;
- ev->store_hint = persistent;
memcpy(ev->key.data, key->data, key->dlen);
@@ -2070,12 +1899,11 @@
return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
}
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
{
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.bdaddr, bdaddr);
- ev.secure = secure;
return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
NULL);
@@ -2123,15 +1951,13 @@
return err;
}
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
- u8 confirm_hint)
+int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value)
{
struct mgmt_ev_user_confirm_request ev;
BT_DBG("hci%u", index);
bacpy(&ev.bdaddr, bdaddr);
- ev.confirm_hint = confirm_hint;
put_unaligned_le32(value, &ev.value);
return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
@@ -2280,9 +2106,3 @@
return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
}
-
-int mgmt_discovering(u16 index, u8 discovering)
-{
- return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
- sizeof(discovering), NULL);
-}
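
The bulk of the mgmt.c churn above is mechanical: every hci_dev_lock_bh()/hci_dev_unlock_bh() pair becomes plain hci_dev_lock()/hci_dev_unlock(). In kernels of this vintage both were thin spinlock wrappers; the _bh flavour additionally disables bottom halves, which matters only when the same lock can be taken from softirq context. A sketch of the assumed 2.6.38-era definitions (verify against include/net/bluetooth/hci_core.h):

	/* Approximate 2.6.38-era wrappers; not copied from this patch. */
	#define hci_dev_lock(d)		spin_lock(&(d)->lock)
	#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
	#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)	/* also masks softirqs */
	#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
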
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index c2486a5..6dac31f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -62,6 +62,7 @@
#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
+static unsigned long rfcomm_event;
static LIST_HEAD(session_list);
@@ -119,6 +120,7 @@
{
if (!rfcomm_thread)
return;
+ set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
wake_up_process(rfcomm_thread);
}
@@ -230,8 +232,6 @@
static inline int rfcomm_check_security(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
-
__u8 auth_type;
switch (d->sec_level) {
@@ -246,7 +246,8 @@
break;
}
- return hci_conn_security(conn->hcon, d->sec_level, auth_type);
+ return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level,
+ auth_type);
}
static void rfcomm_session_timeout(unsigned long arg)
@@ -708,10 +709,10 @@
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->chan->imtu = l2cap_mtu;
- l2cap_pi(sk)->chan->sec_level = sec_level;
+ l2cap_pi(sk)->imtu = l2cap_mtu;
+ l2cap_pi(sk)->sec_level = sec_level;
if (l2cap_ertm)
- l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
+ l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
release_sock(sk);
s = rfcomm_session_add(sock, BT_BOUND);
@@ -1239,7 +1240,6 @@
void rfcomm_dlc_accept(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
BT_DBG("dlc %p", d);
@@ -1253,7 +1253,7 @@
rfcomm_dlc_unlock(d);
if (d->role_switch)
- hci_conn_switch_role(conn->hcon, 0x00);
+ hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00);
rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
}
@@ -1852,7 +1852,10 @@
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- rfcomm_recv_frame(s, skb);
+ if (!skb_linearize(skb))
+ rfcomm_recv_frame(s, skb);
+ else
+ kfree_skb(skb);
}
if (sk->sk_state == BT_CLOSED) {
@@ -1889,8 +1892,7 @@
/* We should adjust MTU on incoming sessions.
* L2CAP MTU minus UIH header and FCS. */
- s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
- l2cap_pi(nsock->sk)->chan->imtu) - 5;
+ s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5;
rfcomm_schedule();
} else
@@ -1909,7 +1911,7 @@
/* We can adjust MTU on outgoing sessions.
* L2CAP MTU minus UIH header and FCS. */
- s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;
+ s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) - 5;
rfcomm_send_sabm(s, 0);
break;
@@ -1992,7 +1994,7 @@
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->chan->imtu = l2cap_mtu;
+ l2cap_pi(sk)->imtu = l2cap_mtu;
release_sock(sk);
/* Start listening on the socket */
@@ -2035,18 +2037,19 @@
rfcomm_add_listener(BDADDR_ANY);
- while (1) {
+ while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
-
- if (kthread_should_stop())
- break;
+ if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
+ /* No pending events. Let's sleep.
+ * Incoming connections and data will wake us up. */
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
/* Process stuff */
+ clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
rfcomm_process_sessions();
-
- schedule();
}
- __set_current_state(TASK_RUNNING);
rfcomm_kill_listener();
@@ -2092,7 +2095,7 @@
if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
continue;
- if (!status && hci_conn_check_secure(conn, d->sec_level))
+ if (!status)
set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
else
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
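
The new rfcomm_event word restores the lost-wakeup-safe kthread pattern: the producer sets a bit before calling wake_up_process(), and the consumer tests that bit between set_current_state(TASK_INTERRUPTIBLE) and schedule(). Because the task state is changed before the test, a wakeup racing with the check cannot be missed. A self-contained sketch of the same idiom with hypothetical names:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	/* Illustrative wakeup-flag idiom; my_event/MY_WAKEUP are hypothetical. */
	static unsigned long my_event;
	#define MY_WAKEUP	0

	static void my_schedule(struct task_struct *thread)
	{
		set_bit(MY_WAKEUP, &my_event);	/* publish the work first... */
		wake_up_process(thread);	/* ...then kick the thread */
	}

	static int my_thread_fn(void *arg)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!test_bit(MY_WAKEUP, &my_event))
				schedule();	/* safe: state set before the test */
			set_current_state(TASK_RUNNING);

			clear_bit(MY_WAKEUP, &my_event);
			/* process pending sessions/data here */
		}
		return 0;
	}
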
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index b02f0d4..66cc1f0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -485,6 +485,11 @@
lock_sock(sk);
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
@@ -496,20 +501,19 @@
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (1) {
+ while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
- nsk = bt_accept_dequeue(sk, newsock);
- if (nsk)
- break;
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
- if (!timeo) {
- err = -EAGAIN;
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
@@ -517,12 +521,8 @@
err = sock_intr_errno(timeo);
break;
}
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -679,8 +679,7 @@
{
struct sock *sk = sock->sk;
struct bt_security sec;
- int err = 0;
- size_t len;
+ int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
@@ -742,8 +741,8 @@
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
+ struct sock *l2cap_sk;
struct rfcomm_conninfo cinfo;
- struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
int len, err = 0;
u32 opt;
@@ -786,10 +785,10 @@
break;
}
+ l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
- memset(&cinfo, 0, sizeof(cinfo));
- cinfo.hci_handle = conn->hcon->handle;
- memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
+ cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
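
The accept() rewrite above (mirrored in sco.c below) folds the dequeue attempt into the loop condition and re-validates BT_LISTEN every time the socket lock is reacquired, since the listener can be torn down while the task slept. The resulting loop, reassembled from the hunks for readability (a skeleton, with error paths abbreviated):

	/* Skeleton of the blocking-accept loop after this patch. */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {			/* non-blocking, or timed out */
			err = -EAGAIN;
			break;
		}

		release_sock(sk);		/* drop the lock while sleeping */
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		if (sk->sk_state != BT_LISTEN) {	/* re-check after sleeping */
			err = -EBADFD;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
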
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index d3d48b5..e692b9d 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -198,7 +198,8 @@
pkt_type &= SCO_ESCO_MASK;
}
- hcon = hci_connect(hdev, type, pkt_type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, pkt_type, dst,
+ BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
@@ -372,15 +373,6 @@
case BT_CONNECTED:
case BT_CONFIG:
- if (sco_pi(sk)->conn) {
- sk->sk_state = BT_DISCONN;
- sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
- hci_conn_put(sco_pi(sk)->conn->hcon);
- sco_pi(sk)->conn->hcon = NULL;
- } else
- sco_chan_del(sk, ECONNRESET);
- break;
-
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
@@ -579,26 +571,30 @@
lock_sock(sk);
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (1) {
+ while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
- ch = bt_accept_dequeue(sk, newsock);
- if (ch)
- break;
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
- if (!timeo) {
- err = -EAGAIN;
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
@@ -606,12 +602,8 @@
err = sock_intr_errno(timeo);
break;
}
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
}
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -844,9 +836,7 @@
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
-
- if (conn->hcon)
- hci_conn_put(conn->hcon);
+ hci_conn_put(conn->hcon);
}
sk->sk_state = BT_CLOSED;
@@ -948,7 +938,7 @@
if (conn)
sco_conn_ready(conn);
} else
- sco_conn_del(hcon, bt_to_errno(status));
+ sco_conn_del(hcon, bt_err(status));
return 0;
}
@@ -960,7 +950,7 @@
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
- sco_conn_del(hcon, bt_to_errno(reason));
+ sco_conn_del(hcon, bt_err(reason));
return 0;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 391888b..e1c4ef3 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -25,7 +25,6 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
#include <linux/crypto.h>
-#include <linux/scatterlist.h>
#include <crypto/b128ops.h>
#define SMP_TIMEOUT 30000 /* 30 seconds */
@@ -146,7 +145,7 @@
}
static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
- u16 dlen, void *data)
+ u16 dlen, void *data)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
@@ -181,7 +180,7 @@
if (!skb)
return;
- hci_send_acl(conn->hcon, skb, 0);
+ hci_send_acl(conn->hcon, NULL, skb, 0);
}
static __u8 seclevel_to_authreq(__u8 level)
@@ -201,11 +200,12 @@
struct smp_cmd_pairing *rsp,
__u8 authreq)
{
+ u8 all_keys = 0;
u8 dist_keys;
dist_keys = 0;
if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
- dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN;
+ dist_keys = SMP_DIST_ENC_KEY;
authreq |= SMP_AUTH_BONDING;
}
@@ -214,7 +214,7 @@
req->oob_flag = SMP_OOB_NOT_PRESENT;
req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
req->init_key_dist = dist_keys;
- req->resp_key_dist = dist_keys;
+ req->resp_key_dist = all_keys;
req->auth_req = authreq;
return;
}
@@ -222,7 +222,7 @@
rsp->io_capability = conn->hcon->io_capability;
rsp->oob_flag = SMP_OOB_NOT_PRESENT;
rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
- rsp->init_key_dist = req->init_key_dist & dist_keys;
+ rsp->init_key_dist = req->init_key_dist & all_keys;
rsp->resp_key_dist = req->resp_key_dist & dist_keys;
rsp->auth_req = authreq;
}
@@ -461,29 +461,25 @@
BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
- if (!lmp_host_le_capable(hcon->hdev))
- return 1;
-
if (IS_ERR(hcon->hdev->tfm))
return 1;
if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
- return 0;
+ return -EINPROGRESS;
if (sec_level == BT_SECURITY_LOW)
return 1;
- if (hcon->sec_level >= sec_level)
+ if (hcon->sec_level > sec_level)
return 1;
authreq = seclevel_to_authreq(sec_level);
if (hcon->link_mode & HCI_LM_MASTER) {
- struct smp_cmd_pairing cp;
struct link_key *key;
key = hci_find_link_key_type(hcon->hdev, conn->dst,
- HCI_LK_SMP_LTK);
+ KEY_TYPE_LTK);
if (key) {
struct key_master_id *master = (void *) key->data;
@@ -493,6 +489,10 @@
goto done;
}
+ }
+
+ if (hcon->link_mode & HCI_LM_MASTER) {
+ struct smp_cmd_pairing cp;
build_pairing_cmd(conn, &cp, NULL, authreq);
conn->preq[0] = SMP_CMD_PAIRING_REQ;
@@ -518,10 +518,18 @@
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_encrypt_info *rp = (void *) skb->data;
+ u8 rand[8];
+ int err;
skb_pull(skb, sizeof(*rp));
- memcpy(conn->tk, rp->ltk, sizeof(conn->tk));
+ BT_DBG("conn %p", conn);
+
+ memset(rand, 0, sizeof(rand));
+
+ err = hci_add_ltk(conn->hcon->hdev, 0, conn->dst, 0, 0, rand, rp->ltk);
+ if (err)
+ return SMP_UNSPECIFIED;
return 0;
}
@@ -529,11 +537,25 @@
static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_master_ident *rp = (void *) skb->data;
+ struct smp_cmd_pairing *paircmd = (void *) &conn->prsp[1];
+ struct link_key *key;
+ struct key_master_id *id;
+ u8 keydist = paircmd->init_key_dist;
skb_pull(skb, sizeof(*rp));
+ key = hci_find_link_key_type(conn->hcon->hdev, conn->dst, KEY_TYPE_LTK);
+ if (key == NULL)
+ return SMP_UNSPECIFIED;
+
+ BT_DBG("keydist 0x%x", keydist);
+
+ id = (void *) key->data;
+ id->ediv = rp->ediv;
+ memcpy(id->rand, rp->rand, sizeof(rp->rand));
+
hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size,
- rp->ediv, rp->rand, conn->tk);
+ rp->ediv, rp->rand, key->val);
smp_distribute_keys(conn, 1);
@@ -546,12 +568,6 @@
__u8 reason;
int err = 0;
- if (!lmp_host_le_capable(conn->hcon->hdev)) {
- err = -ENOTSUPP;
- reason = SMP_PAIRING_NOTSUPP;
- goto done;
- }
-
if (IS_ERR(conn->hcon->hdev->tfm)) {
err = PTR_ERR(conn->hcon->hdev->tfm);
reason = SMP_PAIRING_NOTSUPP;
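
The build_pairing_cmd() change narrows key distribution: the local side now offers only the encryption key (LTK), and all_keys == 0 means nothing is requested from the peer. init_key_dist/resp_key_dist are bitmasks; a sketch of the flag values as defined in net/bluetooth/smp.h of this era, matching the SMP key-distribution bits in the spec:

	/* Key-distribution bits carried in init_key_dist/resp_key_dist. */
	#define SMP_DIST_ENC_KEY	0x01	/* LTK + EDIV/Rand (master ident) */
	#define SMP_DIST_ID_KEY		0x02	/* IRK + identity address */
	#define SMP_DIST_SIGN		0x04	/* CSRK for data signing */

	/* Request path after this patch, condensed: */
	dist_keys = 0;
	if (test_bit(HCI_PAIRABLE, &hdev->flags))
		dist_keys = SMP_DIST_ENC_KEY;	/* offer only the LTK */
	req->init_key_dist = dist_keys;		/* what we will distribute */
	req->resp_key_dist = 0;			/* request nothing back */
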
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index dca2082..5757829 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -69,4 +69,3 @@
obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
-
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5eb7af2..f2d9813 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3350,9 +3350,9 @@
int family = addr->sa_family;
unsigned int bucket;
- struct in_addr *in;
+ struct in_addr *in = NULL;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- struct in6_addr *in6;
+ struct in6_addr *in6 = NULL;
#endif
if (family == AF_INET) {
in = &((struct sockaddr_in *)addr)->sin_addr;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 708dc20..378ddf9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1955,6 +1955,49 @@
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
+/*
+ * tcp_v4_nuke_addr - destroy all sockets on the given local address
+ */
+void tcp_v4_nuke_addr(__u32 saddr)
+{
+ unsigned int bucket;
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (inet->inet_rcv_saddr != saddr)
+ continue;
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ sk->sk_err = ETIMEDOUT;
+ sk->sk_error_report(sk);
+
+ tcp_done(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ sock_put(sk);
+
+ goto restart;
+ }
+ spin_unlock_bh(lock);
+ }
+}
+
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
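
tcp_v4_nuke_addr() must drop the per-bucket lock before tearing a socket down, so after each kill it jumps back to restart and rescans the bucket: an hlist_nulls cursor is stale once the lock has been released. The sock_hold()/sock_put() pair pins the socket across the unlocked window. The idiom, condensed (should_kill() is a hypothetical stand-in for the address and state checks above):

	/* Restart-scan idiom: never carry a list cursor across an unlock. */
restart:
	spin_lock_bh(lock);
	sk_nulls_for_each(sk, node, head) {
		if (!should_kill(sk))		/* hypothetical predicate */
			continue;

		sock_hold(sk);			/* pin sk before dropping lock */
		spin_unlock_bh(lock);

		/* safe to report the error and call tcp_done(sk) here */

		sock_put(sk);
		goto restart;			/* cursor is stale: rescan */
	}
	spin_unlock_bh(lock);
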
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cf2cf62..713e09d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1572,6 +1572,16 @@
return addrconf_ifid_infiniband(eui, dev);
case ARPHRD_SIT:
return addrconf_ifid_sit(eui, dev);
+ case ARPHRD_RAWIP: {
+ struct in6_addr lladdr;
+
+ if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
+ get_random_bytes(eui, 8);
+ else
+ memcpy(eui, lladdr.s6_addr + 8, 8);
+
+ return 0;
+ }
}
return -1;
}
@@ -2396,6 +2406,7 @@
(dev->type != ARPHRD_FDDI) &&
(dev->type != ARPHRD_IEEE802_TR) &&
(dev->type != ARPHRD_ARCNET) &&
+ (dev->type != ARPHRD_RAWIP) &&
(dev->type != ARPHRD_INFINIBAND)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9d4b165..77b1a28 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -437,8 +437,9 @@
}
/* XXX: idev->cnf.proxy_ndp? */
- if (net->ipv6.devconf_all->proxy_ndp &&
- pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
+ if ((net->ipv6.devconf_all->proxy_ndp == 1 &&
+ pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0))
+ || net->ipv6.devconf_all->proxy_ndp >= 2) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0)
return ip6_input(skb);
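
The forwarding change turns proxy_ndp into a tri-state: 1 keeps the stock behaviour (proxy only for destinations with a pneigh entry), while any value >= 2 proxies unconditionally. Written out as a predicate, the new condition is equivalent to this illustrative restatement:

	/* Illustrative restatement of the condition above. */
	static bool ip6_proxy_this(struct net *net, struct sk_buff *skb,
				   const struct ipv6hdr *hdr)
	{
		int mode = net->ipv6.devconf_all->proxy_ndp;

		if (mode >= 2)			/* proxy every destination */
			return true;
		return mode == 1 &&		/* classic: per-address entries */
		       pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0);
	}
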
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2a318f2..5cea020 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -27,6 +27,7 @@
struct tcf_proto *filter_list;
u8 prio2band[TC_PRIO_MAX+1];
struct Qdisc *queues[TCQ_PRIO_BANDS];
+ u8 enable_flow;
};
@@ -96,6 +97,9 @@
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
+ if (!q->enable_flow)
+ return NULL;
+
for (prio = 0; prio < q->bands; prio++) {
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->ops->peek(qdisc);
@@ -110,6 +114,9 @@
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
+ if (!q->enable_flow)
+ return NULL;
+
for (prio = 0; prio < q->bands; prio++) {
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->dequeue(qdisc);
@@ -150,6 +157,7 @@
for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
sch->q.qlen = 0;
+ q->enable_flow = 1;
}
static void
@@ -182,6 +190,7 @@
}
sch_tree_lock(sch);
+ q->enable_flow = qopt->enable_flow;
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
@@ -245,6 +254,7 @@
struct tc_prio_qopt opt;
opt.bands = q->bands;
+ opt.enable_flow = q->enable_flow;
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
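
enable_flow acts as a dequeue gate for the prio qdisc: with the flag cleared, enqueue proceeds normally but dequeue() and peek() return NULL, so packets are held in the bands until a qdisc reset (which re-enables flow) or a TCA_OPTIONS update flips it back on. Configuring it from user space implies a matching extension of struct tc_prio_qopt; the companion uapi change is not shown in the hunks above, so the following is an assumed sketch:

	/* Assumed layout after the (not shown) pkt_sched.h change. */
	struct tc_prio_qopt {
		int	bands;				/* number of bands */
		__u8	priomap[TC_PRIO_MAX + 1];	/* skb priority -> band */
		__u8	enable_flow;			/* 0: hold, 1: dequeue */
	};

	/* Filled in when building the TCA_OPTIONS netlink attribute: */
	struct tc_prio_qopt opt = {
		.bands		= 3,
		.enable_flow	= 1,			/* normal operation */
	};
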