blob: 3a66b8c10e09ab1876e7b678efc65073699342a0 [file] [log] [blame]
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cffrml.h>

#define container_obj(layr) container_of(layr, struct cfmuxl, layer)

#define CAIF_CTRL_CHANNEL 0
#define UP_CACHE_SIZE 8
#define DN_CACHE_SIZE 8

24struct cfmuxl {
25 struct cflayer layer;
26 struct list_head srvl_list;
27 struct list_head frml_list;
28 struct cflayer *up_cache[UP_CACHE_SIZE];
29 struct cflayer *dn_cache[DN_CACHE_SIZE];
30 /*
31 * Set when inserting or removing downwards layers.
32 */
33 spinlock_t transmit_lock;
34
35 /*
36 * Set when inserting or removing upwards layers.
37 */
38 spinlock_t receive_lock;
39
40};
41
42static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
43static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
44static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
45 int phyid);
46static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
47
48struct cflayer *cfmuxl_create(void)
49{
50 struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
51 if (!this)
52 return NULL;
53 memset(this, 0, sizeof(*this));
54 this->layer.receive = cfmuxl_receive;
55 this->layer.transmit = cfmuxl_transmit;
56 this->layer.ctrlcmd = cfmuxl_ctrlcmd;
57 INIT_LIST_HEAD(&this->srvl_list);
58 INIT_LIST_HEAD(&this->frml_list);
59 spin_lock_init(&this->transmit_lock);
60 spin_lock_init(&this->receive_lock);
61 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
62 return &this->layer;
63}
64
Sjur Braendelandb482cd22010-03-30 13:56:23 +000065int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
66{
67 struct cfmuxl *muxl = (struct cfmuxl *) layr;
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +000068
69 spin_lock_bh(&muxl->transmit_lock);
70 list_add_rcu(&dn->node, &muxl->frml_list);
71 spin_unlock_bh(&muxl->transmit_lock);
Sjur Braendelandb482cd22010-03-30 13:56:23 +000072 return 0;
73}
74
75static struct cflayer *get_from_id(struct list_head *list, u16 id)
76{
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +000077 struct cflayer *lyr;
78 list_for_each_entry_rcu(lyr, list, node) {
79 if (lyr->id == id)
80 return lyr;
Sjur Braendelandb482cd22010-03-30 13:56:23 +000081 }
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +000082
Sjur Braendelandb482cd22010-03-30 13:56:23 +000083 return NULL;
84}
85
sjur.brandeland@stericsson.com54e90fb2011-05-22 11:18:51 +000086int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
87{
88 struct cfmuxl *muxl = container_obj(layr);
89 struct cflayer *old;
90
91 spin_lock_bh(&muxl->receive_lock);
92
93 /* Two entries with same id is wrong, so remove old layer from mux */
94 old = get_from_id(&muxl->srvl_list, linkid);
95 if (old != NULL)
96 list_del_rcu(&old->node);
97
98 list_add_rcu(&up->node, &muxl->srvl_list);
99 spin_unlock_bh(&muxl->receive_lock);
100
101 return 0;
102}
103
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000104struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
105{
106 struct cfmuxl *muxl = container_obj(layr);
107 struct cflayer *dn;
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000108 int idx = phyid % DN_CACHE_SIZE;
109
110 spin_lock_bh(&muxl->transmit_lock);
111 rcu_assign_pointer(muxl->dn_cache[idx], NULL);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000112 dn = get_from_id(&muxl->frml_list, phyid);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000113 if (dn == NULL)
114 goto out;
115
116 list_del_rcu(&dn->node);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000117 caif_assert(dn != NULL);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000118out:
119 spin_unlock_bh(&muxl->transmit_lock);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000120 return dn;
121}
122
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000123static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
124{
125 struct cflayer *up;
126 int idx = id % UP_CACHE_SIZE;
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000127 up = rcu_dereference(muxl->up_cache[idx]);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000128 if (up == NULL || up->id != id) {
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000129 spin_lock_bh(&muxl->receive_lock);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000130 up = get_from_id(&muxl->srvl_list, id);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000131 rcu_assign_pointer(muxl->up_cache[idx], up);
132 spin_unlock_bh(&muxl->receive_lock);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000133 }
134 return up;
135}
136
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000137static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
138{
139 struct cflayer *dn;
140 int idx = dev_info->id % DN_CACHE_SIZE;
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000141 dn = rcu_dereference(muxl->dn_cache[idx]);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000142 if (dn == NULL || dn->id != dev_info->id) {
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000143 spin_lock_bh(&muxl->transmit_lock);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000144 dn = get_from_id(&muxl->frml_list, dev_info->id);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000145 rcu_assign_pointer(muxl->dn_cache[idx], dn);
146 spin_unlock_bh(&muxl->transmit_lock);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000147 }
148 return dn;
149}
150
151struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
152{
153 struct cflayer *up;
154 struct cfmuxl *muxl = container_obj(layr);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000155 int idx = id % UP_CACHE_SIZE;
156
sjur.brandeland@stericsson.com54e90fb2011-05-22 11:18:51 +0000157 if (id == 0) {
158 pr_warn("Trying to remove control layer\n");
159 return NULL;
160 }
161
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000162 spin_lock_bh(&muxl->receive_lock);
163 up = get_from_id(&muxl->srvl_list, id);
Sjur Braendeland5b208652010-04-28 08:54:36 +0000164 if (up == NULL)
Sjur Braendelanda9a8f102010-05-21 02:16:11 +0000165 goto out;
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000166
167 rcu_assign_pointer(muxl->up_cache[idx], NULL);
168 list_del_rcu(&up->node);
Sjur Braendelanda9a8f102010-05-21 02:16:11 +0000169out:
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000170 spin_unlock_bh(&muxl->receive_lock);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000171 return up;
172}
173
174static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
175{
176 int ret;
177 struct cfmuxl *muxl = container_obj(layr);
178 u8 id;
179 struct cflayer *up;
180 if (cfpkt_extr_head(pkt, &id, 1) < 0) {
Joe Perchesb31fa5b2010-09-05 21:31:11 +0000181 pr_err("erroneous Caif Packet\n");
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000182 cfpkt_destroy(pkt);
183 return -EPROTO;
184 }
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000185 rcu_read_lock();
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000186 up = get_up(muxl, id);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000187
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000188 if (up == NULL) {
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000189 pr_debug("Received data on unknown link ID = %d (0x%x)"
190 " up == NULL", id, id);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000191 cfpkt_destroy(pkt);
192 /*
193 * Don't return ERROR, since modem misbehaves and sends out
194 * flow on before linksetup response.
195 */
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000196
197 rcu_read_unlock();
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000198 return /* CFGLU_EPROT; */ 0;
199 }
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000200
201 /* We can't hold rcu_lock during receive, so take a ref count instead */
Sjur Braendeland5b208652010-04-28 08:54:36 +0000202 cfsrvl_get(up);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000203 rcu_read_unlock();
204
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000205 ret = up->receive(up, pkt);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000206
Sjur Braendeland5b208652010-04-28 08:54:36 +0000207 cfsrvl_put(up);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000208 return ret;
209}
210
211static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
212{
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000213 struct cfmuxl *muxl = container_obj(layr);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000214 int err;
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000215 u8 linkid;
216 struct cflayer *dn;
217 struct caif_payload_info *info = cfpkt_info(pkt);
Sjur Brændeland39b9afb2011-04-11 10:43:52 +0000218 BUG_ON(!info);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000219
220 rcu_read_lock();
221
Sjur Brændeland39b9afb2011-04-11 10:43:52 +0000222 dn = get_dn(muxl, info->dev_info);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000223 if (dn == NULL) {
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000224 pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
Joe Perchesb31fa5b2010-09-05 21:31:11 +0000225 info->dev_info->id, info->dev_info->id);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000226 rcu_read_unlock();
227 cfpkt_destroy(pkt);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000228 return -ENOTCONN;
229 }
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000230
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000231 info->hdr_len += 1;
232 linkid = info->channel_id;
233 cfpkt_add_head(pkt, &linkid, 1);
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000234
235 /* We can't hold rcu_lock during receive, so take a ref count instead */
236 cffrml_hold(dn);
237
238 rcu_read_unlock();
239
240 err = dn->transmit(dn, pkt);
241
242 cffrml_put(dn);
243 return err;
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000244}
245
246static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
247 int phyid)
248{
249 struct cfmuxl *muxl = container_obj(layr);
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000250 struct cflayer *layer;
sjur.brandeland@stericsson.com54e90fb2011-05-22 11:18:51 +0000251 int idx;
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000252
253 rcu_read_lock();
254 list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
sjur.brandeland@stericsson.com54e90fb2011-05-22 11:18:51 +0000255
256 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
257
258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ||
259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
260 layer->id != 0) {
261
262 idx = layer->id % UP_CACHE_SIZE;
263 spin_lock_bh(&muxl->receive_lock);
264 rcu_assign_pointer(muxl->up_cache[idx], NULL);
265 list_del_rcu(&layer->node);
266 spin_unlock_bh(&muxl->receive_lock);
267 }
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000268 /* NOTE: ctrlcmd is not allowed to block */
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000269 layer->ctrlcmd(layer, ctrl, phyid);
sjur.brandeland@stericsson.com54e90fb2011-05-22 11:18:51 +0000270 }
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000271 }
sjur.brandeland@stericsson.com0b1e9732011-05-13 02:43:59 +0000272 rcu_read_unlock();
Sjur Braendelandb482cd22010-03-30 13:56:23 +0000273}