/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to
 *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_dev.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>

MODULE_LICENSE("GPL");
#define TIMEOUT (HZ*5)

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	atomic_t in_use;
	atomic_t state;
	u16 phyid;
	struct net_device *netdev;
	wait_queue_head_t event;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in the list */
	spinlock_t lock;
};

struct caif_net {
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static struct cfcnfg *cfg;

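/* Return the per-network-namespace list of registered CAIF devices. */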
static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	BUG_ON(!net);
	caifn = net_generic(net, caif_net_id);
	BUG_ON(!caifn);
	return &caifn->caifdevs;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs;
	struct caif_device_entry *caifd;
	caifdevs = caif_device_list(dev_net(dev));
	BUG_ON(!caifdevs);
	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
	if (!caifd)
		return NULL;
	caifd->netdev = dev;
	list_add(&caifd->list, &caifdevs->list);
	init_waitqueue_head(&caifd->event);
	return caifd;
}

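/* Find the caif_device_entry registered for a network device, or NULL. */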
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
		caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;
	BUG_ON(!caifdevs);
	list_for_each_entry(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

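/* Unlink and free the caif_device_entry for a CAIF network device. */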
static void caif_device_destroy(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
		caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;
	ASSERT_RTNL();
	if (dev->type != ARPHRD_CAIF)
		return;

	spin_lock_bh(&caifdevs->lock);
	caifd = caif_get(dev);
	if (caifd == NULL) {
		spin_unlock_bh(&caifdevs->lock);
		return;
	}

	list_del(&caifd->list);
	spin_unlock_bh(&caifdevs->lock);

	kfree(caifd);
}

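/* Transmit a CAIF packet on the underlying network device. */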
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	struct caif_device_entry *caifd =
		container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb, *skb2;
	int ret = -EINVAL;
	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	/*
	 * Don't allow the SKB to be destroyed upon error, but signal a resend
	 * notification to clients. We can't rely on the return value as
	 * congestion (NET_XMIT_CN) sometimes drops the packet and sometimes
	 * doesn't.
	 */
	if (netif_queue_stopped(caifd->netdev))
		return -EAGAIN;
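	/*
	 * Take an extra reference so the skb is not destroyed if the driver
	 * drops it on error; on success the extra reference is released below.
	 */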
	skb2 = skb_get(skb);

	ret = dev_queue_xmit(skb2);

	if (!ret)
		kfree_skb(skb);
	else
		return -EAGAIN;

	return 0;
}

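/* Track whether the PHY is in use and wake up anyone waiting for it to become idle. */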
static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
	struct caif_device_entry *caifd;
	struct caif_dev_common *caifdev;
	caifd = container_of(layr, struct caif_device_entry, layer);
	caifdev = netdev_priv(caifd->netdev);
	if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
		atomic_set(&caifd->in_use, 1);
		wake_up_interruptible(&caifd->event);

	} else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
		atomic_set(&caifd->in_use, 0);
		wake_up_interruptible(&caifd->event);
	}
	return 0;
}

/*
 * Deliver received packets to the CAIF stack attached to the device.
 * Returns non-zero on error.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct net *net;
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	net = dev_net(dev);
	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
		return NET_RX_DROP;

	if (caifd->layer.up->receive(caifd->layer.up, pkt))
		return NET_RX_DROP;

	return 0;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

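/* Forward flow-control on/off events from the network device to the layer above. */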
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
		return;

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
}

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	enum cfcnfg_phy_preference pref;
	int res = -EINVAL;
	enum cfcnfg_phy_type phy_type;

	if (dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		pr_info("CAIF: %s():register %s\n", __func__, dev->name);
		caifd = caif_device_alloc(dev);
		if (caifd == NULL)
			break;
		caifdev = netdev_priv(dev);
		caifdev->flowctrl = dev_flowctrl;
		atomic_set(&caifd->state, what);
		res = 0;
		break;

	case NETDEV_UP:
		pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		caifdev = netdev_priv(dev);
		if (atomic_read(&caifd->state) == NETDEV_UP) {
			pr_info("CAIF: %s():%s already up\n",
				__func__, dev->name);
			break;
		}
		atomic_set(&caifd->state, what);
		caifd->layer.transmit = transmit;
		caifd->layer.modemcmd = modemcmd;

		if (caifdev->use_frag)
			phy_type = CFPHYTYPE_FRAG;
		else
			phy_type = CFPHYTYPE_CAIF;

		switch (caifdev->link_select) {
		case CAIF_LINK_HIGH_BANDW:
			pref = CFPHYPREF_HIGH_BW;
			break;
		case CAIF_LINK_LOW_LATENCY:
			pref = CFPHYPREF_LOW_LAT;
			break;
		default:
			pref = CFPHYPREF_HIGH_BW;
			break;
		}

		cfcnfg_add_phy_layer(get_caif_conf(),
				     phy_type,
				     dev,
				     &caifd->layer,
				     &caifd->phyid,
				     pref,
				     caifdev->use_fcs,
				     caifdev->use_stx);
		strncpy(caifd->layer.name, dev->name,
			sizeof(caifd->layer.name) - 1);
		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
		break;

	case NETDEV_GOING_DOWN:
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		pr_info("CAIF: %s():going down %s\n", __func__, dev->name);

		if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
		    atomic_read(&caifd->state) == NETDEV_DOWN)
			break;

		atomic_set(&caifd->state, what);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
			return -EINVAL;
		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);
		res = wait_event_interruptible_timeout(caifd->event,
				atomic_read(&caifd->in_use) == 0,
				TIMEOUT);
		break;

	case NETDEV_DOWN:
		caifd = caif_get(dev);
		if (caifd == NULL)
			break;
		pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
		if (atomic_read(&caifd->in_use))
			pr_warning("CAIF: %s(): "
				   "Unregistering an active CAIF device: %s\n",
				   __func__, dev->name);
		cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
		atomic_set(&caifd->state, what);
		break;

	case NETDEV_UNREGISTER:
		caifd = caif_get(dev);
		pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
		atomic_set(&caifd->state, what);
		caif_device_destroy(dev);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};


struct cfcnfg *get_caif_conf(void)
{
	return cfg;
}
EXPORT_SYMBOL(get_caif_conf);

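/* Connect a client adaptation layer using the parameters in the connect request. */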
int caif_connect_client(struct caif_connect_request *conn_req,
			struct cflayer *client_layer)
{
	struct cfctrl_link_param param;
	int ret;
	ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
	if (ret)
		return ret;
	/* Hook up the adaptation layer. */
	return cfcnfg_add_adaptation_layer(get_caif_conf(),
					   &param, client_layer);
}
EXPORT_SYMBOL(caif_connect_client);

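/* Disconnect a client adaptation layer from the CAIF stack. */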
int caif_disconnect_client(struct cflayer *adap_layer)
{
	return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
}
EXPORT_SYMBOL(caif_disconnect_client);

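/* Release the adaptation layer when the client no longer needs it. */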
void caif_release_client(struct cflayer *adap_layer)
{
	cfcnfg_release_adap_layer(adap_layer);
}
EXPORT_SYMBOL(caif_release_client);

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	spin_lock_init(&caifn->caifdevs.lock);
	return 0;
}

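/* Close and destroy all CAIF devices when the network namespace is torn down. */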
static void caif_exit_net(struct net *net)
{
	struct net_device *dev;
	int res;
	rtnl_lock();
	for_each_netdev(net, dev) {
		if (dev->type != ARPHRD_CAIF)
			continue;
		res = dev_close(dev);
		caif_device_destroy(dev);
	}
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;
	cfg = cfcnfg_create();
	if (!cfg) {
		pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
		goto err_cfcnfg_create_failed;
	}
	result = register_pernet_device(&caif_net_ops);

	if (result) {
		kfree(cfg);
		cfg = NULL;
		return result;
	}
	dev_add_pack(&caif_packet_type);
	register_netdevice_notifier(&caif_device_notifier);

	return result;
err_cfcnfg_create_failed:
	return -ENODEV;
}

static void __exit caif_device_exit(void)
{
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_device(&caif_net_ops);
	unregister_netdevice_notifier(&caif_device_notifier);
	cfcnfg_remove(cfg);
}

module_init(caif_device_init);
module_exit(caif_device_exit);