/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);
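
/*
 * Usage sketch (user space, an illustration rather than part of this driver):
 * attaching this line discipline to an open tty creates a CAIF network
 * interface named "cf<ttyname>" (see ldisc_open() below), e.g. with
 * <fcntl.h>, <sys/ioctl.h> and <linux/tty.h>:
 *
 *      int ldisc = N_CAIF;
 *      int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
 *      ioctl(fd, TIOCSETD, &ldisc);
 *
 * after which "cfttyS0" can be configured like any other network device.
 */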

#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING 1 /* Bit 1 = 0x02 */
#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK 4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096
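
/*
 * SEND_QUEUE_HIGH/LOW are watermarks on the internal skb queue: flow towards
 * the CAIF stack is turned OFF once more than SEND_QUEUE_HIGH packets are
 * queued and back ON again when the queue drains below SEND_QUEUE_LOW.
 * CAIF_SENDING and CAIF_FLOW_OFF_SENT are bit numbers in ser_device->state,
 * manipulated with test_and_set_bit()/clear_bit().
 */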

static DEFINE_SPINLOCK(ser_lock);
static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);

static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");

static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");

static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");

static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, S_IRUGO);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

static struct dentry *debugfsdir;

static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);

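/*
 * Per-tty state for one CAIF serial link: the CAIF device callbacks in
 * "common", the registered net_device, the queue of frames waiting to be
 * written to the tty, and the CAIF_SENDING/CAIF_FLOW_OFF_SENT state bits.
 */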
struct ser_device {
        struct caif_dev_common common;
        struct list_head node;
        struct net_device *dev;
        struct sk_buff_head head;
        struct tty_struct *tty;
        bool tx_started;
        unsigned long state;
#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_tty_dir;
        struct debugfs_blob_wrapper tx_blob;
        struct debugfs_blob_wrapper rx_blob;
        u8 rx_data[128];
        u8 tx_data[128];
        u8 tty_status;
#endif
};

static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);

#ifdef CONFIG_DEBUG_FS
static inline void update_tty_status(struct ser_device *ser)
{
        ser->tty_status =
                ser->tty->stopped << 5 |
                ser->tty->flow_stopped << 3 |
                ser->tty->packet << 2 |
                ser->tty->port->low_latency << 1;
}

static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
        ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);
        if (!IS_ERR(ser->debugfs_tty_dir)) {
                debugfs_create_blob("last_tx_msg", S_IRUSR,
                                    ser->debugfs_tty_dir,
                                    &ser->tx_blob);

                debugfs_create_blob("last_rx_msg", S_IRUSR,
                                    ser->debugfs_tty_dir,
                                    &ser->rx_blob);

                debugfs_create_x32("ser_state", S_IRUSR,
                                   ser->debugfs_tty_dir,
                                   (u32 *)&ser->state);

                debugfs_create_x8("tty_status", S_IRUSR,
                                  ser->debugfs_tty_dir,
                                  &ser->tty_status);
        }
        ser->tx_blob.data = ser->tx_data;
        ser->tx_blob.size = 0;
        ser->rx_blob.data = ser->rx_data;
        ser->rx_blob.size = 0;
}

static inline void debugfs_deinit(struct ser_device *ser)
{
        debugfs_remove_recursive(ser->debugfs_tty_dir);
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
        if (size > sizeof(ser->rx_data))
                size = sizeof(ser->rx_data);
        memcpy(ser->rx_data, data, size);
        ser->rx_blob.data = ser->rx_data;
        ser->rx_blob.size = size;
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
        if (size > sizeof(ser->tx_data))
                size = sizeof(ser->tx_data);
        memcpy(ser->tx_data, data, size);
        ser->tx_blob.data = ser->tx_data;
        ser->tx_blob.size = size;
}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}
#endif

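/*
 * Line discipline receive hook: wrap each chunk of bytes from the tty in an
 * skb tagged ETH_P_CAIF and hand it to the network stack with netif_rx_ni().
 * Per-byte error flags from the driver are currently ignored.
 */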
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
                          char *flags, int count)
{
        struct sk_buff *skb = NULL;
        struct ser_device *ser;
        int ret;
        u8 *p;

        ser = tty->disc_data;

        /*
         * NOTE: flags may contain information about break or overrun.
         * This is not yet handled.
         */

        /*
         * Workaround for garbage at start of transmission,
         * only enable if STX handling is not enabled.
         */
        if (!ser->common.use_stx && !ser->tx_started) {
                dev_info(&ser->dev->dev,
                         "Bytes received before initial transmission - bytes discarded\n");
                return;
        }

        BUG_ON(ser->dev == NULL);

        /* Get a suitable caif packet and copy in data. */
        skb = netdev_alloc_skb(ser->dev, count + 1);
        if (skb == NULL)
                return;
        p = skb_put(skb, count);
        memcpy(p, data, count);

        skb->protocol = htons(ETH_P_CAIF);
        skb_reset_mac_header(skb);
        debugfs_rx(ser, data, count);
        /* Push received packet up the stack. */
        ret = netif_rx_ni(skb);
        if (!ret) {
                ser->dev->stats.rx_packets++;
                ser->dev->stats.rx_bytes += count;
        } else
                ++ser->dev->stats.rx_dropped;
        update_tty_status(ser);
}

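/*
 * Drain the transmit queue into the tty (or loop frames straight back into
 * ldisc_receive() when ser_loop is set). The CAIF_SENDING bit keeps the
 * drain single-threaded, and flow towards the CAIF stack is re-enabled once
 * the queue falls to SEND_QUEUE_LOW or below.
 */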
static int handle_tx(struct ser_device *ser)
{
        struct tty_struct *tty;
        struct sk_buff *skb;
        int tty_wr, len, room;

        tty = ser->tty;
        ser->tx_started = true;

        /* Enter critical section */
        if (test_and_set_bit(CAIF_SENDING, &ser->state))
                return 0;

        /* skb_peek is safe because handle_tx is called after skb_queue_tail */
        while ((skb = skb_peek(&ser->head)) != NULL) {

                /* Make sure you don't write too much */
                len = skb->len;
                room = tty_write_room(tty);
                if (!room)
                        break;
                if (room > ser_write_chunk)
                        room = ser_write_chunk;
                if (len > room)
                        len = room;

                /* Write to tty or loopback */
                if (!ser_loop) {
                        tty_wr = tty->ops->write(tty, skb->data, len);
                        update_tty_status(ser);
                } else {
                        tty_wr = len;
                        ldisc_receive(tty, skb->data, NULL, len);
                }
                ser->dev->stats.tx_packets++;
                ser->dev->stats.tx_bytes += tty_wr;

                /* Error on TTY ?! */
                if (tty_wr < 0)
                        goto error;
                /* Reduce buffer written, and discard if empty */
                skb_pull(skb, tty_wr);
                if (skb->len == 0) {
                        struct sk_buff *tmp = skb_dequeue(&ser->head);
                        WARN_ON(tmp != skb);
                        if (in_interrupt())
                                dev_kfree_skb_irq(skb);
                        else
                                kfree_skb(skb);
                }
        }
        /* Send flow on again once the queue has drained below the low watermark */
        if (ser->head.qlen <= SEND_QUEUE_LOW &&
            test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
            ser->common.flowctrl != NULL)
                ser->common.flowctrl(ser->dev, ON);
        clear_bit(CAIF_SENDING, &ser->state);
        return 0;
error:
        clear_bit(CAIF_SENDING, &ser->state);
        return tty_wr;
}

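/*
 * ndo_start_xmit: queue the frame, tell the CAIF stack to back off once the
 * queue exceeds SEND_QUEUE_HIGH, then try to push data out immediately.
 */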
static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ser_device *ser;

        BUG_ON(dev == NULL);
        ser = netdev_priv(dev);

        /* Send flow off once, on high water mark */
        if (ser->head.qlen > SEND_QUEUE_HIGH &&
            !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
            ser->common.flowctrl != NULL)
                ser->common.flowctrl(ser->dev, OFF);

        skb_queue_tail(&ser->head, skb);
        return handle_tx(ser);
}

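/* Called by the tty layer when more room is available in the tty buffer. */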
static void ldisc_tx_wakeup(struct tty_struct *tty)
{
        struct ser_device *ser;

        ser = tty->disc_data;
        BUG_ON(ser == NULL);
        WARN_ON(ser->tty != tty);
        handle_tx(ser);
}

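/*
 * Deferred teardown: devices moved to ser_release_list by ldisc_close() are
 * closed and unregistered here, from a work item, so the netdev teardown
 * runs under rtnl_lock() outside the tty-layer call path.
 */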
static void ser_release(struct work_struct *work)
{
        struct list_head list;
        struct ser_device *ser, *tmp;

        spin_lock(&ser_lock);
        list_replace_init(&ser_release_list, &list);
        spin_unlock(&ser_lock);

        if (!list_empty(&list)) {
                rtnl_lock();
                list_for_each_entry_safe(ser, tmp, &list, node) {
                        dev_close(ser->dev);
                        unregister_netdevice(ser->dev);
                        debugfs_deinit(ser);
                }
                rtnl_unlock();
        }
}

static DECLARE_WORK(ser_release_work, ser_release);

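/*
 * Attach the line discipline to a tty: allocate and register a "cf<tty>"
 * net_device whose private data is the ser_device, and pin the tty with
 * tty_kref_get() until ldisc_close().
 */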
static int ldisc_open(struct tty_struct *tty)
{
        struct ser_device *ser;
        struct net_device *dev;
        char name[64];
        int result;

        /* No write no play */
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
                return -EPERM;

        /* Release devices to avoid name collision */
        ser_release(NULL);

        result = snprintf(name, sizeof(name), "cf%s", tty->name);
        if (result >= IFNAMSIZ)
                return -EINVAL;
        dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
                           caifdev_setup);
        if (!dev)
                return -ENOMEM;

        ser = netdev_priv(dev);
        ser->tty = tty_kref_get(tty);
        ser->dev = dev;
        debugfs_init(ser, tty);
        tty->receive_room = N_TTY_BUF_SIZE;
        tty->disc_data = ser;
        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        rtnl_lock();
        result = register_netdevice(dev);
        if (result) {
                rtnl_unlock();
                free_netdev(dev);
                return -ENODEV;
        }

        spin_lock(&ser_lock);
        list_add(&ser->node, &ser_list);
        spin_unlock(&ser_lock);
        rtnl_unlock();
        netif_stop_queue(dev);
        update_tty_status(ser);
        return 0;
}

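/*
 * Detach from the tty: drop the tty reference taken in ldisc_open() and
 * defer net_device teardown to the ser_release work item.
 */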
static void ldisc_close(struct tty_struct *tty)
{
        struct ser_device *ser = tty->disc_data;

        tty_kref_put(ser->tty);

        spin_lock(&ser_lock);
        list_move(&ser->node, &ser_release_list);
        spin_unlock(&ser_lock);
        schedule_work(&ser_release_work);
}

/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
        .owner = THIS_MODULE,
        .magic = TTY_LDISC_MAGIC,
        .name = "n_caif",
        .open = ldisc_open,
        .close = ldisc_close,
        .receive_buf = ldisc_receive,
        .write_wakeup = ldisc_tx_wakeup
};

static int register_ldisc(void)
{
        int result;

        result = tty_register_ldisc(N_CAIF, &caif_ldisc);
        if (result < 0) {
                pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
                       result);
                return result;
        }
        return result;
}

static const struct net_device_ops netdev_ops = {
        .ndo_open = caif_net_open,
        .ndo_stop = caif_net_close,
        .ndo_start_xmit = caif_xmit
};

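/*
 * alloc_netdev() setup callback: point-to-point, ARP-less CAIF device with
 * no qdisc; STX and FCS framing follow the module parameters, while
 * fragmentation is always enabled.
 */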
static void caifdev_setup(struct net_device *dev)
{
        struct ser_device *serdev = netdev_priv(dev);

        dev->features = 0;
        dev->netdev_ops = &netdev_ops;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CAIF_MAX_MTU;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->destructor = free_netdev;
        skb_queue_head_init(&serdev->head);
        serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
        serdev->common.use_frag = true;
        serdev->common.use_stx = ser_use_stx;
        serdev->common.use_fcs = ser_use_fcs;
        serdev->dev = dev;
}

static int caif_net_open(struct net_device *dev)
{
        netif_wake_queue(dev);
        return 0;
}

static int caif_net_close(struct net_device *dev)
{
        netif_stop_queue(dev);
        return 0;
}

static int __init caif_ser_init(void)
{
        int ret;

        ret = register_ldisc();
        debugfsdir = debugfs_create_dir("caif_serial", NULL);
        return ret;
}

static void __exit caif_ser_exit(void)
{
        spin_lock(&ser_lock);
        list_splice(&ser_list, &ser_release_list);
        spin_unlock(&ser_lock);
        ser_release(NULL);
        cancel_work_sync(&ser_release_work);
        tty_unregister_ldisc(N_CAIF);
        debugfs_remove_recursive(debugfsdir);
}

module_init(caif_ser_init);
module_exit(caif_ser_exit);