/*
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 * Licensed under the GPL.
 */

#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/netdevice.h"
#include "linux/rtnetlink.h"
#include "linux/skbuff.h"
#include "linux/socket.h"
#include "linux/spinlock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/etherdevice.h"
#include "linux/list.h"
#include "linux/inetdevice.h"
#include "linux/ctype.h"
#include "linux/bootmem.h"
#include "linux/ethtool.h"
#include "linux/platform_device.h"
#include "asm/uaccess.h"
#include "user_util.h"
#include "kern_util.h"
#include "net_kern.h"
#include "net_user.h"
#include "mconsole_kern.h"
#include "init.h"
#include "irq_user.h"
#include "irq_kern.h"

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

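/* Pull one packet from the host file descriptor into a freshly allocated
 * skb and feed it to the stack.  Returns the packet length, 0 if nothing
 * was read (or no memory was available), or a negative error from the
 * transport's read routine.
 */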
static int uml_net_rx(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	int pkt_len;
	struct sk_buff *skb;

	/* If we can't allocate memory, try again next round. */
	skb = dev_alloc_skb(dev->mtu);
	if (skb == NULL) {
		lp->stats.rx_dropped++;
		return 0;
	}

	skb->dev = dev;
	skb_put(skb, dev->mtu);
	skb->mac.raw = skb->data;
	pkt_len = (*lp->read)(lp->fd, &skb, lp);

	if (pkt_len > 0) {
		skb_trim(skb, pkt_len);
		skb->protocol = (*lp->protocol)(skb);
		netif_rx(skb);

		lp->stats.rx_bytes += skb->len;
		lp->stats.rx_packets++;
		return pkt_len;
	}

	kfree_skb(skb);
	return pkt_len;
}

static void uml_dev_close(void *dev)
{
	dev_close((struct net_device *) dev);
}

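/* Interrupt handler for the read IRQ on the device's host file descriptor.
 * Drains every packet that is currently readable, and schedules the device
 * shutdown from process context if the transport reports a read error.
 */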
irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct uml_net_private *lp = dev->priv;
	int err;

	if(!netif_running(dev))
		return(IRQ_NONE);

	spin_lock(&lp->lock);
	while((err = uml_net_rx(dev)) > 0) ;
	if(err < 0) {
		DECLARE_WORK(close_work, uml_dev_close, dev);
		printk(KERN_ERR
		       "Device '%s' read returned %d, shutting it down\n",
		       dev->name, err);
		/* dev_close() can't be called in interrupt context, and it
		 * takes lp->lock again.  It can also be called safely more
		 * than once on the same device, since it tests for
		 * (dev->flags & IFF_UP), so there's no harm in deferring the
		 * shutdown to a work queue. */
		schedule_work(&close_work);
		goto out;
	}
	reactivate_fd(lp->fd, UM_ETH_IRQ);

out:
	spin_unlock(&lp->lock);
	return(IRQ_HANDLED);
}

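/* Open the host-side transport, wire its file descriptor up to the UML
 * ethernet IRQ, and drain any packets that were already queued on the host
 * before the interrupt could fire.  Derives a MAC from the IP address if
 * none was supplied on the command line.
 */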
static int uml_net_open(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	int err;

	spin_lock(&lp->lock);

	if(lp->fd >= 0){
		err = -ENXIO;
		goto out;
	}

	if(!lp->have_mac){
		dev_ip_addr(dev, &lp->mac[2]);
		set_ether_mac(dev, lp->mac);
	}

	lp->fd = (*lp->open)(&lp->user);
	if(lp->fd < 0){
		err = lp->fd;
		goto out;
	}

	err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
			     SA_INTERRUPT | SA_SHIRQ, dev->name, dev);
	if(err != 0){
		printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}

	lp->tl.data = (unsigned long) &lp->user;
	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */
	while((err = uml_net_rx(dev)) > 0) ;

	spin_unlock(&lp->lock);

	spin_lock(&opened_lock);
	list_add(&lp->list, &opened);
	spin_unlock(&opened_lock);

	return 0;
out_close:
	if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;
out:
	spin_unlock(&lp->lock);
	return err;
}

static int uml_net_close(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;

	netif_stop_queue(dev);
	spin_lock(&lp->lock);

	free_irq(dev->irq, dev);
	if(lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;

	spin_unlock(&lp->lock);

	spin_lock(&opened_lock);
	list_del(&lp->list);
	spin_unlock(&opened_lock);

	return 0;
}

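/* Hand one skb to the transport's write routine.  A write of 0 bytes counts
 * as a dropped packet; any other return that doesn't match skb->len is
 * logged as an error.  The queue is restarted and the skb freed in all cases.
 */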
static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	unsigned long flags;
	int len;

	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, &skb, lp);

	if(len == skb->len) {
		lp->stats.tx_packets++;
		lp->stats.tx_bytes += skb->len;
		dev->trans_start = jiffies;
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);
	}
	else if(len == 0){
		netif_start_queue(dev);
		lp->stats.tx_dropped++;
	}
	else {
		netif_start_queue(dev);
		printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_kfree_skb(skb);

	return 0;
}

static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
{
	struct uml_net_private *lp = dev->priv;
	return &lp->stats;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
	if (dev->flags & IFF_PROMISC) return;
	else if (dev->mc_count) dev->flags |= IFF_ALLMULTI;
	else dev->flags &= ~IFF_ALLMULTI;
}

static void uml_net_tx_timeout(struct net_device *dev)
{
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static int uml_net_set_mac(struct net_device *dev, void *addr)
{
	struct uml_net_private *lp = dev->priv;
	struct sockaddr *hwaddr = addr;

	spin_lock(&lp->lock);
	memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
	spin_unlock(&lp->lock);

	return(0);
}

static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
	struct uml_net_private *lp = dev->priv;
	int err = 0;

	spin_lock(&lp->lock);

	new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
	if(new_mtu < 0){
		err = new_mtu;
		goto out;
	}

	dev->mtu = new_mtu;

 out:
	spin_unlock(&lp->lock);
	return err;
}

static void uml_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRIVER_NAME);
	strcpy(info->version, "42");
}

static struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo = uml_net_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

void uml_net_user_timer_expire(unsigned long _conn)
{
#ifdef undef
	struct connection *conn = (struct connection *)_conn;

	dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
	do_connect(conn);
#endif
}

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};
static int driver_registered;

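/* Instantiate one UML ethernet device: allocate the uml_net bookkeeping
 * structure and a net_device with room for the transport's private data,
 * register both with the driver core, and fill in uml_net_private from the
 * transport's kern/user operation tables.
 */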
static int eth_configure(int n, void *init, char *mac,
			 struct transport *transport)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;
	int save, err, size;

	size = transport->private_size + sizeof(struct uml_net_private) +
		sizeof(((struct uml_net_private *) 0)->user);

	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
		return(1);
	}

	memset(device, 0, sizeof(*device));
	INIT_LIST_HEAD(&device->list);
	device->index = n;

	spin_lock(&devices_lock);
	list_add(&device->list, &devices);
	spin_unlock(&devices_lock);

	if (setup_etheraddr(mac, device->mac))
		device->have_mac = 1;

	printk(KERN_INFO "Netdevice %d ", n);
	if (device->have_mac)
		printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
		       device->mac[0], device->mac[1],
		       device->mac[2], device->mac[3],
		       device->mac[4], device->mac[5]);
	printk(": ");
	dev = alloc_etherdev(size);
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate device\n");
		return 1;
	}

	lp = dev->priv;
	/* This points to the transport private data. It's still clear, but we
	 * must memset it to 0 *now*. Let's help the drivers. */
	memset(lp, 0, size);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	platform_device_register(&device->pdev);
	SET_NETDEV_DEV(dev, &device->pdev.dev);

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);
	device->dev = dev;

	(*transport->kern->init)(dev, init);

	dev->mtu = transport->user->max_packet;
	dev->open = uml_net_open;
	dev->hard_start_xmit = uml_net_start_xmit;
	dev->stop = uml_net_close;
	dev->get_stats = uml_net_get_stats;
	dev->set_multicast_list = uml_net_set_multicast_list;
	dev->tx_timeout = uml_net_tx_timeout;
	dev->set_mac_address = uml_net_set_mac;
	dev->change_mtu = uml_net_change_mtu;
	dev->ethtool_ops = &uml_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	dev->irq = UM_ETH_IRQ;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err) {
		device->dev = NULL;
		/* XXX: should we call ->remove() here? */
		free_netdev(dev);
		return 1;
	}

	/* lp.user is the first four bytes of the transport data, which
	 * has already been initialized. This structure assignment will
	 * overwrite that, so we make sure that .user gets overwritten with
	 * what it already has.
	 */
	save = lp->user[0];
	*lp = ((struct uml_net_private)
		{ .list = LIST_HEAD_INIT(lp->list),
		  .dev = dev,
		  .fd = -1,
		  .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
		  .have_mac = device->have_mac,
		  .protocol = transport->kern->protocol,
		  .open = transport->user->open,
		  .close = transport->user->close,
		  .remove = transport->user->remove,
		  .read = transport->kern->read,
		  .write = transport->kern->write,
		  .add_address = transport->user->add_address,
		  .delete_address = transport->user->delete_address,
		  .set_mtu = transport->user->set_mtu,
		  .user = { save } });

	init_timer(&lp->tl);
	spin_lock_init(&lp->lock);
	lp->tl.function = uml_net_user_timer_expire;
	if (lp->have_mac)
		memcpy(lp->mac, device->mac, sizeof(lp->mac));

	if (transport->user->init)
		(*transport->user->init)(&lp->user, dev);

	if (device->have_mac)
		set_ether_mac(dev, device->mac);

	return 0;
}

static struct uml_net *find_device(int n)
{
	struct uml_net *device;
	struct list_head *ele;

	spin_lock(&devices_lock);
	list_for_each(ele, &devices){
		device = list_entry(ele, struct uml_net, list);
		if(device->index == n)
			goto out;
	}
	device = NULL;
 out:
	spin_unlock(&devices_lock);
	return(device);
}

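/* Parse the "<n>=<transport spec>" part of an eth<n>= option: extract the
 * device index, reject duplicates, and return a pointer to the
 * transport-specific remainder of the string in *str_out.
 */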
static int eth_parse(char *str, int *index_out, char **str_out)
{
	char *end;
	int n;

	n = simple_strtoul(str, &end, 0);
	if(end == str){
		printk(KERN_ERR "eth_setup: Failed to parse '%s'\n", str);
		return(1);
	}
	if(n < 0){
		printk(KERN_ERR "eth_setup: device %d is negative\n", n);
		return(1);
	}
	str = end;
	if(*str != '='){
		printk(KERN_ERR
		       "eth_setup: expected '=' after device number\n");
		return(1);
	}
	str++;
	if(find_device(n)){
		printk(KERN_ERR "eth_setup: Device %d already configured\n",
		       n);
		return(1);
	}
	if(index_out) *index_out = n;
	*str_out = str;
	return(0);
}

struct eth_init {
	struct list_head list;
	char *init;
	int index;
};

/* Filled in at boot time.  Will need locking if the transports become
 * modular.
 */
struct list_head transports = LIST_HEAD_INIT(transports);

/* Filled in during early boot */
struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);

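/* Match a command line spec against one transport by name prefix.  On a
 * match, allocate the transport's setup structure and fill it via the
 * setup() hook.  Returns nonzero whenever the spec named this transport,
 * even if allocation or setup failed (in which case *init_out is NULL).
 */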
static int check_transport(struct transport *transport, char *eth, int n,
			   void **init_out, char **mac_out)
{
	int len;

	len = strlen(transport->name);
	if(strncmp(eth, transport->name, len))
		return(0);

	eth += len;
	if(*eth == ',')
		eth++;
	else if(*eth != '\0')
		return(0);

	*init_out = kmalloc(transport->setup_size, GFP_KERNEL);
	if(*init_out == NULL)
		return(1);

	if(!transport->setup(eth, mac_out, *init_out)){
		kfree(*init_out);
		*init_out = NULL;
	}
	return(1);
}

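/* Called by each transport as it initializes.  Besides adding the transport
 * to the global list, this walks the saved eth<n>= command line entries and
 * configures any device whose spec matches the newly registered transport.
 */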
void register_transport(struct transport *new)
{
	struct list_head *ele, *next;
	struct eth_init *eth;
	void *init;
	char *mac = NULL;
	int match;

	list_add(&new->list, &transports);

	list_for_each_safe(ele, next, &eth_cmd_line){
		eth = list_entry(ele, struct eth_init, list);
		match = check_transport(new, eth->init, eth->index, &init,
					&mac);
		if(!match)
			continue;
		else if(init != NULL){
			eth_configure(eth->index, init, mac, new);
			kfree(init);
		}
		list_del(&eth->list);
	}
}

static int eth_setup_common(char *str, int index)
{
	struct list_head *ele;
	struct transport *transport;
	void *init;
	char *mac = NULL;

	list_for_each(ele, &transports){
		transport = list_entry(ele, struct transport, list);
		if(!check_transport(transport, str, index, &init, &mac))
			continue;
		if(init != NULL){
			eth_configure(index, init, mac, transport);
			kfree(init);
		}
		return(1);
	}
	return(0);
}

static int eth_setup(char *str)
{
	struct eth_init *new;
	int n, err;

	err = eth_parse(str, &n, &str);
	if(err) return(1);

	new = alloc_bootmem(sizeof(*new));
	if (new == NULL){
		printk("eth_init : alloc_bootmem failed\n");
		return(1);
	}

	INIT_LIST_HEAD(&new->list);
	new->index = n;
	new->init = str;

	list_add_tail(&new->list, &eth_cmd_line);
	return(1);
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);

#if 0
static int eth_init(void)
{
	struct list_head *ele, *next;
	struct eth_init *eth;

	list_for_each_safe(ele, next, &eth_cmd_line){
		eth = list_entry(ele, struct eth_init, list);

		if(eth_setup_common(eth->init, eth->index))
			list_del(&eth->list);
	}

	return(1);
}
__initcall(eth_init);
#endif

static int net_config(char *str)
{
	int n, err;

	err = eth_parse(str, &n, &str);
	if(err) return(err);

	str = kstrdup(str, GFP_KERNEL);
	if(str == NULL){
		printk(KERN_ERR "net_config failed to strdup string\n");
		return(-1);
	}
	err = !eth_setup_common(str, n);
	if(err)
		kfree(str);
	return(err);
}

static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

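/* mconsole "remove" handler - tear down a device that isn't currently open.
 * Refuses with -EBUSY while the transport file descriptor is still in use.
 */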
static int net_remove(int n)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;

	device = find_device(n);
	if(device == NULL)
		return -ENODEV;

	dev = device->dev;
	lp = dev->priv;
	if(lp->fd > 0)
		return -EBUSY;
	if(lp->remove != NULL) (*lp->remove)(&lp->user);
	unregister_netdev(dev);
	platform_device_unregister(&device->pdev);

	list_del(&device->list);
	kfree(device);
	free_netdev(dev);
	return 0;
}

static struct mc_device net_mc = {
	.name = "eth",
	.config = net_config,
	.get_config = NULL,
	.id = net_id,
	.remove = net_remove,
};

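/* inetaddr notifier - forwards IP address additions and removals on a UML
 * ethernet interface to the transport's add_address/delete_address hooks.
 */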
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct uml_net_private *lp;
	void (*proc)(unsigned char *, unsigned char *, void *);
	unsigned char addr_buf[4], netmask_buf[4];

	if(dev->open != uml_net_open) return(NOTIFY_DONE);

	lp = dev->priv;

	proc = NULL;
	switch (event){
	case NETDEV_UP:
		proc = lp->add_address;
		break;
	case NETDEV_DOWN:
		proc = lp->delete_address;
		break;
	}
	if(proc != NULL){
		memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
		memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
		(*proc)(addr_buf, netmask_buf, &lp->user);
	}
	return(NOTIFY_DONE);
}

struct notifier_block uml_inetaddr_notifier = {
	.notifier_call = uml_inetaddr_event,
};

static int uml_net_init(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;
	struct in_device *ip;
	struct in_ifaddr *in;

	mconsole_register_dev(&net_mc);
	register_inetaddr_notifier(&uml_inetaddr_notifier);

	/* Devices may have been opened already, so the uml_inetaddr_notifier
	 * didn't get a chance to run for them.  This fakes it so that
	 * addresses which have already been set up get handled properly.
	 */
	list_for_each(ele, &opened){
		lp = list_entry(ele, struct uml_net_private, list);
		ip = lp->dev->ip_ptr;
		if(ip == NULL) continue;
		in = ip->ifa_list;
		while(in != NULL){
			uml_inetaddr_event(NULL, NETDEV_UP, in);
			in = in->ifa_next;
		}
	}

	return(0);
}

__initcall(uml_net_init);

static void close_devices(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;

	list_for_each(ele, &opened){
		lp = list_entry(ele, struct uml_net_private, list);
		free_irq(lp->dev->irq, lp->dev);
		if((lp->close != NULL) && (lp->fd >= 0))
			(*lp->close)(lp->fd, &lp->user);
		if(lp->remove != NULL) (*lp->remove)(&lp->user);
	}
}

__uml_exitcall(close_devices);

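/* Parse a MAC address of the form xx:xx:xx:xx:xx:xx from the command line
 * into addr.  Returns 1 on success and 0 otherwise; addresses with the
 * multicast bit set are rejected so a device never gets an invalid MAC.
 */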
int setup_etheraddr(char *str, unsigned char *addr)
{
	char *end;
	int i;

	if(str == NULL)
		return(0);
	for(i = 0; i < 6; i++){
		addr[i] = simple_strtoul(str, &end, 16);
		if((end == str) ||
		   ((*end != ':') && (*end != ',') && (*end != '\0'))){
			printk(KERN_ERR
			       "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			return(0);
		}
		str = end + 1;
	}
	if(addr[0] & 1){
		printk(KERN_ERR
		       "Attempt to assign a broadcast ethernet address to a "
		       "device disallowed\n");
		return(0);
	}
	return(1);
}

void dev_ip_addr(void *d, unsigned char *bin_buf)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;

	if((ip == NULL) || ((in = ip->ifa_list) == NULL)){
		printk(KERN_WARNING "dev_ip_addr - device not assigned an "
		       "IP address\n");
		return;
	}
	memcpy(bin_buf, &in->ifa_address, sizeof(in->ifa_address));
}

void set_ether_mac(void *d, unsigned char *addr)
{
	struct net_device *dev = d;

	memcpy(dev->dev_addr, addr, ETH_ALEN);
}

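/* Make sure an skb has 'extra' bytes of tailroom, copying it into a larger
 * buffer (and freeing the original) when it doesn't.  The extra space is
 * then reserved with skb_put(); returns NULL if the copy fails.
 */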
struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
{
	if((skb != NULL) && (skb_tailroom(skb) < extra)){
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
		dev_kfree_skb(skb);
		skb = skb2;
	}
	if(skb != NULL) skb_put(skb, extra);
	return(skb);
}

void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
					void *),
		    void *arg)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	unsigned char address[4], netmask[4];

	if(ip == NULL) return;
	in = ip->ifa_list;
	while(in != NULL){
		memcpy(address, &in->ifa_address, sizeof(address));
		memcpy(netmask, &in->ifa_mask, sizeof(netmask));
		(*cb)(address, netmask, arg);
		in = in->ifa_next;
	}
}

int dev_netmask(void *d, void *m)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	__u32 *mask_out = m;

	if(ip == NULL)
		return(1);

	in = ip->ifa_list;
	if(in == NULL)
		return(1);

	*mask_out = in->ifa_mask;
	return(0);
}

void *get_output_buffer(int *len_out)
{
	void *ret;

	ret = (void *) __get_free_pages(GFP_KERNEL, 0);
	if(ret) *len_out = PAGE_SIZE;
	else *len_out = 0;
	return(ret);
}

void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer, 0);
}

int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
		     char **gate_addr)
{
	char *remain;

	remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
	if(remain != NULL){
		printk("tap_setup_common - Extra garbage on specification : "
		       "'%s'\n", remain);
		return(1);
	}

	return(0);
}

unsigned short eth_protocol(struct sk_buff *skb)
{
	return(eth_type_trans(skb, skb->dev));
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */