blob: 4615a9b3e26ce60c7205492ce3690c92a19c53dd [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Handle incoming frames
3 * Linux ethernet bridge
4 *
5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org>
7 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090014#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/kernel.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/netfilter_bridge.h>
Kyeyoon Park95850112014-10-23 14:49:17 -070019#include <linux/neighbour.h>
20#include <net/arp.h>
Paul Gortmakerbc3b2d72011-07-15 11:47:34 -040021#include <linux/export.h>
Vlad Yasevicha37b85c2013-02-13 12:00:10 +000022#include <linux/rculist.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include "br_private.h"
Roopa Prabhu11538d02017-01-31 22:59:55 -080024#include "br_private_tunnel.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
/* Hook for brouter.
 * Set externally (RCU-published); br_handle_frame() reads it with
 * rcu_dereference() and, when the hook returns non-zero, passes the
 * frame back to the stack (RX_HANDLER_PASS) instead of bridging it.
 */
br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
EXPORT_SYMBOL(br_should_route_hook);
29
/* okfn for the NF_BR_LOCAL_IN hook: hand the frame to the local network
 * stack.  @net and @sk are unused; the signature only exists to match
 * the netfilter okfn prototype used by NF_HOOK().
 */
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return netif_receive_skb(skb);
}
35
/* Deliver a frame to the bridge device itself (local receive path).
 *
 * Accounts the frame in the bridge's per-cpu rx stats, applies egress
 * VLAN policy as if the bridge device were a port, retargets the skb to
 * the bridge net_device and runs it through the NF_BR_LOCAL_IN hook,
 * ending in netif_receive_skb().
 *
 * Returns the NF_HOOK() verdict, or NET_RX_DROP if the frame was
 * filtered by VLAN policy.  Consumes @skb in all cases.
 * note: called under rcu_read_lock (uses br_vlan_group_rcu()).
 */
static int br_pass_frame_up(struct sk_buff *skb)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct net_bridge_vlan_group *vg;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);

	u64_stats_update_begin(&brstats->syncp);
	brstats->rx_packets++;
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	vg = br_vlan_group_rcu(br);
	/* Bridge is just like any other port.  Make sure the
	 * packet is allowed except in promisc mode when someone
	 * may be running packet capture.
	 */
	if (!(brdev->flags & IFF_PROMISC) &&
	    !br_allowed_egress(vg, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* retarget the skb to the bridge device; keep the original
	 * ingress device for the netfilter hook below
	 */
	indev = skb->dev;
	skb->dev = brdev;
	skb = br_handle_vlan(br, NULL, vg, skb);
	if (!skb)
		return NET_RX_DROP;
	/* update the multicast stats if the packet is IGMP/MLD */
	br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
			   BR_MCAST_DIR_TX);

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
		       dev_net(indev), NULL, skb, indev, NULL,
		       br_netif_receive_skb);
}
72
Kyeyoon Park95850112014-10-23 14:49:17 -070073static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
Jouni Malinen842a9ae2015-03-04 12:54:21 +020074 u16 vid, struct net_bridge_port *p)
Kyeyoon Park95850112014-10-23 14:49:17 -070075{
76 struct net_device *dev = br->dev;
77 struct neighbour *n;
78 struct arphdr *parp;
79 u8 *arpptr, *sha;
80 __be32 sip, tip;
81
Jouni Malinen842a9ae2015-03-04 12:54:21 +020082 BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
83
Nikolay Aleksandrov85a3d4a2016-08-30 17:44:29 +020084 if ((dev->flags & IFF_NOARP) ||
85 !pskb_may_pull(skb, arp_hdr_len(dev)))
Kyeyoon Park95850112014-10-23 14:49:17 -070086 return;
87
Kyeyoon Park95850112014-10-23 14:49:17 -070088 parp = arp_hdr(skb);
89
90 if (parp->ar_pro != htons(ETH_P_IP) ||
91 parp->ar_op != htons(ARPOP_REQUEST) ||
92 parp->ar_hln != dev->addr_len ||
93 parp->ar_pln != 4)
94 return;
95
96 arpptr = (u8 *)parp + sizeof(struct arphdr);
97 sha = arpptr;
98 arpptr += dev->addr_len; /* sha */
99 memcpy(&sip, arpptr, sizeof(sip));
100 arpptr += sizeof(sip);
101 arpptr += dev->addr_len; /* tha */
102 memcpy(&tip, arpptr, sizeof(tip));
103
104 if (ipv4_is_loopback(tip) ||
105 ipv4_is_multicast(tip))
106 return;
107
108 n = neigh_lookup(&arp_tbl, &tip, dev);
109 if (n) {
110 struct net_bridge_fdb_entry *f;
111
112 if (!(n->nud_state & NUD_VALID)) {
113 neigh_release(n);
114 return;
115 }
116
117 f = __br_fdb_get(br, n->ha, vid);
Jouni Malinen842a9ae2015-03-04 12:54:21 +0200118 if (f && ((p->flags & BR_PROXYARP) ||
119 (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) {
Kyeyoon Park95850112014-10-23 14:49:17 -0700120 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip,
121 sha, n->ha, sha);
Jouni Malinen842a9ae2015-03-04 12:54:21 +0200122 BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
123 }
Kyeyoon Park95850112014-10-23 14:49:17 -0700124
125 neigh_release(n);
126 }
127}
128
/* Main bridge forwarding decision for a frame accepted by br_handle_frame().
 * Classifies the frame (unicast/multicast/broadcast), learns the source
 * address, optionally answers proxy ARP, then forwards, floods, and/or
 * delivers the frame locally.  Consumes @skb on every path.
 * Returns 0 except when the frame is handed to br_pass_frame_up().
 * note: already called with rcu_read_lock
 */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
	struct net_bridge_fdb_entry *dst = NULL;
	struct net_bridge_mdb_entry *mdst;
	bool local_rcv, mcast_hit = false;
	struct net_bridge *br;
	u16 vid = 0;

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

	/* VLAN ingress filtering; may also set skb->vlan info and @vid */
	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
		goto out;

	nbp_switchdev_frame_mark(p, skb);

	/* insert into forwarding database after filtering to avoid spoofing */
	br = p->br;
	if (p->flags & BR_LEARNING)
		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);

	/* in promisc mode the bridge device wants a copy of everything */
	local_rcv = !!(br->dev->flags & IFF_PROMISC);
	if (is_multicast_ether_addr(dest)) {
		/* by definition the broadcast is also a multicast address */
		if (is_broadcast_ether_addr(dest)) {
			pkt_type = BR_PKT_BROADCAST;
			local_rcv = true;
		} else {
			pkt_type = BR_PKT_MULTICAST;
			/* IGMP/MLD snooping; non-zero means drop */
			if (br_multicast_rcv(br, p, skb, vid))
				goto drop;
		}
	}

	/* learning ports update the FDB (above) but never forward */
	if (p->state == BR_STATE_LEARNING)
		goto drop;

	BR_INPUT_SKB_CB(skb)->brdev = br->dev;

	if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP))
		br_do_proxy_arp(skb, br, vid, p);

	switch (pkt_type) {
	case BR_PKT_MULTICAST:
		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb))) {
			/* snooping state is authoritative: deliver locally
			 * only if the host joined or we are an mcast router
			 */
			if ((mdst && mdst->mglist) ||
			    br_multicast_is_router(br)) {
				local_rcv = true;
				br->dev->stats.multicast++;
			}
			mcast_hit = true;
		} else {
			/* no querier: fall back to flooding + local copy */
			local_rcv = true;
			br->dev->stats.multicast++;
		}
		break;
	case BR_PKT_UNICAST:
		dst = __br_fdb_get(br, dest, vid);
		/* fall through */
	default:
		break;
	}

	if (dst) {
		unsigned long now = jiffies;

		if (dst->is_local)
			return br_pass_frame_up(skb);

		/* refresh FDB entry age; avoid a store when unchanged */
		if (now != dst->used)
			dst->used = now;
		br_forward(dst->dst, skb, local_rcv, false);
	} else {
		if (!mcast_hit)
			br_flood(br, skb, pkt_type, local_rcv, false);
		else
			br_multicast_flood(mdst, skb, local_rcv, false);
	}

	if (local_rcv)
		return br_pass_frame_up(skb);

out:
	return 0;
drop:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
Ido Schimmel56fae402016-06-07 12:06:58 +0300224static void __br_handle_local_finish(struct sk_buff *skb)
Stephen Hemmingercf0f02d2006-03-20 22:59:06 -0800225{
Jiri Pirkof350a0a82010-06-15 06:50:45 +0000226 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
Vlad Yasevich2ba071e2013-02-13 12:00:16 +0000227 u16 vid = 0;
Stephen Hemmingercf0f02d2006-03-20 22:59:06 -0800228
Toshiaki Makitae0d79682014-05-26 15:15:53 +0900229 /* check if vlan is allowed, to avoid spoofing */
230 if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
Toshiaki Makitaa5642ab2014-02-07 16:48:18 +0900231 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
Ido Schimmel56fae402016-06-07 12:06:58 +0300232}
233
234/* note: already called with rcu_read_lock */
235static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
236{
237 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
238
239 __br_handle_local_finish(skb);
Florian Westphal8626c562016-03-12 11:14:42 +0100240
241 BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
242 br_pass_frame_up(skb);
243 return 0;
Stephen Hemmingercf0f02d2006-03-20 22:59:06 -0800244}
245
/*
 * rx_handler for bridge ports: entry point for every frame received on
 * an enslaved device.  Filters invalid/link-local traffic, then hands
 * the frame to the NF_BR_PRE_ROUTING hook ending in
 * br_handle_frame_finish().
 * Return NULL if skb is handled
 * note: already called with rcu_read_lock
 */
rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
	struct net_bridge_port *p;
	struct sk_buff *skb = *pskb;
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	br_should_route_hook_t *rhook;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	/* drop frames with multicast/zero source address */
	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	p = br_port_get_rcu(skb->dev);
	if (p->flags & BR_VLAN_TUNNEL) {
		if (br_handle_ingress_vlan_tunnel(skb, p,
						  nbp_vlan_group_rcu(p)))
			goto drop;
	}

	if (unlikely(is_link_local_ether_addr(dest))) {
		u16 fwd_mask = p->br->group_fwd_mask_required;

		/*
		 * See IEEE 802.1D Table 7-10 Reserved addresses
		 *
		 * Assignment		 	Value
		 * Bridge Group Address		01-80-C2-00-00-00
		 * (MAC Control) 802.3		01-80-C2-00-00-01
		 * (Link Aggregation) 802.3	01-80-C2-00-00-02
		 * 802.1X PAE address		01-80-C2-00-00-03
		 *
		 * 802.1AB LLDP 		01-80-C2-00-00-0E
		 *
		 * Others reserved for future standardization
		 */
		switch (dest[5]) {
		case 0x00:	/* Bridge Group Address */
			/* If STP is turned off,
			   then must forward to keep loop detection */
			if (p->br->stp_enabled == BR_NO_STP ||
			    fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		case 0x01:	/* IEEE MAC (Pause) */
			goto drop;

		case 0x0E:	/* 802.1AB LLDP */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		default:
			/* Allow selective forwarding for most other protocols */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
		}

		/* Deliver packet to local host only */
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
			NULL, skb, skb->dev, NULL, br_handle_local_finish);
		return RX_HANDLER_CONSUMED;
	}

forward:
	switch (p->state) {
	case BR_STATE_FORWARDING:
		/* brouter hook: a non-zero return means "route, don't
		 * bridge" — give the frame back to the normal stack
		 */
		rhook = rcu_dereference(br_should_route_hook);
		if (rhook) {
			if ((*rhook)(skb)) {
				*pskb = skb;
				return RX_HANDLER_PASS;
			}
			/* the hook may have modified the frame */
			dest = eth_hdr(skb)->h_dest;
		}
		/* fall through */
	case BR_STATE_LEARNING:
		if (ether_addr_equal(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
			dev_net(skb->dev), NULL, skb, skb->dev, NULL,
			br_handle_frame_finish);
		break;
	default:
drop:
		kfree_skb(skb);
	}
	return RX_HANDLER_CONSUMED;
}