/*
 * GRE over IPv4 demultiplexer driver
 *
 * Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/if.h>
#include <linux/icmp.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/if_tunnel.h>
#include <linux/spinlock.h>
#include <net/protocol.h>
#include <net/gre.h>

#include <net/icmp.h>
#include <net/route.h>
#include <net/xfrm.h>

static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX];

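/*
 * Version-demux registration: one handler per GRE version number
 * (GREPROTO_CISCO for ordinary version-0 GRE, which this module claims
 * itself below; GREPROTO_PPTP for the version-1 PPTP variant).  A sketch
 * of the expected usage from a client module, with hypothetical callback
 * names:
 *
 *	static const struct gre_protocol my_gre_proto = {
 *		.handler     = my_gre_rcv,
 *		.err_handler = my_gre_err,
 *	};
 *
 *	err = gre_add_protocol(&my_gre_proto, GREPROTO_PPTP);
 *
 * The slot is claimed with cmpxchg(), so a second registration for the
 * same version fails with -EBUSY; gre_del_protocol() must be called (and
 * waits for readers via synchronize_rcu()) before the handler goes away.
 */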
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
{
	if (version >= GREPROTO_MAX)
		return -EINVAL;

	return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ?
		0 : -EBUSY;
}
EXPORT_SYMBOL_GPL(gre_add_protocol);

int gre_del_protocol(const struct gre_protocol *proto, u8 version)
{
	int ret;

	if (version >= GREPROTO_MAX)
		return -EINVAL;

	ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?
		0 : -EBUSY;

	if (ret)
		return ret;

	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(gre_del_protocol);

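/*
 * Push a GRE header in front of the current skb data.  The base header
 * (flags + protocol) may be followed by up to three optional 32-bit
 * words - checksum (plus reserved), key and sequence number, in that
 * order on the wire - which are filled in back to front below.  The
 * checksum covers the whole packet and is skipped here when the skb is
 * marked for GRE GSO (SKB_GSO_GRE/SKB_GSO_GRE_CSUM).
 */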
void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		      int hdr_len)
{
	struct gre_base_hdr *greh;

	skb_push(skb, hdr_len);

	skb_reset_transport_header(skb);
	greh = (struct gre_base_hdr *)skb->data;
	greh->flags = tnl_flags_to_gre_flags(tpi->flags);
	greh->protocol = tpi->proto;

	if (tpi->flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

		if (tpi->flags & TUNNEL_SEQ) {
			*ptr = tpi->seq;
			ptr--;
		}
		if (tpi->flags & TUNNEL_KEY) {
			*ptr = tpi->key;
			ptr--;
		}
		if (tpi->flags & TUNNEL_CSUM &&
		    !(skb_shinfo(skb)->gso_type &
		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
			*ptr = 0;
			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
								 skb->len, 0));
		}
	}
}
EXPORT_SYMBOL_GPL(gre_build_header);

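/*
 * Parse the GRE header that follows the outer IP header into *tpi.
 * Packets with a non-zero version or the routing extension are rejected;
 * the optional checksum is validated (setting *csum_err on failure), key
 * and sequence number are copied out, and WCCP encapsulation is rewritten
 * to look like plain IPv4.  Returns 0 with the GRE header pulled off the
 * skb, or a negative error.
 */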
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
			    bool *csum_err)
{
	unsigned int ip_hlen = ip_hdrlen(skb);
	const struct gre_base_hdr *greh;
	__be32 *options;
	int hdr_len;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
		return -EINVAL;

	greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen);
	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
	hdr_len = ip_gre_calc_hlen(tpi->flags);

	if (!pskb_may_pull(skb, hdr_len))
		return -EINVAL;

	greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen);
	tpi->proto = greh->protocol;

	options = (__be32 *)(greh + 1);
	if (greh->flags & GRE_CSUM) {
		if (skb_checksum_simple_validate(skb)) {
			*csum_err = true;
			return -EINVAL;
		}
		options++;
	}

	if (greh->flags & GRE_KEY) {
		tpi->key = *options;
		options++;
	} else
		tpi->key = 0;

	if (unlikely(greh->flags & GRE_SEQ)) {
		tpi->seq = *options;
		options++;
	} else
		tpi->seq = 0;

	/* WCCP version 1 and 2 protocol decoding.
	 * - Change the protocol to IP.
	 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header.
	 */
	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
		tpi->proto = htons(ETH_P_IP);
		if ((*(u8 *)options & 0xF0) != 0x40) {
			hdr_len += 4;
			if (!pskb_may_pull(skb, hdr_len))
				return -EINVAL;
		}
	}

	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
}

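/*
 * Receive path for version-0 GRE.  Looped-back multicast is dropped,
 * the header is parsed, and the registered gre_cisco_protocol handlers
 * are tried in priority order under RCU; the first one returning
 * PACKET_RCVD consumes the skb.  Unclaimed packets trigger an ICMP
 * port-unreachable and are dropped.
 */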
static int gre_cisco_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	int i;
	bool csum_err = false;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
		goto drop;

	rcu_read_lock();
	for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
		struct gre_cisco_protocol *proto;
		int ret;

		proto = rcu_dereference(gre_cisco_proto_list[i]);
		if (!proto)
			continue;
		ret = proto->handler(skb, &tpi);
		if (ret == PACKET_RCVD) {
			rcu_read_unlock();
			return 0;
		}
	}
	rcu_read_unlock();

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

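/*
 * ICMP error handler for version-0 GRE.  Fragmentation-needed and
 * redirect messages are handled here directly (PMTU update / route
 * redirect); anything else is offered to the registered
 * gre_cisco_protocol err_handler callbacks.
 */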
static void gre_cisco_err(struct sk_buff *skb, u32 info)
{
	/* All routers (except for Linux) return only
	 * 8 bytes of packet payload, which means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header, which makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksums enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder: RFC 1812 was written by a Cisco employee, so
	 * why do they break the standards they established themselves?
	 */

	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int i;

	if (parse_gre_header(skb, &tpi, &csum_err)) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	rcu_read_lock();
	for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
		struct gre_cisco_protocol *proto;

		proto = rcu_dereference(gre_cisco_proto_list[i]);
		if (!proto)
			continue;

		if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD)
			goto out;
	}
out:
	rcu_read_unlock();
}

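/*
 * Entry point for IP protocol 47 (GRE).  The version field in the second
 * header byte selects the handler registered with gre_add_protocol():
 * version 0 is ordinary GRE, handled by gre_cisco_rcv() above, version 1
 * is the PPTP flavour.  Packets with no registered handler are dropped.
 */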
static int gre_rcv(struct sk_buff *skb)
{
	const struct gre_protocol *proto;
	u8 ver;
	int ret;

	if (!pskb_may_pull(skb, 12))
		goto drop;

	ver = skb->data[1] & 0x7f;
	if (ver >= GREPROTO_MAX)
		goto drop;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (!proto || !proto->handler)
		goto drop_unlock;
	ret = proto->handler(skb);
	rcu_read_unlock();
	return ret;

drop_unlock:
	rcu_read_unlock();
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

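/*
 * ICMP errors for IPPROTO_GRE arrive with skb->data pointing at the IP
 * header of the offending packet quoted in the ICMP payload, so the GRE
 * version byte is read relative to that embedded header before the error
 * is passed to the matching err_handler.
 */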
static void gre_err(struct sk_buff *skb, u32 info)
{
	const struct gre_protocol *proto;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	u8 ver = skb->data[(iph->ihl << 2) + 1] & 0x7f;

	if (ver >= GREPROTO_MAX)
		return;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (proto && proto->err_handler)
		proto->err_handler(skb, info);
	rcu_read_unlock();
}

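/*
 * gre_init() below hooks gre_rcv()/gre_err() into the IPv4 stack for
 * IPPROTO_GRE and then registers the version-0 handlers through the same
 * gre_add_protocol() interface that external modules use.
 */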
static const struct net_protocol net_gre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
	.netns_ok    = 1,
};

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_cisco_rcv,
	.err_handler = gre_cisco_err,
};

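/*
 * Registration API for version-0 (Cisco style) GRE payload handlers.
 * Each gre_cisco_protocol occupies the slot given by its ->priority and
 * is tried in ascending priority order by gre_cisco_rcv().  A sketch of
 * how a tunnel driver might hook in, with hypothetical callback names:
 *
 *	static struct gre_cisco_protocol my_tunnel_proto = {
 *		.handler     = my_tunnel_rcv,
 *		.err_handler = my_tunnel_err,
 *		.priority    = 0,
 *	};
 *
 *	err = gre_cisco_register(&my_tunnel_proto);
 *
 * The slot is claimed with cmpxchg(), so a clashing priority fails with
 * -EBUSY; gre_cisco_unregister() waits for in-flight receivers with
 * synchronize_net() before returning.
 */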
int gre_cisco_register(struct gre_cisco_protocol *newp)
{
	struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
			&gre_cisco_proto_list[newp->priority];

	return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY;
}
EXPORT_SYMBOL_GPL(gre_cisco_register);

int gre_cisco_unregister(struct gre_cisco_protocol *del_proto)
{
	struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
			&gre_cisco_proto_list[del_proto->priority];
	int ret;

	ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL;

	if (ret)
		return ret;

	synchronize_net();
	return 0;
}
EXPORT_SYMBOL_GPL(gre_cisco_unregister);

static int __init gre_init(void)
{
	pr_info("GRE over IPv4 demultiplexer driver\n");

	if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
		pr_err("can't add protocol\n");
		goto err;
	}

	if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) {
		pr_info("%s: can't add ipgre handler\n", __func__);
		goto err_gre;
	}

	return 0;
err_gre:
	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
err:
	return -EAGAIN;
}

static void __exit gre_exit(void)
{
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}

module_init(gre_init);
module_exit(gre_exit);

MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_LICENSE("GPL");