#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/sysctl.h>
#include <linux/net.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/netconf.h>
#include <linux/vmalloc.h>
#include <linux/percpu.h>
#include <net/ip.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#endif
#include <net/addrconf.h>
#include <net/nexthop.h>
#include "internal.h"

/* put a reasonable limit on the number of labels
 * we will accept from userspace
 */
#define MAX_NEW_LABELS 30

/* max memory we will use for mpls_route */
#define MAX_MPLS_ROUTE_MEM	4096

/* Maximum number of labels to look ahead at when selecting a path of
 * a multipath route
 */
#define MAX_MP_SELECT_LABELS 4

#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)

static int zero = 0;
static int one = 1;
static int label_limit = (1 << 20) - 1;
static int ttl_max = 255;

static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags);

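/* Look up the route for an incoming label; called from the receive path
 * under rcu_read_lock().
 */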
static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
{
	struct mpls_route *rt = NULL;

	if (index < net->mpls.platform_labels) {
		struct mpls_route __rcu **platform_label =
			rcu_dereference(net->mpls.platform_label);
		rt = rcu_dereference(platform_label[index]);
	}
	return rt;
}

bool mpls_output_possible(const struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
EXPORT_SYMBOL_GPL(mpls_output_possible);

static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
{
	return (u8 *)nh + rt->rt_via_offset;
}

static const u8 *mpls_nh_via(const struct mpls_route *rt,
			     const struct mpls_nh *nh)
{
	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
}

static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
	/* The size of the layer 2.5 labels to be added for this route */
	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}

unsigned int mpls_dev_mtu(const struct net_device *dev)
{
	/* The amount of data the layer 2 frame can hold */
	return dev->mtu;
}
EXPORT_SYMBOL_GPL(mpls_dev_mtu);

bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);

void mpls_stats_inc_outucastpkts(struct net_device *dev,
				 const struct sk_buff *skb)
{
	struct mpls_dev *mdev;

	if (skb->protocol == htons(ETH_P_MPLS_UC)) {
		mdev = mpls_dev_get(dev);
		if (mdev)
			MPLS_INC_STATS_LEN(mdev, skb->len,
					   tx_packets,
					   tx_bytes);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct inet6_dev *in6dev = __in6_dev_get(dev);

		if (in6dev)
			IP6_UPD_PO_STATS(dev_net(dev), in6dev,
					 IPSTATS_MIB_OUT, skb->len);
#endif
	}
}
EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);

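/* Compute a flow hash over the label stack, and over the IPv4/IPv6 header
 * found below the bottom of stack when one is present, for multipath
 * nexthop selection.
 */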
static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
{
	struct mpls_entry_decoded dec;
	unsigned int mpls_hdr_len = 0;
	struct mpls_shim_hdr *hdr;
	bool eli_seen = false;
	int label_index;
	u32 hash = 0;

	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
	     label_index++) {
		mpls_hdr_len += sizeof(*hdr);
		if (!pskb_may_pull(skb, mpls_hdr_len))
			break;

		/* Read and decode the current label */
		hdr = mpls_hdr(skb) + label_index;
		dec = mpls_entry_decode(hdr);

		/* RFC6790 - reserved labels MUST NOT be used as keys
		 * for the load-balancing function
		 */
		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
			hash = jhash_1word(dec.label, hash);

			/* The entropy label follows the entropy label
			 * indicator, so this means that the entropy
			 * label was just added to the hash - no need to
			 * go any deeper either in the label stack or in the
			 * payload
			 */
			if (eli_seen)
				break;
		} else if (dec.label == MPLS_LABEL_ENTROPY) {
			eli_seen = true;
		}

		if (!dec.bos)
			continue;

		/* found bottom label; does skb have room for a header? */
		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
			const struct iphdr *v4hdr;

			v4hdr = (const struct iphdr *)(hdr + 1);
			if (v4hdr->version == 4) {
				hash = jhash_3words(ntohl(v4hdr->saddr),
						    ntohl(v4hdr->daddr),
						    v4hdr->protocol, hash);
			} else if (v4hdr->version == 6 &&
				   pskb_may_pull(skb, mpls_hdr_len +
						 sizeof(struct ipv6hdr))) {
				const struct ipv6hdr *v6hdr;

				v6hdr = (const struct ipv6hdr *)(hdr + 1);
				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
				hash = jhash_1word(v6hdr->nexthdr, hash);
			}
		}

		break;
	}

	return hash;
}

static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index)
{
	return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size);
}

/* number of alive nexthops (rt->rt_nhn_alive) and the flags for
 * a next hop (nh->nh_flags) are modified by netdev event handlers.
 * Since those fields can change at any moment, use READ_ONCE to
 * access both.
 */
static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
					     struct sk_buff *skb)
{
	u32 hash = 0;
	int nh_index = 0;
	int n = 0;
	u8 alive;

	/* No need to look further into packet if there's only
	 * one path
	 */
	if (rt->rt_nhn == 1)
		return rt->rt_nh;

	alive = READ_ONCE(rt->rt_nhn_alive);
	if (alive == 0)
		return NULL;

	hash = mpls_multipath_hash(rt, skb);
	nh_index = hash % alive;
	if (alive == rt->rt_nhn)
		goto out;
	for_nexthops(rt) {
		unsigned int nh_flags = READ_ONCE(nh->nh_flags);

		if (nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			continue;
		if (n == nh_index)
			return nh;
		n++;
	} endfor_nexthops(rt);

out:
	return mpls_get_nexthop(rt, nh_index);
}

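/* Pop of the final label: decide the payload protocol and apply the
 * configured TTL propagation from the MPLS header into the IPv4 TTL or
 * IPv6 hop limit.
 */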
static bool mpls_egress(struct net *net, struct mpls_route *rt,
			struct sk_buff *skb, struct mpls_entry_decoded dec)
{
	enum mpls_payload_type payload_type;
	bool success = false;

	/* The IPv4 code below accesses through the IPv4 header
	 * checksum, which is 12 bytes into the packet.
	 * The IPv6 code below accesses through the IPv6 hop limit
	 * which is 8 bytes into the packet.
	 *
	 * For all supported cases there should always be at least 12
	 * bytes of packet data present.  The IPv4 header is 20 bytes
	 * without options and the IPv6 header is always 40 bytes
	 * long.
	 */
	if (!pskb_may_pull(skb, 12))
		return false;

	payload_type = rt->rt_payload_type;
	if (payload_type == MPT_UNSPEC)
		payload_type = ip_hdr(skb)->version;

	switch (payload_type) {
	case MPT_IPV4: {
		struct iphdr *hdr4 = ip_hdr(skb);
		u8 new_ttl;
		skb->protocol = htons(ETH_P_IP);

		/* If propagating TTL, take the decremented TTL from
		 * the incoming MPLS header, otherwise decrement the
		 * TTL, but only if not 0 to avoid underflow.
		 */
		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
		     net->mpls.ip_ttl_propagate))
			new_ttl = dec.ttl;
		else
			new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;

		csum_replace2(&hdr4->check,
			      htons(hdr4->ttl << 8),
			      htons(new_ttl << 8));
		hdr4->ttl = new_ttl;
		success = true;
		break;
	}
	case MPT_IPV6: {
		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
		skb->protocol = htons(ETH_P_IPV6);

		/* If propagating TTL, take the decremented TTL from
		 * the incoming MPLS header, otherwise decrement the
		 * hop limit, but only if not 0 to avoid underflow.
		 */
		if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
		     net->mpls.ip_ttl_propagate))
			hdr6->hop_limit = dec.ttl;
		else if (hdr6->hop_limit)
			hdr6->hop_limit = hdr6->hop_limit - 1;
		success = true;
		break;
	}
	case MPT_UNSPEC:
		/* Should have decided which protocol it is by now */
		break;
	}

	return success;
}

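/* Receive path for ETH_P_MPLS_UC packets: look up the incoming label,
 * pick a nexthop, pop the incoming label, then either push the outgoing
 * label stack or hand the payload off at the egress, and transmit
 * towards the next hop.
 */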
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mpls_shim_hdr *hdr;
	struct mpls_route *rt;
	struct mpls_nh *nh;
	struct mpls_entry_decoded dec;
	struct net_device *out_dev;
	struct mpls_dev *out_mdev;
	struct mpls_dev *mdev;
	unsigned int hh_len;
	unsigned int new_header_size;
	unsigned int mtu;
	int err;

	/* Careful this entire function runs inside of an rcu critical section */

	mdev = mpls_dev_get(dev);
	if (!mdev)
		goto drop;

	MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
			   rx_bytes);

	if (!mdev->input_enabled) {
		MPLS_INC_STATS(mdev, rx_dropped);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto err;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto err;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto err;

	/* Read and decode the label */
	hdr = mpls_hdr(skb);
	dec = mpls_entry_decode(hdr);

	rt = mpls_route_input_rcu(net, dec.label);
	if (!rt) {
		MPLS_INC_STATS(mdev, rx_noroute);
		goto drop;
	}

	nh = mpls_select_multipath(rt, skb);
	if (!nh)
		goto err;

	/* Pop the label */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);

	skb_orphan(skb);

	if (skb_warn_if_lro(skb))
		goto err;

	skb_forward_csum(skb);

	/* Verify ttl is valid */
	if (dec.ttl <= 1)
		goto err;
	dec.ttl -= 1;

	/* Find the output device */
	out_dev = rcu_dereference(nh->nh_dev);
	if (!mpls_output_possible(out_dev))
		goto tx_err;

	/* Verify the destination can hold the packet */
	new_header_size = mpls_nh_header_size(nh);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
		goto tx_err;

	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
		hh_len = 0;

	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
		goto tx_err;

	skb->dev = out_dev;
	skb->protocol = htons(ETH_P_MPLS_UC);

	if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
		if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
			goto err;
	} else {
		bool bos;
		int i;
		skb_push(skb, new_header_size);
		skb_reset_network_header(skb);
		/* Push the new labels */
		hdr = mpls_hdr(skb);
		bos = dec.bos;
		for (i = nh->nh_labels - 1; i >= 0; i--) {
			hdr[i] = mpls_entry_encode(nh->nh_label[i],
						   dec.ttl, 0, bos);
			bos = false;
		}
	}

	mpls_stats_inc_outucastpkts(out_dev, skb);

	/* If via wasn't specified then send out using device address */
	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
				 out_dev->dev_addr, skb);
	else
		err = neigh_xmit(nh->nh_via_table, out_dev,
				 mpls_nh_via(rt, nh), skb);
	if (err)
		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
				    __func__, err);
	return 0;

tx_err:
	out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
	if (out_mdev)
		MPLS_INC_STATS(out_mdev, tx_errors);
	goto drop;
err:
	MPLS_INC_STATS(mdev, rx_errors);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static struct packet_type mpls_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MPLS_UC),
	.func = mpls_forward,
};

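/* Netlink attribute policy for AF_MPLS RTM_NEWROUTE/RTM_DELROUTE requests */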
static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
	[RTA_DST]		= { .type = NLA_U32 },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_TTL_PROPAGATE]	= { .type = NLA_U8 },
};

struct mpls_route_config {
	u32			rc_protocol;
	u32			rc_ifindex;
	u8			rc_via_table;
	u8			rc_via_alen;
	u8			rc_via[MAX_VIA_ALEN];
	u32			rc_label;
	u8			rc_ttl_propagate;
	u8			rc_output_labels;
	u32			rc_output_label[MAX_NEW_LABELS];
	u32			rc_nlflags;
	enum mpls_payload_type	rc_payload_type;
	struct nl_info		rc_nlinfo;
	struct rtnexthop	*rc_mp;
	int			rc_mp_len;
};

/* all nexthops within a route have the same size based on max
 * number of labels and max via length for a hop
 */
static struct mpls_route *mpls_rt_alloc(u8 num_nh, u8 max_alen, u8 max_labels)
{
	u8 nh_size = MPLS_NH_SIZE(max_labels, max_alen);
	struct mpls_route *rt;
	size_t size;

	size = sizeof(*rt) + num_nh * nh_size;
	if (size > MAX_MPLS_ROUTE_MEM)
		return ERR_PTR(-EINVAL);

	rt = kzalloc(size, GFP_KERNEL);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	rt->rt_nhn = num_nh;
	rt->rt_nhn_alive = num_nh;
	rt->rt_nh_size = nh_size;
	rt->rt_via_offset = MPLS_NH_VIA_OFF(max_labels);

	return rt;
}

static void mpls_rt_free(struct mpls_route *rt)
{
	if (rt)
		kfree_rcu(rt, rt_rcu);
}

static void mpls_notify_route(struct net *net, unsigned index,
			      struct mpls_route *old, struct mpls_route *new,
			      const struct nl_info *info)
{
	struct nlmsghdr *nlh = info ? info->nlh : NULL;
	unsigned portid = info ? info->portid : 0;
	int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
	struct mpls_route *rt = new ? new : old;
	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
	/* Ignore reserved labels for now */
	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}

static void mpls_route_update(struct net *net, unsigned index,
			      struct mpls_route *new,
			      const struct nl_info *info)
{
	struct mpls_route __rcu **platform_label;
	struct mpls_route *rt;

	ASSERT_RTNL();

	platform_label = rtnl_dereference(net->mpls.platform_label);
	rt = rtnl_dereference(platform_label[index]);
	rcu_assign_pointer(platform_label[index], new);

	mpls_notify_route(net, index, rt, new, info);

	/* If we removed a route free it now */
	mpls_rt_free(rt);
}

static unsigned find_free_label(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
	     index++) {
		if (!rtnl_dereference(platform_label[index]))
			return index;
	}
	return LABEL_NOT_SPECIFIED;
}

#if IS_ENABLED(CONFIG_INET)
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	struct net_device *dev;
	struct rtable *rt;
	struct in_addr daddr;

	memcpy(&daddr, addr, sizeof(struct in_addr));
	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	dev = rt->dst.dev;
	dev_hold(dev);

	ip_rt_put(rt);

	return dev;
}
#else
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int err;

	if (!ipv6_stub)
		return ERR_PTR(-EAFNOSUPPORT);

	memset(&fl6, 0, sizeof(fl6));
	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
	if (err)
		return ERR_PTR(err);

	dev = dst->dev;
	dev_hold(dev);
	dst_release(dst);

	return dev;
}
#else
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

static struct net_device *find_outdev(struct net *net,
				      struct mpls_route *rt,
				      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;

	if (!oif) {
		switch (nh->nh_via_table) {
		case NEIGH_ARP_TABLE:
			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_ND_TABLE:
			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_LINK_TABLE:
			break;
		}
	} else {
		dev = dev_get_by_index(net, oif);
	}

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(dev))
		return dev;

	/* The caller is holding rtnl anyways, so release the dev reference */
	dev_put(dev);

	return dev;
}

static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
			      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;
	int err = -ENODEV;

	dev = find_outdev(net, rt, nh, oif);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev = NULL;
		goto errout;
	}

	/* Ensure this is a supported device */
	err = -EINVAL;
	if (!mpls_dev_get(dev))
		goto errout;

	if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
	    (dev->addr_len != nh->nh_via_alen))
		goto errout;

	RCU_INIT_POINTER(nh->nh_dev, dev);

	if (!(dev->flags & IFF_UP)) {
		nh->nh_flags |= RTNH_F_DEAD;
	} else {
		unsigned int flags;

		flags = dev_get_flags(dev);
		if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
			nh->nh_flags |= RTNH_F_LINKDOWN;
	}

	return 0;

errout:
	return err;
}

static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
				  struct mpls_route *rt)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_nh *nh = rt->rt_nh;
	int err;
	int i;

	if (!nh)
		return -ENOMEM;

	err = -EINVAL;

	nh->nh_labels = cfg->rc_output_labels;
	for (i = 0; i < nh->nh_labels; i++)
		nh->nh_label[i] = cfg->rc_output_label[i];

	nh->nh_via_table = cfg->rc_via_table;
	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
	nh->nh_via_alen = cfg->rc_via_alen;

	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
	if (err)
		goto errout;

	if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		rt->rt_nhn_alive--;

	return 0;

errout:
	return err;
}

static int mpls_nh_build(struct net *net, struct mpls_route *rt,
			 struct mpls_nh *nh, int oif, struct nlattr *via,
			 struct nlattr *newdst, u8 max_labels)
{
	int err = -ENOMEM;

	if (!nh)
		goto errout;

	if (newdst) {
		err = nla_get_labels(newdst, max_labels,
				     &nh->nh_labels, nh->nh_label);
		if (err)
			goto errout;
	}

	if (via) {
		err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
				  __mpls_nh_via(rt, nh));
		if (err)
			goto errout;
	} else {
		nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
	}

	err = mpls_nh_assign_dev(net, rt, nh, oif);
	if (err)
		goto errout;

	return 0;

errout:
	return err;
}

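/* Scan an RTA_MULTIPATH attribute: count the nexthops and report the
 * largest via address length and label count so the route can be sized.
 * Returns 0 if the nexthop list is malformed.
 */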
static u8 mpls_count_nexthops(struct rtnexthop *rtnh, int len,
			      u8 cfg_via_alen, u8 *max_via_alen,
			      u8 *max_labels)
{
	int remaining = len;
	u8 nhs = 0;

	*max_via_alen = 0;
	*max_labels = 0;

	while (rtnh_ok(rtnh, remaining)) {
		struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
		int attrlen;
		u8 n_labels = 0;

		attrlen = rtnh_attrlen(rtnh);
		nla = nla_find(attrs, attrlen, RTA_VIA);
		if (nla && nla_len(nla) >=
		    offsetof(struct rtvia, rtvia_addr)) {
			int via_alen = nla_len(nla) -
				offsetof(struct rtvia, rtvia_addr);

			if (via_alen <= MAX_VIA_ALEN)
				*max_via_alen = max_t(u16, *max_via_alen,
						      via_alen);
		}

		nla = nla_find(attrs, attrlen, RTA_NEWDST);
		if (nla &&
		    nla_get_labels(nla, MAX_NEW_LABELS, &n_labels, NULL) != 0)
			return 0;

		*max_labels = max_t(u8, *max_labels, n_labels);

		/* number of nexthops is tracked by a u8.
		 * Check for overflow.
		 */
		if (nhs == 255)
			return 0;
		nhs++;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int mpls_nh_build_multi(struct mpls_route_config *cfg,
			       struct mpls_route *rt, u8 max_labels)
{
	struct rtnexthop *rtnh = cfg->rc_mp;
	struct nlattr *nla_via, *nla_newdst;
	int remaining = cfg->rc_mp_len;
	int err = 0;
	u8 nhs = 0;

	change_nexthops(rt) {
		int attrlen;

		nla_via = NULL;
		nla_newdst = NULL;

		err = -EINVAL;
		if (!rtnh_ok(rtnh, remaining))
			goto errout;

		/* neither weighted multipath nor any flags
		 * are supported
		 */
		if (rtnh->rtnh_hops || rtnh->rtnh_flags)
			goto errout;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *attrs = rtnh_attrs(rtnh);

			nla_via = nla_find(attrs, attrlen, RTA_VIA);
			nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
		}

		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
				    rtnh->rtnh_ifindex, nla_via, nla_newdst,
				    max_labels);
		if (err)
			goto errout;

		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			rt->rt_nhn_alive--;

		rtnh = rtnh_next(rtnh, &remaining);
		nhs++;
	} endfor_nexthops(rt);

	rt->rt_nhn = nhs;

	return 0;

errout:
	return err;
}

static int mpls_route_add(struct mpls_route_config *cfg)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_route *rt, *old;
	int err = -EINVAL;
	u8 max_via_alen;
	unsigned index;
	u8 max_labels;
	u8 nhs;

	index = cfg->rc_label;

	/* If a label was not specified during insert pick one */
	if ((index == LABEL_NOT_SPECIFIED) &&
	    (cfg->rc_nlflags & NLM_F_CREATE)) {
		index = find_free_label(net);
	}

	/* Reserved labels may not be set */
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		goto errout;

	/* The full 20 bit range may not be supported. */
	if (index >= net->mpls.platform_labels)
		goto errout;

	/* Append makes no sense with mpls */
	err = -EOPNOTSUPP;
	if (cfg->rc_nlflags & NLM_F_APPEND)
		goto errout;

	err = -EEXIST;
	platform_label = rtnl_dereference(net->mpls.platform_label);
	old = rtnl_dereference(platform_label[index]);
	if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
		goto errout;

	err = -EEXIST;
	if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
		goto errout;

	err = -ENOENT;
	if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
		goto errout;

	err = -EINVAL;
	if (cfg->rc_mp) {
		nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
					  cfg->rc_via_alen, &max_via_alen,
					  &max_labels);
	} else {
		max_via_alen = cfg->rc_via_alen;
		max_labels = cfg->rc_output_labels;
		nhs = 1;
	}

	if (nhs == 0)
		goto errout;

	err = -ENOMEM;
	rt = mpls_rt_alloc(nhs, max_via_alen, max_labels);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto errout;
	}

	rt->rt_protocol = cfg->rc_protocol;
	rt->rt_payload_type = cfg->rc_payload_type;
	rt->rt_ttl_propagate = cfg->rc_ttl_propagate;

	if (cfg->rc_mp)
		err = mpls_nh_build_multi(cfg, rt, max_labels);
	else
		err = mpls_nh_build_from_cfg(cfg, rt);
	if (err)
		goto freert;

	mpls_route_update(net, index, rt, &cfg->rc_nlinfo);

	return 0;

freert:
	mpls_rt_free(rt);
errout:
	return err;
}

static int mpls_route_del(struct mpls_route_config *cfg)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	unsigned index;
	int err = -EINVAL;

	index = cfg->rc_label;

	/* Reserved labels may not be removed */
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		goto errout;

	/* The full 20 bit range may not be supported */
	if (index >= net->mpls.platform_labels)
		goto errout;

	mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);

	err = 0;
errout:
	return err;
}

static void mpls_get_stats(struct mpls_dev *mdev,
			   struct mpls_link_stats *stats)
{
	struct mpls_pcpu_stats *p;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_possible_cpu(i) {
		struct mpls_link_stats local;
		unsigned int start;

		p = per_cpu_ptr(mdev->stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			local = p->stats;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= local.rx_packets;
		stats->rx_bytes		+= local.rx_bytes;
		stats->tx_packets	+= local.tx_packets;
		stats->tx_bytes		+= local.tx_bytes;
		stats->rx_errors	+= local.rx_errors;
		stats->tx_errors	+= local.tx_errors;
		stats->rx_dropped	+= local.rx_dropped;
		stats->tx_dropped	+= local.tx_dropped;
		stats->rx_noroute	+= local.rx_noroute;
	}
}

static int mpls_fill_stats_af(struct sk_buff *skb,
			      const struct net_device *dev)
{
	struct mpls_link_stats *stats;
	struct mpls_dev *mdev;
	struct nlattr *nla;

	mdev = mpls_dev_get(dev);
	if (!mdev)
		return -ENODATA;

	nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
				sizeof(struct mpls_link_stats),
				MPLS_STATS_UNSPEC);
	if (!nla)
		return -EMSGSIZE;

	stats = nla_data(nla);
	mpls_get_stats(mdev, stats);

	return 0;
}

static size_t mpls_get_stats_af_size(const struct net_device *dev)
{
	struct mpls_dev *mdev;

	mdev = mpls_dev_get(dev);
	if (!mdev)
		return 0;

	return nla_total_size_64bit(sizeof(struct mpls_link_stats));
}

static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
				     u32 portid, u32 seq, int event,
				     unsigned int flags, int type)
{
	struct nlmsghdr *nlh;
	struct netconfmsg *ncm;
	bool all = false;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	if (type == NETCONFA_ALL)
		all = true;

	ncm = nlmsg_data(nlh);
	ncm->ncm_family = AF_MPLS;

	if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
		goto nla_put_failure;

	if ((all || type == NETCONFA_INPUT) &&
	    nla_put_s32(skb, NETCONFA_INPUT,
			mdev->input_enabled) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mpls_netconf_msgsize_devconf(int type)
{
	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
			+ nla_total_size(4); /* NETCONFA_IFINDEX */
	bool all = false;

	if (type == NETCONFA_ALL)
		all = true;

	if (all || type == NETCONFA_INPUT)
		size += nla_total_size(4);

	return size;
}

static void mpls_netconf_notify_devconf(struct net *net, int event,
					int type, struct mpls_dev *mdev)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
}

static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
};

static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
				    struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[NETCONFA_MAX + 1];
	struct netconfmsg *ncm;
	struct net_device *dev;
	struct mpls_dev *mdev;
	struct sk_buff *skb;
	int ifindex;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
			  devconf_mpls_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[NETCONFA_IFINDEX])
		goto errout;

	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		goto errout;

	mdev = mpls_dev_get(dev);
	if (!mdev)
		goto errout;

	err = -ENOBUFS;
	skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = mpls_netconf_fill_devconf(skb, mdev,
					NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
					NETCONFA_ALL);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

static int mpls_netconf_dump_devconf(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	struct net_device *dev;
	struct mpls_dev *mdev;
	int idx, s_idx;
	int h, s_h;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		rcu_read_lock();
		cb->seq = net->dev_base_seq;
		hlist_for_each_entry_rcu(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			mdev = mpls_dev_get(dev);
			if (!mdev)
				goto cont;
			if (mpls_netconf_fill_devconf(skb, mdev,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      RTM_NEWNETCONF,
						      NLM_F_MULTI,
						      NETCONFA_ALL) < 0) {
				rcu_read_unlock();
				goto done;
			}
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
		rcu_read_unlock();
	}
done:
	cb->args[0] = h;
	cb->args[1] = idx;

	return skb->len;
}

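/* Per-device sysctl table entries initially hold offsets into struct
 * mpls_dev (see MPLS_PERDEV_SYSCTL_OFFSET below); they are turned into
 * absolute pointers when the table is registered for a device.
 */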
#define MPLS_PERDEV_SYSCTL_OFFSET(field)	\
	(&((struct mpls_dev *)0)->field)

static int mpls_conf_proc(struct ctl_table *ctl, int write,
			  void __user *buffer,
			  size_t *lenp, loff_t *ppos)
{
	int oval = *(int *)ctl->data;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write) {
		struct mpls_dev *mdev = ctl->extra1;
		int i = (int *)ctl->data - (int *)mdev;
		struct net *net = ctl->extra2;
		int val = *(int *)ctl->data;

		if (i == offsetof(struct mpls_dev, input_enabled) &&
		    val != oval) {
			mpls_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_INPUT, mdev);
		}
	}

	return ret;
}

static const struct ctl_table mpls_dev_table[] = {
	{
		.procname	= "input",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= mpls_conf_proc,
		.data		= MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
	},
	{ }
};

static int mpls_dev_sysctl_register(struct net_device *dev,
				    struct mpls_dev *mdev)
{
	char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
	struct net *net = dev_net(dev);
	struct ctl_table *table;
	int i;

	table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
	if (!table)
		goto out;

	/* Table data contains only offsets relative to the base of
	 * the mdev at this point, so make them absolute.
	 */
	for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) {
		table[i].data = (char *)mdev + (uintptr_t)table[i].data;
		table[i].extra1 = mdev;
		table[i].extra2 = net;
	}

	snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);

	mdev->sysctl = register_net_sysctl(net, path, table);
	if (!mdev->sysctl)
		goto free;

	mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, mdev);
	return 0;

free:
	kfree(table);
out:
	return -ENOBUFS;
}

static void mpls_dev_sysctl_unregister(struct net_device *dev,
				       struct mpls_dev *mdev)
{
	struct net *net = dev_net(dev);
	struct ctl_table *table;

	table = mdev->sysctl->ctl_table_arg;
	unregister_net_sysctl_table(mdev->sysctl);
	kfree(table);

	mpls_netconf_notify_devconf(net, RTM_DELNETCONF, 0, mdev);
}

static struct mpls_dev *mpls_add_dev(struct net_device *dev)
{
	struct mpls_dev *mdev;
	int err = -ENOMEM;
	int i;

	ASSERT_RTNL();

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(err);

	mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
	if (!mdev->stats)
		goto free;

	for_each_possible_cpu(i) {
		struct mpls_pcpu_stats *mpls_stats;

		mpls_stats = per_cpu_ptr(mdev->stats, i);
		u64_stats_init(&mpls_stats->syncp);
	}

	mdev->dev = dev;

	err = mpls_dev_sysctl_register(dev, mdev);
	if (err)
		goto free;

	rcu_assign_pointer(dev->mpls_ptr, mdev);

	return mdev;

free:
	free_percpu(mdev->stats);
	kfree(mdev);
	return ERR_PTR(err);
}

static void mpls_dev_destroy_rcu(struct rcu_head *head)
{
	struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);

	free_percpu(mdev->stats);
	kfree(mdev);
}

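/* A device went down, changed or was unregistered: mark the nexthops that
 * use it dead or linkdown and, on unregister, delete routes that are left
 * with no usable device.
 */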
static void mpls_ifdown(struct net_device *dev, int event)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	u8 alive, deleted;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		if (!rt)
			continue;

		alive = 0;
		deleted = 0;
		change_nexthops(rt) {
			unsigned int nh_flags = nh->nh_flags;

			if (rtnl_dereference(nh->nh_dev) != dev)
				goto next;

			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				nh_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				nh_flags |= RTNH_F_LINKDOWN;
				break;
			}
			if (event == NETDEV_UNREGISTER)
				RCU_INIT_POINTER(nh->nh_dev, NULL);

			if (nh->nh_flags != nh_flags)
				WRITE_ONCE(nh->nh_flags, nh_flags);
next:
			if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
				alive++;
			if (!rtnl_dereference(nh->nh_dev))
				deleted++;
		} endfor_nexthops(rt);

		WRITE_ONCE(rt->rt_nhn_alive, alive);

		/* if there are no more nexthops, delete the route */
		if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
			mpls_route_update(net, index, NULL, NULL);
	}
}

static void mpls_ifup(struct net_device *dev, unsigned int flags)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	unsigned index;
	u8 alive;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		if (!rt)
			continue;

		alive = 0;
		change_nexthops(rt) {
			unsigned int nh_flags = nh->nh_flags;
			struct net_device *nh_dev =
				rtnl_dereference(nh->nh_dev);

			if (!(nh_flags & flags)) {
				alive++;
				continue;
			}
			if (nh_dev != dev)
				continue;
			alive++;
			nh_flags &= ~flags;
			WRITE_ONCE(nh->nh_flags, nh_flags);
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001426 } endfor_nexthops(rt);
1427
David Ahern39eb8cd2017-03-31 07:13:59 -07001428 WRITE_ONCE(rt->rt_nhn_alive, alive);
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001429 }
Eric W. Biederman01891972015-03-03 19:10:47 -06001430}
1431
1432static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
1433 void *ptr)
1434{
1435 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Robert Shearman03c57742015-04-22 11:14:37 +01001436 struct mpls_dev *mdev;
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001437 unsigned int flags;
Eric W. Biederman01891972015-03-03 19:10:47 -06001438
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001439 if (event == NETDEV_REGISTER) {
Simon Horman407f31b2016-07-07 07:56:15 +02001440 /* For now just support Ethernet, IPGRE, SIT and IPIP devices */
Simon Horman0d227a82016-06-16 17:09:09 +09001441 if (dev->type == ARPHRD_ETHER ||
1442 dev->type == ARPHRD_LOOPBACK ||
Simon Horman407f31b2016-07-07 07:56:15 +02001443 dev->type == ARPHRD_IPGRE ||
1444 dev->type == ARPHRD_SIT ||
1445 dev->type == ARPHRD_TUNNEL) {
Robert Shearman03c57742015-04-22 11:14:37 +01001446 mdev = mpls_add_dev(dev);
1447 if (IS_ERR(mdev))
1448 return notifier_from_errno(PTR_ERR(mdev));
1449 }
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001450 return NOTIFY_OK;
1451 }
Robert Shearman03c57742015-04-22 11:14:37 +01001452
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001453 mdev = mpls_dev_get(dev);
1454 if (!mdev)
1455 return NOTIFY_OK;
1456
1457 switch (event) {
1458 case NETDEV_DOWN:
1459 mpls_ifdown(dev, event);
1460 break;
1461 case NETDEV_UP:
1462 flags = dev_get_flags(dev);
1463 if (flags & (IFF_RUNNING | IFF_LOWER_UP))
1464 mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
1465 else
1466 mpls_ifup(dev, RTNH_F_DEAD);
1467 break;
1468 case NETDEV_CHANGE:
1469 flags = dev_get_flags(dev);
1470 if (flags & (IFF_RUNNING | IFF_LOWER_UP))
1471 mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
1472 else
1473 mpls_ifdown(dev, event);
1474 break;
Eric W. Biederman01891972015-03-03 19:10:47 -06001475 case NETDEV_UNREGISTER:
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001476 mpls_ifdown(dev, event);
1477 mdev = mpls_dev_get(dev);
1478 if (mdev) {
David Ahern1182e4d2017-03-28 14:28:07 -07001479 mpls_dev_sysctl_unregister(dev, mdev);
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001480 RCU_INIT_POINTER(dev->mpls_ptr, NULL);
Robert Shearman27d69102017-01-16 14:16:37 +00001481 call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001482 }
Eric W. Biederman01891972015-03-03 19:10:47 -06001483 break;
Robert Shearman0fae3bf2015-06-11 19:58:26 +01001484 case NETDEV_CHANGENAME:
1485 mdev = mpls_dev_get(dev);
1486 if (mdev) {
1487 int err;
1488
David Ahern1182e4d2017-03-28 14:28:07 -07001489 mpls_dev_sysctl_unregister(dev, mdev);
Robert Shearman0fae3bf2015-06-11 19:58:26 +01001490 err = mpls_dev_sysctl_register(dev, mdev);
1491 if (err)
1492 return notifier_from_errno(err);
1493 }
1494 break;
Eric W. Biederman01891972015-03-03 19:10:47 -06001495 }
1496 return NOTIFY_OK;
1497}
1498
1499static struct notifier_block mpls_dev_notifier = {
1500 .notifier_call = mpls_dev_notify,
1501};
1502
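/* RTA_VIA payload layout: a struct rtvia, i.e. a 2-byte address family
 * followed by the raw next-hop address bytes, which is why alen + 2 bytes
 * are reserved below.  Illustrative example for an IPv4 via of 10.1.1.1
 * (6 bytes total):
 *
 *	rtvia_family = AF_INET;
 *	rtvia_addr[] = { 10, 1, 1, 1 };
 */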
Eric W. Biederman03c05662015-03-03 19:13:56 -06001503static int nla_put_via(struct sk_buff *skb,
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06001504 u8 table, const void *addr, int alen)
Eric W. Biederman03c05662015-03-03 19:13:56 -06001505{
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06001506 static const int table_to_family[NEIGH_NR_TABLES + 1] = {
1507 AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
1508 };
Eric W. Biederman03c05662015-03-03 19:13:56 -06001509 struct nlattr *nla;
1510 struct rtvia *via;
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06001511 int family = AF_UNSPEC;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001512
1513 nla = nla_reserve(skb, RTA_VIA, alen + 2);
1514 if (!nla)
1515 return -EMSGSIZE;
1516
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06001517 if (table <= NEIGH_NR_TABLES)
1518 family = table_to_family[table];
1519
Eric W. Biederman03c05662015-03-03 19:13:56 -06001520 via = nla_data(nla);
1521 via->rtvia_family = family;
1522 memcpy(via->rtvia_addr, addr, alen);
1523 return 0;
1524}
1525
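/* Each label in RTA_DST/RTA_NEWDST is a 4-byte MPLS label stack entry
 * (RFC 3032): label:20 | tc:3 | s:1 | ttl:8.  The entries are emitted in
 * array order (outermost label first) with tc and ttl left at zero, and
 * the bottom-of-stack bit set only on the final entry.  For example,
 * label[] = { 100, 200 } encodes as 100 (s=0) followed by 200 (s=1).
 */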
Eric W. Biederman966bae32015-03-03 19:13:19 -06001526int nla_put_labels(struct sk_buff *skb, int attrtype,
1527 u8 labels, const u32 label[])
1528{
1529 struct nlattr *nla;
1530 struct mpls_shim_hdr *nla_label;
1531 bool bos;
1532 int i;
1533	nla = nla_reserve(skb, attrtype, labels * 4);
1534 if (!nla)
1535 return -EMSGSIZE;
1536
1537 nla_label = nla_data(nla);
1538 bos = true;
1539 for (i = labels - 1; i >= 0; i--) {
1540 nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
1541 bos = false;
1542 }
1543
1544 return 0;
1545}
Roopa Prabhuface0182015-07-21 10:43:52 +02001546EXPORT_SYMBOL_GPL(nla_put_labels);
Eric W. Biederman966bae32015-03-03 19:13:19 -06001547
1548int nla_get_labels(const struct nlattr *nla,
David Aherna4ac8c92017-03-31 07:14:03 -07001549 u8 max_labels, u8 *labels, u32 label[])
Eric W. Biederman966bae32015-03-03 19:13:19 -06001550{
1551 unsigned len = nla_len(nla);
Eric W. Biederman966bae32015-03-03 19:13:19 -06001552 struct mpls_shim_hdr *nla_label;
David Aherna4ac8c92017-03-31 07:14:03 -07001553 u8 nla_labels;
Eric W. Biederman966bae32015-03-03 19:13:19 -06001554 bool bos;
1555 int i;
1556
David Aherna4ac8c92017-03-31 07:14:03 -07001557 /* len needs to be an even multiple of 4 (the label size). Number
1558 * of labels is a u8 so check for overflow.
1559 */
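	/* For illustration: a 12-byte attribute carries three labels. */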
1560 if (len & 3 || len / 4 > 255)
Eric W. Biederman966bae32015-03-03 19:13:19 -06001561 return -EINVAL;
1562
1563 /* Limit the number of new labels allowed */
1564 nla_labels = len/4;
1565 if (nla_labels > max_labels)
1566 return -EINVAL;
1567
David Aherna4ac8c92017-03-31 07:14:03 -07001568 /* when label == NULL, caller wants number of labels */
1569 if (!label)
1570 goto out;
1571
Eric W. Biederman966bae32015-03-03 19:13:19 -06001572 nla_label = nla_data(nla);
1573 bos = true;
1574 for (i = nla_labels - 1; i >= 0; i--, bos = false) {
1575 struct mpls_entry_decoded dec;
1576 dec = mpls_entry_decode(nla_label + i);
1577
1578 /* Ensure the bottom of stack flag is properly set
1579 * and ttl and tc are both clear.
1580 */
1581 if ((dec.bos != bos) || dec.ttl || dec.tc)
1582 return -EINVAL;
1583
Robert Shearman5a9ab012015-04-22 11:14:39 +01001584 switch (dec.label) {
Tom Herbert78f5b892015-05-07 08:08:51 -07001585 case MPLS_LABEL_IMPLNULL:
Robert Shearman5a9ab012015-04-22 11:14:39 +01001586 /* RFC3032: This is a label that an LSR may
1587 * assign and distribute, but which never
1588 * actually appears in the encapsulation.
1589 */
1590 return -EINVAL;
1591 }
1592
Eric W. Biederman966bae32015-03-03 19:13:19 -06001593 label[i] = dec.label;
1594 }
David Aherna4ac8c92017-03-31 07:14:03 -07001595out:
Eric W. Biederman966bae32015-03-03 19:13:19 -06001596 *labels = nla_labels;
1597 return 0;
1598}
Roopa Prabhuface0182015-07-21 10:43:52 +02001599EXPORT_SYMBOL_GPL(nla_get_labels);
Eric W. Biederman966bae32015-03-03 19:13:19 -06001600
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001601int nla_get_via(const struct nlattr *nla, u8 *via_alen,
1602 u8 *via_table, u8 via_addr[])
1603{
1604 struct rtvia *via = nla_data(nla);
1605 int err = -EINVAL;
1606 int alen;
1607
1608 if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
1609 goto errout;
1610 alen = nla_len(nla) -
1611 offsetof(struct rtvia, rtvia_addr);
1612 if (alen > MAX_VIA_ALEN)
1613 goto errout;
1614
1615 /* Validate the address family */
1616 switch (via->rtvia_family) {
1617 case AF_PACKET:
1618 *via_table = NEIGH_LINK_TABLE;
1619 break;
1620 case AF_INET:
1621 *via_table = NEIGH_ARP_TABLE;
1622 if (alen != 4)
1623 goto errout;
1624 break;
1625 case AF_INET6:
1626 *via_table = NEIGH_ND_TABLE;
1627 if (alen != 16)
1628 goto errout;
1629 break;
1630 default:
1631 /* Unsupported address family */
1632 goto errout;
1633 }
1634
1635 memcpy(via_addr, via->rtvia_addr, alen);
1636 *via_alen = alen;
1637 err = 0;
1638
1639errout:
1640 return err;
1641}
1642
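/* Translate an RTM_NEWROUTE/RTM_DELROUTE request into a mpls_route_config.
 * Roughly, an illustrative iproute2 command such as
 *
 *	ip -f mpls route add 100 as 200/300 via inet 10.1.1.1 dev eth0
 *
 * arrives here with RTA_DST carrying label 100, RTA_NEWDST carrying the
 * output labels 200/300, RTA_VIA carrying { AF_INET, 10.1.1.1 } and
 * RTA_OIF carrying eth0's ifindex.
 */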
Eric W. Biederman03c05662015-03-03 19:13:56 -06001643static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1644 struct mpls_route_config *cfg)
1645{
1646 struct rtmsg *rtm;
1647 struct nlattr *tb[RTA_MAX+1];
1648 int index;
1649 int err;
1650
1651 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy);
1652 if (err < 0)
1653 goto errout;
1654
1655 err = -EINVAL;
1656 rtm = nlmsg_data(nlh);
Eric W. Biederman03c05662015-03-03 19:13:56 -06001657
1658 if (rtm->rtm_family != AF_MPLS)
1659 goto errout;
1660 if (rtm->rtm_dst_len != 20)
1661 goto errout;
1662 if (rtm->rtm_src_len != 0)
1663 goto errout;
1664 if (rtm->rtm_tos != 0)
1665 goto errout;
1666 if (rtm->rtm_table != RT_TABLE_MAIN)
1667 goto errout;
1668 /* Any value is acceptable for rtm_protocol */
1669
1670	/* As MPLS uses destination-specific addresses
1671	 * (or source-specific addresses in the case of multicast),
1672	 * all addresses have universal scope.
1673 */
1674 if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
1675 goto errout;
1676 if (rtm->rtm_type != RTN_UNICAST)
1677 goto errout;
1678 if (rtm->rtm_flags != 0)
1679 goto errout;
1680
1681 cfg->rc_label = LABEL_NOT_SPECIFIED;
1682 cfg->rc_protocol = rtm->rtm_protocol;
Robert Shearmaneb7809f2015-12-10 19:30:50 +00001683 cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
Robert Shearman5b441ac2017-03-10 20:43:24 +00001684 cfg->rc_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001685 cfg->rc_nlflags = nlh->nlmsg_flags;
1686 cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
1687 cfg->rc_nlinfo.nlh = nlh;
1688 cfg->rc_nlinfo.nl_net = sock_net(skb->sk);
1689
1690 for (index = 0; index <= RTA_MAX; index++) {
1691 struct nlattr *nla = tb[index];
1692 if (!nla)
1693 continue;
1694
Suraj Deshmukh14dd3e12016-12-03 07:59:26 +00001695 switch (index) {
Eric W. Biederman03c05662015-03-03 19:13:56 -06001696 case RTA_OIF:
1697 cfg->rc_ifindex = nla_get_u32(nla);
1698 break;
1699 case RTA_NEWDST:
1700 if (nla_get_labels(nla, MAX_NEW_LABELS,
1701 &cfg->rc_output_labels,
1702 cfg->rc_output_label))
1703 goto errout;
1704 break;
1705 case RTA_DST:
1706 {
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001707 u8 label_count;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001708 if (nla_get_labels(nla, 1, &label_count,
1709 &cfg->rc_label))
1710 goto errout;
1711
Robert Shearmana6affd22015-08-03 17:50:04 +01001712 /* Reserved labels may not be set */
1713 if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
Eric W. Biederman03c05662015-03-03 19:13:56 -06001714 goto errout;
1715
1716 break;
1717 }
1718 case RTA_VIA:
1719 {
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001720 if (nla_get_via(nla, &cfg->rc_via_alen,
1721 &cfg->rc_via_table, cfg->rc_via))
Robert Shearmanf8d54af2015-03-06 10:47:00 +00001722 goto errout;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001723 break;
1724 }
1725 case RTA_MULTIPATH:
1726 {
1727 cfg->rc_mp = nla_data(nla);
1728 cfg->rc_mp_len = nla_len(nla);
Eric W. Biederman03c05662015-03-03 19:13:56 -06001729 break;
1730 }
Robert Shearman5b441ac2017-03-10 20:43:24 +00001731 case RTA_TTL_PROPAGATE:
1732 {
1733 u8 ttl_propagate = nla_get_u8(nla);
1734
1735 if (ttl_propagate > 1)
1736 goto errout;
1737 cfg->rc_ttl_propagate = ttl_propagate ?
1738 MPLS_TTL_PROP_ENABLED :
1739 MPLS_TTL_PROP_DISABLED;
1740 break;
1741 }
Eric W. Biederman03c05662015-03-03 19:13:56 -06001742 default:
1743 /* Unsupported attribute */
1744 goto errout;
1745 }
1746 }
1747
1748 err = 0;
1749errout:
1750 return err;
1751}
1752
1753static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
1754{
David Aherna4ac8c92017-03-31 07:14:03 -07001755 struct mpls_route_config *cfg;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001756 int err;
1757
David Aherna4ac8c92017-03-31 07:14:03 -07001758 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1759 if (!cfg)
1760 return -ENOMEM;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001761
David Aherna4ac8c92017-03-31 07:14:03 -07001762 err = rtm_to_route_config(skb, nlh, cfg);
1763 if (err < 0)
1764 goto out;
1765
1766 err = mpls_route_del(cfg);
1767out:
1768 kfree(cfg);
1769
1770 return err;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001771}
1772
1773
1774static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
1775{
David Aherna4ac8c92017-03-31 07:14:03 -07001776 struct mpls_route_config *cfg;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001777 int err;
1778
David Aherna4ac8c92017-03-31 07:14:03 -07001779 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1780 if (!cfg)
1781 return -ENOMEM;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001782
David Aherna4ac8c92017-03-31 07:14:03 -07001783 err = rtm_to_route_config(skb, nlh, cfg);
1784 if (err < 0)
1785 goto out;
1786
1787 err = mpls_route_add(cfg);
1788out:
1789 kfree(cfg);
1790
1791 return err;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001792}
1793
1794static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
1795 u32 label, struct mpls_route *rt, int flags)
1796{
Eric W. Biederman19d0c342015-03-07 16:21:56 -06001797 struct net_device *dev;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001798 struct nlmsghdr *nlh;
1799 struct rtmsg *rtm;
1800
1801 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
1802 if (nlh == NULL)
1803 return -EMSGSIZE;
1804
1805 rtm = nlmsg_data(nlh);
1806 rtm->rtm_family = AF_MPLS;
1807 rtm->rtm_dst_len = 20;
1808 rtm->rtm_src_len = 0;
1809 rtm->rtm_tos = 0;
1810 rtm->rtm_table = RT_TABLE_MAIN;
1811 rtm->rtm_protocol = rt->rt_protocol;
1812 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
1813 rtm->rtm_type = RTN_UNICAST;
1814 rtm->rtm_flags = 0;
1815
Eric W. Biederman03c05662015-03-03 19:13:56 -06001816 if (nla_put_labels(skb, RTA_DST, 1, &label))
1817 goto nla_put_failure;
Robert Shearman5b441ac2017-03-10 20:43:24 +00001818
1819 if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
1820 bool ttl_propagate =
1821 rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
1822
1823 if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
1824 ttl_propagate))
1825 goto nla_put_failure;
1826 }
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001827 if (rt->rt_nhn == 1) {
Robert Shearmancf4b24f2015-10-27 00:37:36 +00001828 const struct mpls_nh *nh = rt->rt_nh;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001829
1830 if (nh->nh_labels &&
1831 nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
1832 nh->nh_label))
1833 goto nla_put_failure;
Robert Shearmaneb7809f2015-12-10 19:30:50 +00001834 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
Robert Shearman72dcac92015-12-10 19:30:49 +00001835 nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001836 nh->nh_via_alen))
1837 goto nla_put_failure;
1838 dev = rtnl_dereference(nh->nh_dev);
1839 if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
1840 goto nla_put_failure;
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001841 if (nh->nh_flags & RTNH_F_LINKDOWN)
1842 rtm->rtm_flags |= RTNH_F_LINKDOWN;
1843 if (nh->nh_flags & RTNH_F_DEAD)
1844 rtm->rtm_flags |= RTNH_F_DEAD;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001845 } else {
1846 struct rtnexthop *rtnh;
1847 struct nlattr *mp;
David Ahern77ef013a2017-03-31 07:14:00 -07001848 u8 linkdown = 0;
1849 u8 dead = 0;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001850
1851 mp = nla_nest_start(skb, RTA_MULTIPATH);
1852 if (!mp)
1853 goto nla_put_failure;
1854
1855 for_nexthops(rt) {
David Ahernc00e51d2017-03-24 15:21:56 -07001856 dev = rtnl_dereference(nh->nh_dev);
1857 if (!dev)
1858 continue;
1859
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001860 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
1861 if (!rtnh)
1862 goto nla_put_failure;
1863
David Ahernc00e51d2017-03-24 15:21:56 -07001864 rtnh->rtnh_ifindex = dev->ifindex;
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001865 if (nh->nh_flags & RTNH_F_LINKDOWN) {
1866 rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
1867 linkdown++;
1868 }
1869 if (nh->nh_flags & RTNH_F_DEAD) {
1870 rtnh->rtnh_flags |= RTNH_F_DEAD;
1871 dead++;
1872 }
1873
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001874 if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
1875 nh->nh_labels,
1876 nh->nh_label))
1877 goto nla_put_failure;
Robert Shearmanf20367d2015-12-10 19:30:51 +00001878 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
1879 nla_put_via(skb, nh->nh_via_table,
Robert Shearmancf4b24f2015-10-27 00:37:36 +00001880 mpls_nh_via(rt, nh),
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001881 nh->nh_via_alen))
1882 goto nla_put_failure;
1883
1884 /* length of rtnetlink header + attributes */
1885 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
1886 } endfor_nexthops(rt);
1887
Roopa Prabhuc89359a2015-12-01 22:18:11 -08001888 if (linkdown == rt->rt_nhn)
1889 rtm->rtm_flags |= RTNH_F_LINKDOWN;
1890 if (dead == rt->rt_nhn)
1891 rtm->rtm_flags |= RTNH_F_DEAD;
1892
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001893 nla_nest_end(skb, mp);
1894 }
Eric W. Biederman03c05662015-03-03 19:13:56 -06001895
1896 nlmsg_end(skb, nlh);
1897 return 0;
1898
1899nla_put_failure:
1900 nlmsg_cancel(skb, nlh);
1901 return -EMSGSIZE;
1902}
1903
1904static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
1905{
1906 struct net *net = sock_net(skb->sk);
Eric W. Biederman19d0c342015-03-07 16:21:56 -06001907 struct mpls_route __rcu **platform_label;
1908 size_t platform_labels;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001909 unsigned int index;
1910
1911 ASSERT_RTNL();
1912
1913 index = cb->args[0];
Robert Shearmana6affd22015-08-03 17:50:04 +01001914 if (index < MPLS_LABEL_FIRST_UNRESERVED)
1915 index = MPLS_LABEL_FIRST_UNRESERVED;
Eric W. Biederman03c05662015-03-03 19:13:56 -06001916
Eric W. Biederman19d0c342015-03-07 16:21:56 -06001917 platform_label = rtnl_dereference(net->mpls.platform_label);
1918 platform_labels = net->mpls.platform_labels;
1919 for (; index < platform_labels; index++) {
Eric W. Biederman03c05662015-03-03 19:13:56 -06001920 struct mpls_route *rt;
Eric W. Biederman19d0c342015-03-07 16:21:56 -06001921 rt = rtnl_dereference(platform_label[index]);
Eric W. Biederman03c05662015-03-03 19:13:56 -06001922 if (!rt)
1923 continue;
1924
1925 if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
1926 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1927 index, rt, NLM_F_MULTI) < 0)
1928 break;
1929 }
1930 cb->args[0] = index;
1931
1932 return skb->len;
1933}
1934
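/* Upper bound on the netlink message size needed for one label, used to
 * size the notification skb in rtmsg_lfib().  It must account for every
 * attribute mpls_dump_route() can emit; a mismatch shows up as the
 * WARN_ON(-EMSGSIZE) in rtmsg_lfib().
 */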
Eric W. Biederman8de147d2015-03-03 19:14:31 -06001935static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
1936{
1937 size_t payload =
1938 NLMSG_ALIGN(sizeof(struct rtmsg))
Robert Shearman5b441ac2017-03-10 20:43:24 +00001939 + nla_total_size(4) /* RTA_DST */
1940 + nla_total_size(1); /* RTA_TTL_PROPAGATE */
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001941
1942 if (rt->rt_nhn == 1) {
1943 struct mpls_nh *nh = rt->rt_nh;
1944
1945 if (nh->nh_dev)
1946 payload += nla_total_size(4); /* RTA_OIF */
Robert Shearmaneb7809f2015-12-10 19:30:50 +00001947 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
Robert Shearman72dcac92015-12-10 19:30:49 +00001948 payload += nla_total_size(2 + nh->nh_via_alen);
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001949 if (nh->nh_labels) /* RTA_NEWDST */
1950 payload += nla_total_size(nh->nh_labels * 4);
1951 } else {
1952 /* each nexthop is packed in an attribute */
1953 size_t nhsize = 0;
1954
1955 for_nexthops(rt) {
David Aherne944e972017-03-28 15:19:49 -07001956 if (!rtnl_dereference(nh->nh_dev))
1957 continue;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001958 nhsize += nla_total_size(sizeof(struct rtnexthop));
Robert Shearmanf20367d2015-12-10 19:30:51 +00001959 /* RTA_VIA */
1960 if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
1961 nhsize += nla_total_size(2 + nh->nh_via_alen);
Roopa Prabhuf8efb732015-10-23 06:03:27 -07001962 if (nh->nh_labels)
1963 nhsize += nla_total_size(nh->nh_labels * 4);
1964 } endfor_nexthops(rt);
1965 /* nested attribute */
1966 payload += nla_total_size(nhsize);
1967 }
1968
Eric W. Biederman8de147d2015-03-03 19:14:31 -06001969 return payload;
1970}
1971
1972static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
1973 struct nlmsghdr *nlh, struct net *net, u32 portid,
1974 unsigned int nlm_flags)
1975{
1976 struct sk_buff *skb;
1977 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1978 int err = -ENOBUFS;
1979
1980 skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
1981 if (skb == NULL)
1982 goto errout;
1983
1984 err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
1985 if (err < 0) {
1986 /* -EMSGSIZE implies BUG in lfib_nlmsg_size */
1987 WARN_ON(err == -EMSGSIZE);
1988 kfree_skb(skb);
1989 goto errout;
1990 }
1991 rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
1992
1993 return;
1994errout:
1995 if (err < 0)
1996 rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
1997}
1998
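/* Resize the per-namespace label table, typically in response to a write to
 * the net.mpls.platform_labels sysctl, e.g. (illustrative):
 *
 *	sysctl -w net.mpls.platform_labels=1024
 *
 * When the table grows past the reserved label range, the IPv4 and IPv6
 * explicit-null labels (0 and 2) are pre-populated with routes through the
 * loopback device that just pop the explicit-null label.
 */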
Eric W. Biederman7720c012015-03-03 19:11:20 -06001999static int resize_platform_label_table(struct net *net, size_t limit)
2000{
2001 size_t size = sizeof(struct mpls_route *) * limit;
2002 size_t old_limit;
2003 size_t cp_size;
2004 struct mpls_route __rcu **labels = NULL, **old;
2005 struct mpls_route *rt0 = NULL, *rt2 = NULL;
2006 unsigned index;
2007
2008 if (size) {
2009 labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
2010 if (!labels)
2011 labels = vzalloc(size);
2012
2013 if (!labels)
2014 goto nolabels;
2015 }
2016
2017 /* In case the predefined labels need to be populated */
Tom Herbert78f5b892015-05-07 08:08:51 -07002018 if (limit > MPLS_LABEL_IPV4NULL) {
Eric W. Biederman7720c012015-03-03 19:11:20 -06002019 struct net_device *lo = net->loopback_dev;
David Aherna4ac8c92017-03-31 07:14:03 -07002020 rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
David Aherndf1c6312017-03-31 07:14:02 -07002021 if (IS_ERR(rt0))
Eric W. Biederman7720c012015-03-03 19:11:20 -06002022 goto nort0;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07002023 RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002024 rt0->rt_protocol = RTPROT_KERNEL;
Robert Shearman118d5232015-08-06 11:04:56 +01002025 rt0->rt_payload_type = MPT_IPV4;
Robert Shearman5b441ac2017-03-10 20:43:24 +00002026 rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07002027 rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
Robert Shearmanb4e04fc2015-10-27 00:37:35 +00002028 rt0->rt_nh->nh_via_alen = lo->addr_len;
Robert Shearmancf4b24f2015-10-27 00:37:36 +00002029 memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
2030 lo->addr_len);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002031 }
Tom Herbert78f5b892015-05-07 08:08:51 -07002032 if (limit > MPLS_LABEL_IPV6NULL) {
Eric W. Biederman7720c012015-03-03 19:11:20 -06002033 struct net_device *lo = net->loopback_dev;
David Aherna4ac8c92017-03-31 07:14:03 -07002034 rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
David Aherndf1c6312017-03-31 07:14:02 -07002035 if (IS_ERR(rt2))
Eric W. Biederman7720c012015-03-03 19:11:20 -06002036 goto nort2;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07002037 RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002038 rt2->rt_protocol = RTPROT_KERNEL;
Robert Shearman118d5232015-08-06 11:04:56 +01002039 rt2->rt_payload_type = MPT_IPV6;
David Ahern6a18c312017-03-23 19:02:27 -06002040 rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
Roopa Prabhuf8efb732015-10-23 06:03:27 -07002041 rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
Robert Shearmanb4e04fc2015-10-27 00:37:35 +00002042 rt2->rt_nh->nh_via_alen = lo->addr_len;
Robert Shearmancf4b24f2015-10-27 00:37:36 +00002043 memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
2044 lo->addr_len);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002045 }
2046
2047 rtnl_lock();
2048 /* Remember the original table */
Eric W. Biederman19d0c342015-03-07 16:21:56 -06002049 old = rtnl_dereference(net->mpls.platform_label);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002050 old_limit = net->mpls.platform_labels;
2051
2052 /* Free any labels beyond the new table */
2053 for (index = limit; index < old_limit; index++)
Roopa Prabhuf8efb732015-10-23 06:03:27 -07002054 mpls_route_update(net, index, NULL, NULL);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002055
2056 /* Copy over the old labels */
2057 cp_size = size;
2058 if (old_limit < limit)
2059 cp_size = old_limit * sizeof(struct mpls_route *);
2060
2061 memcpy(labels, old, cp_size);
2062
2063 /* If needed set the predefined labels */
Tom Herbert78f5b892015-05-07 08:08:51 -07002064 if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
2065 (limit > MPLS_LABEL_IPV6NULL)) {
2066 RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002067 rt2 = NULL;
2068 }
2069
Tom Herbert78f5b892015-05-07 08:08:51 -07002070 if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
2071 (limit > MPLS_LABEL_IPV4NULL)) {
2072 RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002073 rt0 = NULL;
2074 }
2075
2076 /* Update the global pointers */
2077 net->mpls.platform_labels = limit;
Eric W. Biederman19d0c342015-03-07 16:21:56 -06002078 rcu_assign_pointer(net->mpls.platform_label, labels);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002079
2080 rtnl_unlock();
2081
2082 mpls_rt_free(rt2);
2083 mpls_rt_free(rt0);
2084
2085 if (old) {
2086 synchronize_rcu();
2087 kvfree(old);
2088 }
2089 return 0;
2090
2091nort2:
2092 mpls_rt_free(rt0);
2093nort0:
2094 kvfree(labels);
2095nolabels:
2096 return -ENOMEM;
2097}
2098
2099static int mpls_platform_labels(struct ctl_table *table, int write,
2100 void __user *buffer, size_t *lenp, loff_t *ppos)
2101{
2102 struct net *net = table->data;
2103 int platform_labels = net->mpls.platform_labels;
2104 int ret;
2105 struct ctl_table tmp = {
2106 .procname = table->procname,
2107 .data = &platform_labels,
2108 .maxlen = sizeof(int),
2109 .mode = table->mode,
2110 .extra1 = &zero,
2111 .extra2 = &label_limit,
2112 };
2113
2114 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2115
2116 if (write && ret == 0)
2117 ret = resize_platform_label_table(net, platform_labels);
2118
2119 return ret;
2120}
2121
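/* The sysctls below (net.mpls.platform_labels, ip_ttl_propagate and
 * default_ttl) are per network namespace, so the template table cannot hold
 * direct pointers.  MPLS_NS_SYSCTL_OFFSET() stores a field's offset within
 * struct net and mpls_net_init() rebases each .data entry onto the namespace
 * being created; platform_labels keeps .data == NULL (offset zero) so its
 * handler is handed the struct net pointer itself.
 */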
Robert Shearman5b441ac2017-03-10 20:43:24 +00002122#define MPLS_NS_SYSCTL_OFFSET(field) \
2123 (&((struct net *)0)->field)
2124
Robert Shearman37bde792015-04-22 11:14:38 +01002125static const struct ctl_table mpls_table[] = {
Eric W. Biederman7720c012015-03-03 19:11:20 -06002126 {
2127 .procname = "platform_labels",
2128 .data = NULL,
2129 .maxlen = sizeof(int),
2130 .mode = 0644,
2131 .proc_handler = mpls_platform_labels,
2132 },
Robert Shearman5b441ac2017-03-10 20:43:24 +00002133 {
2134 .procname = "ip_ttl_propagate",
2135 .data = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
2136 .maxlen = sizeof(int),
2137 .mode = 0644,
2138 .proc_handler = proc_dointvec_minmax,
2139 .extra1 = &zero,
2140 .extra2 = &one,
2141 },
Robert Shearmana59166e2017-03-10 20:43:25 +00002142 {
2143 .procname = "default_ttl",
2144 .data = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
2145 .maxlen = sizeof(int),
2146 .mode = 0644,
2147 .proc_handler = proc_dointvec_minmax,
2148 .extra1 = &one,
2149 .extra2 = &ttl_max,
2150 },
Eric W. Biederman7720c012015-03-03 19:11:20 -06002151 { }
2152};
2153
Eric W. Biederman01891972015-03-03 19:10:47 -06002154static int mpls_net_init(struct net *net)
2155{
Eric W. Biederman7720c012015-03-03 19:11:20 -06002156 struct ctl_table *table;
Robert Shearman5b441ac2017-03-10 20:43:24 +00002157 int i;
Eric W. Biederman7720c012015-03-03 19:11:20 -06002158
Eric W. Biederman01891972015-03-03 19:10:47 -06002159 net->mpls.platform_labels = 0;
2160 net->mpls.platform_label = NULL;
Robert Shearman5b441ac2017-03-10 20:43:24 +00002161 net->mpls.ip_ttl_propagate = 1;
Robert Shearmana59166e2017-03-10 20:43:25 +00002162 net->mpls.default_ttl = 255;
Eric W. Biederman01891972015-03-03 19:10:47 -06002163
Eric W. Biederman7720c012015-03-03 19:11:20 -06002164 table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
2165 if (table == NULL)
2166 return -ENOMEM;
2167
Robert Shearman5b441ac2017-03-10 20:43:24 +00002168 /* Table data contains only offsets relative to the base of
2169	 * the struct net at this point, so make them absolute.
2170 */
2171 for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
2172 table[i].data = (char *)net + (uintptr_t)table[i].data;
2173
Eric W. Biederman7720c012015-03-03 19:11:20 -06002174 net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
Nikolay Aleksandrov6ea3c9d52015-08-31 10:44:19 -07002175 if (net->mpls.ctl == NULL) {
2176 kfree(table);
Eric W. Biederman7720c012015-03-03 19:11:20 -06002177 return -ENOMEM;
Nikolay Aleksandrov6ea3c9d52015-08-31 10:44:19 -07002178 }
Eric W. Biederman7720c012015-03-03 19:11:20 -06002179
Eric W. Biederman01891972015-03-03 19:10:47 -06002180 return 0;
2181}
2182
2183static void mpls_net_exit(struct net *net)
2184{
Eric W. Biederman19d0c342015-03-07 16:21:56 -06002185 struct mpls_route __rcu **platform_label;
2186 size_t platform_labels;
Eric W. Biederman7720c012015-03-03 19:11:20 -06002187 struct ctl_table *table;
Eric W. Biederman01891972015-03-03 19:10:47 -06002188 unsigned int index;
2189
Eric W. Biederman7720c012015-03-03 19:11:20 -06002190 table = net->mpls.ctl->ctl_table_arg;
2191 unregister_net_sysctl_table(net->mpls.ctl);
2192 kfree(table);
2193
Eric W. Biederman19d0c342015-03-07 16:21:56 -06002194	/* An rcu grace period has passed since the last device in
2195	 * the network namespace (and thus the last in-flight packet)
Eric W. Biederman01891972015-03-03 19:10:47 -06002196	 * left this network namespace. This is because
2197	 * unregister_netdevice_many and netdev_run_todo have completed
2198 * for each network device that was in this network namespace.
2199 *
2200 * As such no additional rcu synchronization is necessary when
2201 * freeing the platform_label table.
2202 */
2203 rtnl_lock();
Eric W. Biederman19d0c342015-03-07 16:21:56 -06002204 platform_label = rtnl_dereference(net->mpls.platform_label);
2205 platform_labels = net->mpls.platform_labels;
2206 for (index = 0; index < platform_labels; index++) {
2207 struct mpls_route *rt = rtnl_dereference(platform_label[index]);
2208 RCU_INIT_POINTER(platform_label[index], NULL);
David Aherne37791e2017-03-10 09:46:15 -08002209 mpls_notify_route(net, index, rt, NULL, NULL);
Eric W. Biederman01891972015-03-03 19:10:47 -06002210 mpls_rt_free(rt);
2211 }
2212 rtnl_unlock();
2213
Eric W. Biederman19d0c342015-03-07 16:21:56 -06002214 kvfree(platform_label);
Eric W. Biederman01891972015-03-03 19:10:47 -06002215}
2216
2217static struct pernet_operations mpls_net_ops = {
2218 .init = mpls_net_init,
2219 .exit = mpls_net_exit,
2220};
2221
Robert Shearman27d69102017-01-16 14:16:37 +00002222static struct rtnl_af_ops mpls_af_ops __read_mostly = {
2223 .family = AF_MPLS,
2224 .fill_stats_af = mpls_fill_stats_af,
2225 .get_stats_af_size = mpls_get_stats_af_size,
2226};
2227
Eric W. Biederman01891972015-03-03 19:10:47 -06002228static int __init mpls_init(void)
2229{
2230 int err;
2231
2232 BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
2233
2234 err = register_pernet_subsys(&mpls_net_ops);
2235 if (err)
2236 goto out;
2237
2238 err = register_netdevice_notifier(&mpls_dev_notifier);
2239 if (err)
2240 goto out_unregister_pernet;
2241
2242 dev_add_pack(&mpls_packet_type);
2243
Robert Shearman27d69102017-01-16 14:16:37 +00002244 rtnl_af_register(&mpls_af_ops);
2245
Eric W. Biederman03c05662015-03-03 19:13:56 -06002246 rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
2247 rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
2248 rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
David Ahern24045a02017-02-20 08:03:30 -08002249 rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf,
2250 mpls_netconf_dump_devconf, NULL);
Eric W. Biederman01891972015-03-03 19:10:47 -06002251 err = 0;
2252out:
2253 return err;
2254
2255out_unregister_pernet:
2256 unregister_pernet_subsys(&mpls_net_ops);
2257 goto out;
2258}
2259module_init(mpls_init);
2260
2261static void __exit mpls_exit(void)
2262{
Eric W. Biederman03c05662015-03-03 19:13:56 -06002263 rtnl_unregister_all(PF_MPLS);
Robert Shearman27d69102017-01-16 14:16:37 +00002264 rtnl_af_unregister(&mpls_af_ops);
Eric W. Biederman01891972015-03-03 19:10:47 -06002265 dev_remove_pack(&mpls_packet_type);
2266 unregister_netdevice_notifier(&mpls_dev_notifier);
2267 unregister_pernet_subsys(&mpls_net_ops);
2268}
2269module_exit(mpls_exit);
2270
2271MODULE_DESCRIPTION("MultiProtocol Label Switching");
2272MODULE_LICENSE("GPL v2");
2273MODULE_ALIAS_NETPROTO(PF_MPLS);