blob: e01a826f2a9c8cef455f217eab07a3ec2669280e [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/dst.h Protocol independent destination cache definitions.
3 *
4 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
5 *
6 */
7
8#ifndef _NET_DST_H
9#define _NET_DST_H
10
Alexey Dobriyan86393e52009-08-29 01:34:49 +000011#include <net/dst_ops.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020012#include <linux/netdevice.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/rtnetlink.h>
14#include <linux/rcupdate.h>
Paul Gortmaker187f1882011-11-23 20:12:59 -050015#include <linux/bug.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/jiffies.h>
17#include <net/neighbour.h>
18#include <asm/processor.h>
19
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#define DST_GC_MIN (HZ/10)
21#define DST_GC_INC (HZ/2)
22#define DST_GC_MAX (120*HZ)
23
24/* Each dst_entry has reference count and sits in some parent list(s).
25 * When it is removed from parent list, it is "freed" (dst_free).
26 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
27 * is zero, it can be destroyed immediately, otherwise it is added
28 * to gc list and garbage collector periodically checks the refcnt.
29 */
30
31struct sk_buff;
32
/* A protocol-independent cached route ("destination cache" entry).
 *
 * Lifetime: refcounted via __refcnt; see the lifecycle comment above
 * (dst_free / obsolete / gc list).  Field order is deliberate: __refcnt
 * is padded onto its own 64-byte-aligned area away from input/output/ops
 * (see the comments below), so do not reorder fields casually.
 */
struct dst_entry {
	struct rcu_head		rcu_head;	/* for deferred free via dst_rcu_free() */
	struct dst_entry	*child;		/* next dst in a stacked (xfrm) bundle */
	struct net_device	*dev;
	struct dst_ops		*ops;		/* per-protocol operations table */
	unsigned long		_metrics;	/* u32 metrics array ptr | DST_METRICS_READ_ONLY */
	unsigned long		expires;	/* jiffies expiry time; 0 == never */
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;	/* keep layout identical without XFRM */
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sk_buff *);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_FAKE_RTABLE		0x0040
#define DST_XFRM_TUNNEL		0x0080
#define DST_XFRM_QUEUE		0x0100

	/* set by dst_confirm(), consumed by dst_neigh_output() */
	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;		/* keep layout identical without classid */
#endif

	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;		/* use counter, bumped by dst_use() */
	unsigned long		lastuse;	/* jiffies of last use */
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};
107
Joe Perchesa4023dd2013-09-20 11:23:21 -0700108u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
Eric Dumazeta37e6e32012-08-07 10:55:45 +0000109extern const u32 dst_default_metrics[];
David S. Miller62fa8a82011-01-26 20:51:05 -0800110
111#define DST_METRICS_READ_ONLY 0x1UL
112#define __DST_METRICS_PTR(Y) \
113 ((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
114#define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics)
115
116static inline bool dst_metrics_read_only(const struct dst_entry *dst)
117{
118 return dst->_metrics & DST_METRICS_READ_ONLY;
119}
120
Joe Perchesa4023dd2013-09-20 11:23:21 -0700121void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
David S. Miller62fa8a82011-01-26 20:51:05 -0800122
123static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
124{
125 unsigned long val = dst->_metrics;
126 if (!(val & DST_METRICS_READ_ONLY))
127 __dst_destroy_metrics_generic(dst, val);
128}
129
/* Return a writable pointer to @dst's metrics array.  If the metrics are
 * currently shared read-only, trigger a copy-on-write through
 * dst_ops->cow_metrics(); that call may return NULL on allocation
 * failure, so callers must check the result.
 */
static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	/* _metrics must always point at some array (at least the
	 * read-only defaults); NULL here is a programming error.
	 */
	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}
140
141/* This may only be invoked before the entry has reached global
142 * visibility.
143 */
144static inline void dst_init_metrics(struct dst_entry *dst,
145 const u32 *src_metrics,
146 bool read_only)
147{
148 dst->_metrics = ((unsigned long) src_metrics) |
149 (read_only ? DST_METRICS_READ_ONLY : 0);
150}
151
152static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
153{
154 u32 *dst_metrics = dst_metrics_write_ptr(dest);
155
156 if (dst_metrics) {
157 u32 *src_metrics = DST_METRICS_PTR(src);
158
159 memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
160 }
161}
162
/* Plain (possibly read-only) pointer to @dst's metrics array. */
static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}
167
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168static inline u32
David S. Miller5170ae82010-12-12 21:35:57 -0800169dst_metric_raw(const struct dst_entry *dst, const int metric)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170{
David S. Miller62fa8a82011-01-26 20:51:05 -0800171 u32 *p = DST_METRICS_PTR(dst);
172
173 return p[metric-1];
David S. Millerdefb3512010-12-08 21:16:57 -0800174}
175
/* Read metric @metric, warning (once) if it is one of the metrics that
 * has a dedicated accessor and must not be read raw: hoplimit, advmss
 * (use dst_metric_advmss()) and mtu (use dst_mtu()).
 */
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}
184
David S. Miller0dbaee32010-12-13 12:52:14 -0800185static inline u32
186dst_metric_advmss(const struct dst_entry *dst)
187{
188 u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);
189
190 if (!advmss)
191 advmss = dst->ops->default_advmss(dst);
192
193 return advmss;
194}
195
David S. Millerdefb3512010-12-08 21:16:57 -0800196static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
197{
David S. Miller62fa8a82011-01-26 20:51:05 -0800198 u32 *p = dst_metrics_write_ptr(dst);
David S. Millerdefb3512010-12-08 21:16:57 -0800199
David S. Miller62fa8a82011-01-26 20:51:05 -0800200 if (p)
201 p[metric-1] = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202}
203
/* Test RTAX_FEATURE_* bit(s) @feature in the route's feature metric;
 * returns the masked bits (0 when the feature is off).
 */
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}
209
/* Path MTU for this route, computed by the protocol's dst_ops->mtu(). */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}
214
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}
220
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221static inline u32
222dst_allfrag(const struct dst_entry *dst)
223{
Gilad Ben-Yossef0c3adfb2009-10-28 04:15:23 +0000224 int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225 return ret;
226}
227
/* Nonzero when metric @metric is locked (userspace pinned it via the
 * RTAX_LOCK bitmask).  Note the return value is the raw bit
 * (1 << metric), not normalized to 0/1.
 */
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
233
/* Take a reference on @dst. */
static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	/* Enforce the cache-line placement promised in struct dst_entry:
	 * __refcnt must sit at a 64-byte-aligned offset.
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}
243
/* Take a reference on @dst and record a use at time @time (jiffies).
 * The __use/lastuse statistics feed route garbage collection.
 */
static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}
250
/* Record a use of @dst without taking a reference — caller must hold
 * the dst alive by other means (typically RCU; see skb_dst_force()).
 */
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}
256
/* NULL-tolerant dst_hold(): take a reference if @dst is non-NULL and
 * return it unchanged.
 */
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}
263
Joe Perchesa4023dd2013-09-20 11:23:21 -0700264void dst_release(struct dst_entry *dst);
Eric Dumazet7fee2262010-05-11 23:19:48 +0000265
266static inline void refdst_drop(unsigned long refdst)
267{
268 if (!(refdst & SKB_DST_NOREF))
269 dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
270}
271
272/**
273 * skb_dst_drop - drops skb dst
274 * @skb: buffer
275 *
276 * Drops dst reference count if a reference was taken.
277 */
Eric Dumazetadf30902009-06-02 05:19:30 +0000278static inline void skb_dst_drop(struct sk_buff *skb)
279{
Eric Dumazet7fee2262010-05-11 23:19:48 +0000280 if (skb->_skb_refdst) {
281 refdst_drop(skb->_skb_refdst);
282 skb->_skb_refdst = 0UL;
283 }
284}
285
286static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
287{
288 nskb->_skb_refdst = oskb->_skb_refdst;
289 if (!(nskb->_skb_refdst & SKB_DST_NOREF))
290 dst_clone(skb_dst(nskb));
291}
292
/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		/* A noref dst is only kept alive by RCU, so converting it
		 * to a refcounted one is only legal inside an RCU read
		 * section.
		 */
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307
Eric Dumazetd19d56d2010-05-17 22:36:55 -0700308
/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalulate the hash for the
	 * encapsulated packet, unless we have already determine the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	/* Scrub packet state only when crossing a netns boundary. */
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
332
/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	/* Account before __skb_tunnel_rx(), which may scrub the skb. */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}
350
/* Children define the path of the packet through the
 * Linux networking. Thus, destinations are stackable.
 */

/* Pop the top dst off @skb's stack: take a reference on its child,
 * drop the skb's reference on the current dst, and return the child
 * (which may be NULL).  Caller owns the returned reference.
 */
static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(skb_dst(skb)->child);

	skb_dst_drop(skb);
	return child;
}
362
Joe Perchesa4023dd2013-09-20 11:23:21 -0700363int dst_discard(struct sk_buff *skb);
364void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
365 int initial_obsolete, unsigned short flags);
366void __dst_free(struct dst_entry *dst);
367struct dst_entry *dst_destroy(struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368
/* "Free" @dst per the lifecycle described at the top of this file:
 * already-dead entries (obsolete > 0) are left alone; unreferenced
 * entries are destroyed immediately; otherwise the entry is handed to
 * __dst_free() for deferred destruction by the garbage collector.
 */
static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		/* dst_destroy() may return a child dst that itself needs
		 * freeing; NULL means everything was torn down. */
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}
380
381static inline void dst_rcu_free(struct rcu_head *head)
382{
383 struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
384 dst_free(dst);
385}
386
/* Mark @dst so the next dst_neigh_output() confirms the neighbour entry
 * (refreshes its reachability timestamp).
 */
static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000391
/* Transmit @skb through neighbour @n, consuming any pending
 * confirmation requested via dst_confirm().  Uses the cached hardware
 * header fast path when the neighbour is connected and a header is
 * cached; otherwise falls back to the neighbour's output method.
 */
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}
412
David S. Millerd3aaeb32011-07-18 00:40:17 -0700413static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
414{
Zhouyi Zhouaaa0c232013-03-14 17:21:50 +0000415 struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
416 return IS_ERR(n) ? NULL : n;
David S. Millerf894cbf2012-07-02 21:52:24 -0700417}
418
419static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
420 struct sk_buff *skb)
421{
Zhouyi Zhouaaa0c232013-03-14 17:21:50 +0000422 struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
423 return IS_ERR(n) ? NULL : n;
David S. Millerd3aaeb32011-07-18 00:40:17 -0700424}
425
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426static inline void dst_link_failure(struct sk_buff *skb)
427{
Eric Dumazetadf30902009-06-02 05:19:30 +0000428 struct dst_entry *dst = skb_dst(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 if (dst && dst->ops && dst->ops->link_failure)
430 dst->ops->link_failure(skb);
431}
432
/* Arm (or shorten) @dst's expiry to @timeout jiffies from now.
 * dst->expires == 0 means "never expires", so a computed expiry that
 * lands exactly on 0 is nudged to 1 to stay distinguishable.
 */
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	/* Only ever move the expiry earlier, never later. */
	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
443
/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	/* Dispatch through the route's output hook; skb must carry a dst. */
	return skb_dst(skb)->output(skb);
}
449
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	/* Dispatch through the route's input hook; skb must carry a dst. */
	return skb_dst(skb)->input(skb);
}
455
456static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
457{
458 if (dst->obsolete)
459 dst = dst->ops->check(dst, cookie);
460 return dst;
461}
462
Joe Perchesa4023dd2013-09-20 11:23:21 -0700463void dst_init(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700464
Herbert Xu815f4e52007-12-12 10:36:59 -0800465/* Flags for xfrm_lookup flags argument. */
466enum {
David S. Miller80c0bc92011-03-01 14:36:37 -0800467 XFRM_LOOKUP_ICMP = 1 << 0,
Herbert Xu815f4e52007-12-12 10:36:59 -0800468};
469
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470struct flowi;
471#ifndef CONFIG_XFRM
/* Without CONFIG_XFRM there are no transformations to apply: the
 * original route is returned unchanged.
 */
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl, struct sock *sk,
					    int flags)
{
	return dst_orig;
}
Vlad Yaseviche87b3992013-10-15 22:01:29 -0400479
/* Without CONFIG_XFRM no dst ever carries a transform state. */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}
484
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485#else
Joe Perchesa4023dd2013-09-20 11:23:21 -0700486struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
487 const struct flowi *fl, struct sock *sk,
488 int flags);
Vlad Yaseviche87b3992013-10-15 22:01:29 -0400489
/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496
497#endif /* _NET_DST_H */