/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>

struct inetpeer_addr_base {
	union {
		__be32		a4;
		__be32		a6[4];
	};
};

struct inetpeer_addr {
	struct inetpeer_addr_base	addr;
	__u16				family;
};

struct inet_peer {
	/* group together avl_left, avl_right, daddr to speed up lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;
	__u32			avl_height;

	u32			metrics[RTAX_MAX];
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;
	union {
		struct list_head	gc_list;
		struct rcu_head		gc_rcu;
	};
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), the following
	 * fields are no longer available: rid, ip_id_count.
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t	rid;		/* Frag reception counter */
			atomic_t	ip_id_count;	/* IP ID for the next packet */
		};
		struct rcu_head		rcu;
		struct inet_peer	*gc_next;
	};

	/* the following fields might be frequently dirtied */
	__u32			dtime;	/* time of last use of unreferenced entries */
	atomic_t		refcnt;
};

struct inet_peer_base {
	struct inet_peer __rcu	*root;
	seqlock_t		lock;
	u32			flush_seq;
	int			total;
};

#define INETPEER_BASE_BIT	0x1UL

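/*
 * Explanatory note (not from the original header): the helpers below
 * implement a tagged-pointer scheme.  A single unsigned long slot holds
 * either a struct inet_peer * (bit 0 clear) or a struct inet_peer_base *
 * with INETPEER_BASE_BIT set.  This assumes both structures are at least
 * 2-byte aligned, so bit 0 of a genuine pointer is always zero and can be
 * used as the discriminator.
 */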
static inline struct inet_peer *inetpeer_ptr(unsigned long val)
{
	BUG_ON(val & INETPEER_BASE_BIT);
	return (struct inet_peer *) val;
}

static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
{
	if (!(val & INETPEER_BASE_BIT))
		return NULL;
	val &= ~INETPEER_BASE_BIT;
	return (struct inet_peer_base *) val;
}

static inline bool inetpeer_ptr_is_peer(unsigned long val)
{
	return !(val & INETPEER_BASE_BIT);
}

static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
{
	/* This implicitly clears INETPEER_BASE_BIT */
	*val = (unsigned long) peer;
}

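/*
 * Explanatory note (not from the original header): inetpeer_ptr_set_peer()
 * atomically claims a slot that still holds a tagged base pointer and
 * replaces it with @peer.  It returns false if the slot already contains a
 * peer pointer, or if a concurrent writer won the cmpxchg race.
 */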
static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
{
	unsigned long val = (unsigned long) peer;
	unsigned long orig = *ptr;

	if (!(orig & INETPEER_BASE_BIT) ||
	    cmpxchg(ptr, orig, val) != orig)
		return false;
	return true;
}

static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
{
	*ptr = (unsigned long) base | INETPEER_BASE_BIT;
}

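/*
 * Explanatory note (not from the original header): inetpeer_transfer_peer()
 * copies the tagged word from @from to @to; if that word currently holds a
 * peer pointer (rather than a base pointer), an extra reference is taken so
 * that both slots own the peer independently.
 */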
static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
{
	unsigned long val = *from;

	*to = val;
	if (inetpeer_ptr_is_peer(val)) {
		struct inet_peer *peer = inetpeer_ptr(val);

		atomic_inc(&peer->refcnt);
	}
}

extern void inet_peer_base_init(struct inet_peer_base *);

void inet_initpeers(void) __init;

#define INETPEER_METRICS_NEW	(~(u32) 0)

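/*
 * Explanatory note (not from the original header): the metrics array is
 * expected to be initialised elsewhere with the all-ones sentinel
 * INETPEER_METRICS_NEW; inet_metrics_new() checks the RTAX_LOCK-1 slot to
 * tell whether real route metrics have been written for this peer yet.
 */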
static inline bool inet_metrics_new(const struct inet_peer *p)
{
	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
}

/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create);

static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
						__be32 v4daddr,
						int create)
{
	struct inetpeer_addr daddr;

	daddr.addr.a4 = v4daddr;
	daddr.family = AF_INET;
	return inet_getpeer(base, &daddr, create);
}

static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
						const struct in6_addr *v6daddr,
						int create)
{
	struct inetpeer_addr daddr;

	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
	daddr.family = AF_INET6;
	return inet_getpeer(base, &daddr, create);
}

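/*
 * Illustrative lookup/release pattern (a sketch, not part of the original
 * header; "base" is whatever inet_peer_base the caller owns and "iph" is a
 * hypothetical IPv4 header pointer):
 *
 *	struct inet_peer *peer;
 *
 *	peer = inet_getpeer_v4(base, iph->daddr, 1);
 *	if (peer) {
 *		// consult peer->metrics, inet_peer_xrlim_allow(), etc.
 *		inet_putpeer(peer);
 *	}
 */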
/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

extern void inetpeer_invalidate_tree(struct inet_peer_base *);
extern void inetpeer_invalidate_family(int family);

/*
 * temporary check to make sure we don't access rid or ip_id_count
 * if no refcount is taken on inet_peer
 */
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}

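/*
 * Explanatory note (not from the original header): inet_getid() below
 * advances the peer's IP ID counter by (more + 1) in a lock-free cmpxchg
 * loop; a would-be result of 0 is bumped to 1, so 0 is never handed out.
 */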
/* can be called with or without local BH being disabled */
static inline int inet_getid(struct inet_peer *p, int more)
{
	int old, new;

	more++;
	inet_peer_refcheck(p);
	do {
		old = atomic_read(&p->ip_id_count);
		new = old + more;
		if (!new)
			new = 1;
	} while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
	return new;
}

#endif /* _NET_INETPEER_H */