/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
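
/*
 * Illustrative userspace sketch (an assumption for context, not part of this
 * file): every listener sets SO_REUSEPORT before bind(), and one socket in
 * the group may attach a classic BPF program whose return value is used as
 * an index into the group's socket array:
 *
 *	int one = 1;
 *	struct sock_fprog fprog = { ... };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 */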

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

static DEFINE_SPINLOCK(reuseport_lock);

static struct sock_reuseport *__reuseport_alloc(u16 max_socks)
{
	size_t size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	if (rcu_dereference_protected(sk->sk_reuseport_cb,
				      lockdep_is_held(&reuseport_lock)))
		goto out;

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

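/* Double the size of the group's socket array.  Called with reuseport_lock
 * held; every member socket's sk_reuseport_cb is repointed at the new,
 * larger group before the old one is freed via RCU.
 */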
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

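/* RCU callback: release the group's BPF program (if any) and the group. */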
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

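/* Remove @sk from its reuseport group.  When the last socket leaves, the
 * group itself is freed via RCU.
 */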
void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

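/* Run the group's BPF program against @skb and map its return value to a
 * socket in the array.  A shared skb is cloned first so the data pointer can
 * be temporarily advanced past the protocol header; returns NULL if the
 * clone or pull fails, or if the program returns an out-of-range index.
 */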
static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *   the skb does not yet point at the payload, this parameter represents
 *   how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
		else
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

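/* Attach @prog to @sk's reuseport group and return the previously attached
 * program, which the caller is responsible for releasing.
 */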
struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);