/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size
 * returned by the program. If pkt_len is 0 we toss the packet. If
 * skb->len is smaller than pkt_len we keep the whole skb->data. This
 * is the socket level wrapper to BPF_PROG_RUN. It returns 0 if the
 * packet should be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);

		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

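/* Illustrative usage sketch (not part of this file): protocol receive
 * paths typically invoke the filter through the sk_filter() wrapper,
 * assumed here to be a thin inline around sk_filter_trim_cap() with a
 * cap of one byte, e.g.:
 *
 *	if (sk_filter(sk, skb)) {
 *		kfree_skb(skb);		// program returned 0, drop packet
 *		return NET_RX_DROP;
 *	}
 */
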
BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_0(__get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= __get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

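/* For orientation, a sketch only: a bpf_func_proto for a helper that
 * takes arguments would additionally spell out the argument types. A
 * hypothetical proto for the three-argument helper above might read:
 *
 *	static const struct bpf_func_proto example_nlattr_proto = {
 *		.func		= __skb_get_nlattr,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *		.arg1_type	= ARG_PTR_TO_CTX,	// skb
 *		.arg2_type	= ARG_ANYTHING,		// a
 *		.arg3_type	= ARG_ANYTHING,		// x
 *	};
 *
 * The name example_nlattr_proto is made up for illustration.
 */
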
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

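/* As a concrete reading of the above (illustrative only): for
 * SKF_AD_VLAN_TAG_PRESENT the emitted eBPF sequence is
 *
 *	dst_reg = *(u16 *) (src_reg + offsetof(struct sk_buff, vlan_tci));
 *	dst_reg >>= 12;
 *	dst_reg &= 1;
 *
 * i.e. it isolates the VLAN_TAG_PRESENT bit (0x1000) as 0 or 1.
 */
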
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

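/* Worked example, a sketch of the mapping above: the classic load
 * BPF_LD | BPF_W | BPF_ABS with k = SKF_AD_OFF + SKF_AD_IFINDEX
 * expands into roughly
 *
 *	tmp = *(void **) (CTX + offsetof(struct sk_buff, dev));
 *	if (tmp == NULL)
 *		exit;		// no device attached, bail out
 *	A = *(u32 *) (tmp + offsetof(struct net_device, ifindex));
 *
 * which is why a single classic extension can occupy several insns
 * in the converted program.
 */
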
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF instruction set to 'bpf_insn'
 * style extended BPF.
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) Second pass to remap, done in two sub-passes: the first finds
 *    new jump offsets, the second does the remapping:
 *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_insn *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_insn) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X))
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

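/* Worked example (illustrative): a classic conditional such as
 *
 *	jeq #0x800, jt, jf
 *
 * maps to a single eBPF JEQ when 'jf' falls through (jf == 0), to a
 * single JNE when 'jt' falls through, and otherwise to a Jxx/JA pair;
 * in all cases the offsets are rewritten via the addrs[] table, which
 * is why the conversion above may need an extra pass to converge.
 */
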
/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

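/* Example of a filter this check rejects (a sketch): reading scratch
 * cell 0 before anything was stored to it, i.e.
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),		// read mem[0]: -EINVAL
 *	BPF_STMT(BPF_RET | BPF_K, 0),
 *
 * whereas a BPF_ST to cell 0 preceding the load would make it pass.
 */
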
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

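/* Minimal program that satisfies all of the above checks (a sketch):
 *
 *	struct sock_filter prog[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept up to 64k
 *	};
 *
 * one instruction, an allowed opcode, and a terminating RET, with no
 * scratch memory reads for check_load_and_stores() to complain about.
 */
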
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

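/* Usage sketch (a hypothetical caller, in the spirit of the socket
 * attach path; not the authoritative implementation):
 *
 *	if (!sk_filter_charge(sk, fp))
 *		return -ENOMEM;		// socket omem budget exceeded
 *	old_fp = rcu_dereference_protected(sk->sk_filter, ...);
 *	rcu_assign_pointer(sk->sk_filter, fp);
 *	if (old_fp)
 *		sk_filter_uncharge(sk, old_fp);
 *
 * sk_filter_uncharge() is the exact inverse of a successful charge.
 */
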
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 958 | static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 959 | { |
| 960 | struct sock_filter *old_prog; |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 961 | struct bpf_prog *old_fp; |
Daniel Borkmann | 3480593 | 2014-05-29 10:22:50 +0200 | [diff] [blame] | 962 | int err, new_len, old_len = fp->len; |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 963 | |
|	964	| 	/* We are free to overwrite insns et al right here, as they |
|	965	| 	 * won't be used internally anymore once the migration to |
|	966	| 	 * the internal BPF instruction representation has taken |
|	967	| 	 * place. |
|	968	| 	 */ |
| 969 | BUILD_BUG_ON(sizeof(struct sock_filter) != |
Alexei Starovoitov | 2695fb5 | 2014-07-24 16:38:21 -0700 | [diff] [blame] | 970 | sizeof(struct bpf_insn)); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 971 | |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 972 | /* Conversion cannot happen on overlapping memory areas, |
| 973 | * so we need to keep the user BPF around until the 2nd |
| 974 | * pass. At this time, the user BPF is stored in fp->insns. |
| 975 | */ |
| 976 | old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter), |
Daniel Borkmann | 658da93 | 2015-05-06 16:12:29 +0200 | [diff] [blame] | 977 | GFP_KERNEL | __GFP_NOWARN); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 978 | if (!old_prog) { |
| 979 | err = -ENOMEM; |
| 980 | goto out_err; |
| 981 | } |
| 982 | |
| 983 | /* 1st pass: calculate the new program length. */ |
Alexei Starovoitov | 8fb575c | 2014-07-30 20:34:15 -0700 | [diff] [blame] | 984 | err = bpf_convert_filter(old_prog, old_len, NULL, &new_len); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 985 | if (err) |
| 986 | goto out_err_free; |
| 987 | |
| 988 | /* Expand fp for appending the new filter representation. */ |
| 989 | old_fp = fp; |
Daniel Borkmann | 60a3b22 | 2014-09-02 22:53:44 +0200 | [diff] [blame] | 990 | fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 991 | if (!fp) { |
| 992 | /* The old_fp is still around in case we couldn't |
| 993 | * allocate new memory, so uncharge on that one. |
| 994 | */ |
| 995 | fp = old_fp; |
| 996 | err = -ENOMEM; |
| 997 | goto out_err_free; |
| 998 | } |
| 999 | |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1000 | fp->len = new_len; |
| 1001 | |
Alexei Starovoitov | 2695fb5 | 2014-07-24 16:38:21 -0700 | [diff] [blame] | 1002 | /* 2nd pass: remap sock_filter insns into bpf_insn insns. */ |
Alexei Starovoitov | 8fb575c | 2014-07-30 20:34:15 -0700 | [diff] [blame] | 1003 | err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1004 | if (err) |
Alexei Starovoitov	|	8fb575c	|	2014-07-30 20:34:15 -0700	|	[diff] [blame]	|	1005	| 		/* 2nd bpf_convert_filter() can fail only if it fails |
Alexei Starovoitov	|	bd4cf0e	|	2014-03-28 18:58:25 +0100	|	[diff] [blame]	|	1006	| 		 * to allocate memory; remapping itself must succeed. |
|	1007	| 		 * Note that at this point old_fp has already been |
Alexei Starovoitov	|	278571b	|	2014-07-30 20:34:12 -0700	|	[diff] [blame]	|	1008	| 		 * released by bpf_prog_realloc(). |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1009 | */ |
| 1010 | goto out_err_free; |
| 1011 | |
Daniel Borkmann | d1c55ab | 2016-05-13 19:08:31 +0200 | [diff] [blame] | 1012 | fp = bpf_prog_select_runtime(fp, &err); |
Alexei Starovoitov | a3d6dd6 | 2018-01-29 02:48:56 +0100 | [diff] [blame] | 1013 | if (err) |
| 1014 | goto out_err_free; |
Alexei Starovoitov | 5fe821a | 2014-05-19 14:56:14 -0700 | [diff] [blame] | 1015 | |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1016 | kfree(old_prog); |
| 1017 | return fp; |
| 1018 | |
| 1019 | out_err_free: |
| 1020 | kfree(old_prog); |
| 1021 | out_err: |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1022 | __bpf_prog_release(fp); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1023 | return ERR_PTR(err); |
| 1024 | } |
| 1025 | |
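/* Note on the two passes above: a single classic insn may expand to
 * several eBPF insns, so the new length has to be computed first. As a
 * rough example, a classic BPF_RET | BPF_K insn is remapped into a move
 * of K into R0 followed by BPF_EXIT_INSN(), i.e. one insn becomes two
 * in the eBPF image.
 */
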
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1026 | static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp, |
| 1027 | bpf_aux_classic_check_t trans) |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1028 | { |
| 1029 | int err; |
| 1030 | |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1031 | fp->bpf_func = NULL; |
Daniel Borkmann | a91263d | 2015-09-30 01:41:50 +0200 | [diff] [blame] | 1032 | fp->jited = 0; |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1033 | |
Alexei Starovoitov | 4df95ff | 2014-07-30 20:34:14 -0700 | [diff] [blame] | 1034 | err = bpf_check_classic(fp->insns, fp->len); |
Leon Yu | 418c96a | 2014-06-01 05:37:25 +0000 | [diff] [blame] | 1035 | if (err) { |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1036 | __bpf_prog_release(fp); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1037 | return ERR_PTR(err); |
Leon Yu | 418c96a | 2014-06-01 05:37:25 +0000 | [diff] [blame] | 1038 | } |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1039 | |
Nicolas Schichan | 4ae92bc | 2015-05-06 16:12:27 +0200 | [diff] [blame] | 1040 | /* There might be additional checks and transformations |
| 1041 | * needed on classic filters, f.e. in case of seccomp. |
| 1042 | */ |
| 1043 | if (trans) { |
| 1044 | err = trans(fp->insns, fp->len); |
| 1045 | if (err) { |
| 1046 | __bpf_prog_release(fp); |
| 1047 | return ERR_PTR(err); |
| 1048 | } |
| 1049 | } |
| 1050 | |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1051 | /* Probe if we can JIT compile the filter and if so, do |
| 1052 | * the compilation of the filter. |
| 1053 | */ |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1054 | bpf_jit_compile(fp); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1055 | |
| 1056 | /* JIT compiler couldn't process this filter, so do the |
| 1057 | * internal BPF translation for the optimized interpreter. |
| 1058 | */ |
Alexei Starovoitov | 5fe821a | 2014-05-19 14:56:14 -0700 | [diff] [blame] | 1059 | if (!fp->jited) |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1060 | fp = bpf_migrate_filter(fp); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1061 | |
| 1062 | return fp; |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1063 | } |
| 1064 | |
| 1065 | /** |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1066 | * bpf_prog_create - create an unattached filter |
Randy Dunlap | c6c4b97 | 2012-06-08 14:01:44 +0000 | [diff] [blame] | 1067 | * @pfp: the unattached filter that is created |
Tobias Klauser | 677a9fd | 2014-06-24 15:33:21 +0200 | [diff] [blame] | 1068 | * @fprog: the filter program |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1069 | * |
Randy Dunlap | c6c4b97 | 2012-06-08 14:01:44 +0000 | [diff] [blame] | 1070 | * Create a filter independent of any socket. We first run some |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1071 | * sanity checks on it to make sure it does not explode on us later. |
|	1072	|  *	If an error occurs or there is insufficient memory for the filter, |
| 1073 | * a negative errno code is returned. On success the return is zero. |
| 1074 | */ |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1075 | int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1076 | { |
Alexei Starovoitov | 009937e | 2014-07-30 20:34:13 -0700 | [diff] [blame] | 1077 | unsigned int fsize = bpf_classic_proglen(fprog); |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1078 | struct bpf_prog *fp; |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1079 | |
| 1080 | /* Make sure new filter is there and in the right amounts. */ |
Daniel Borkmann | f7bd9e3 | 2016-06-10 21:19:07 +0200 | [diff] [blame] | 1081 | if (!bpf_check_basics_ok(fprog->filter, fprog->len)) |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1082 | return -EINVAL; |
| 1083 | |
Daniel Borkmann | 60a3b22 | 2014-09-02 22:53:44 +0200 | [diff] [blame] | 1084 | fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1085 | if (!fp) |
| 1086 | return -ENOMEM; |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 1087 | |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1088 | memcpy(fp->insns, fprog->filter, fsize); |
| 1089 | |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1090 | fp->len = fprog->len; |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 1091 | /* Since unattached filters are not copied back to user |
| 1092 | * space through sk_get_filter(), we do not need to hold |
| 1093 | * a copy here, and can spare us the work. |
| 1094 | */ |
| 1095 | fp->orig_prog = NULL; |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1096 | |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1097 | /* bpf_prepare_filter() already takes care of freeing |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1098 | * memory in case something goes wrong. |
| 1099 | */ |
Nicolas Schichan | 4ae92bc | 2015-05-06 16:12:27 +0200 | [diff] [blame] | 1100 | fp = bpf_prepare_filter(fp, NULL); |
Alexei Starovoitov | bd4cf0e | 2014-03-28 18:58:25 +0100 | [diff] [blame] | 1101 | if (IS_ERR(fp)) |
| 1102 | return PTR_ERR(fp); |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1103 | |
| 1104 | *pfp = fp; |
| 1105 | return 0; |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1106 | } |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1107 | EXPORT_SYMBOL_GPL(bpf_prog_create); |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1108 | |
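/* Minimal in-kernel usage sketch for the above; the accept-all filter
 * and its names are hypothetical, the calling pattern is not:
 *
 *	static struct sock_filter accept_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	static struct sock_fprog_kern accept_fprog = {
 *		.len	= ARRAY_SIZE(accept_all),
 *		.filter	= accept_all,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &accept_fprog);
 *
 * with a matching bpf_prog_destroy(prog) once the filter is no longer
 * needed.
 */
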
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1109 | /** |
| 1110 | * bpf_prog_create_from_user - create an unattached filter from user buffer |
| 1111 | * @pfp: the unattached filter that is created |
| 1112 | * @fprog: the filter program |
| 1113 | * @trans: post-classic verifier transformation handler |
Daniel Borkmann | bab1899 | 2015-10-02 15:17:33 +0200 | [diff] [blame] | 1114 | * @save_orig: save classic BPF program |
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1115 | * |
| 1116 | * This function effectively does the same as bpf_prog_create(), only |
|	1117	|  *	that it builds up its insns buffer from a user space provided buffer. |
| 1118 | * It also allows for passing a bpf_aux_classic_check_t handler. |
| 1119 | */ |
| 1120 | int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, |
Daniel Borkmann | bab1899 | 2015-10-02 15:17:33 +0200 | [diff] [blame] | 1121 | bpf_aux_classic_check_t trans, bool save_orig) |
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1122 | { |
| 1123 | unsigned int fsize = bpf_classic_proglen(fprog); |
| 1124 | struct bpf_prog *fp; |
Daniel Borkmann | bab1899 | 2015-10-02 15:17:33 +0200 | [diff] [blame] | 1125 | int err; |
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1126 | |
| 1127 | /* Make sure new filter is there and in the right amounts. */ |
Daniel Borkmann | f7bd9e3 | 2016-06-10 21:19:07 +0200 | [diff] [blame] | 1128 | if (!bpf_check_basics_ok(fprog->filter, fprog->len)) |
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1129 | return -EINVAL; |
| 1130 | |
| 1131 | fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); |
| 1132 | if (!fp) |
| 1133 | return -ENOMEM; |
| 1134 | |
| 1135 | if (copy_from_user(fp->insns, fprog->filter, fsize)) { |
| 1136 | __bpf_prog_free(fp); |
| 1137 | return -EFAULT; |
| 1138 | } |
| 1139 | |
| 1140 | fp->len = fprog->len; |
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1141 | fp->orig_prog = NULL; |
| 1142 | |
Daniel Borkmann | bab1899 | 2015-10-02 15:17:33 +0200 | [diff] [blame] | 1143 | if (save_orig) { |
| 1144 | err = bpf_prog_store_orig_filter(fp, fprog); |
| 1145 | if (err) { |
| 1146 | __bpf_prog_free(fp); |
| 1147 | return -ENOMEM; |
| 1148 | } |
| 1149 | } |
| 1150 | |
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1151 | /* bpf_prepare_filter() already takes care of freeing |
| 1152 | * memory in case something goes wrong. |
| 1153 | */ |
| 1154 | fp = bpf_prepare_filter(fp, trans); |
| 1155 | if (IS_ERR(fp)) |
| 1156 | return PTR_ERR(fp); |
| 1157 | |
| 1158 | *pfp = fp; |
| 1159 | return 0; |
| 1160 | } |
David S. Miller | 2ea273d | 2015-08-17 14:37:06 -0700 | [diff] [blame] | 1161 | EXPORT_SYMBOL_GPL(bpf_prog_create_from_user); |
Daniel Borkmann | ac67eb2 | 2015-05-06 16:12:30 +0200 | [diff] [blame] | 1162 | |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1163 | void bpf_prog_destroy(struct bpf_prog *fp) |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1164 | { |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1165 | __bpf_prog_release(fp); |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1166 | } |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1167 | EXPORT_SYMBOL_GPL(bpf_prog_destroy); |
Jiri Pirko | 302d663 | 2012-03-31 11:01:19 +0000 | [diff] [blame] | 1168 | |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 1169 | static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) |
Daniel Borkmann | 49b31e5 | 2015-03-02 12:25:51 +0100 | [diff] [blame] | 1170 | { |
| 1171 | struct sk_filter *fp, *old_fp; |
| 1172 | |
| 1173 | fp = kmalloc(sizeof(*fp), GFP_KERNEL); |
| 1174 | if (!fp) |
| 1175 | return -ENOMEM; |
| 1176 | |
| 1177 | fp->prog = prog; |
| 1178 | atomic_set(&fp->refcnt, 0); |
| 1179 | |
| 1180 | if (!sk_filter_charge(sk, fp)) { |
| 1181 | kfree(fp); |
| 1182 | return -ENOMEM; |
| 1183 | } |
| 1184 | |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 1185 | old_fp = rcu_dereference_protected(sk->sk_filter, |
| 1186 | lockdep_sock_is_held(sk)); |
Daniel Borkmann | 49b31e5 | 2015-03-02 12:25:51 +0100 | [diff] [blame] | 1187 | rcu_assign_pointer(sk->sk_filter, fp); |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 1188 | |
Daniel Borkmann | 49b31e5 | 2015-03-02 12:25:51 +0100 | [diff] [blame] | 1189 | if (old_fp) |
| 1190 | sk_filter_uncharge(sk, old_fp); |
| 1191 | |
| 1192 | return 0; |
| 1193 | } |
| 1194 | |
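/* For context, a sketch of the RCU reader side that pairs with the
 * publish above, roughly as done in sk_filter_trim_cap(); error
 * handling elided:
 *
 *	rcu_read_lock();
 *	filter = rcu_dereference(sk->sk_filter);
 *	if (filter)
 *		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
 *	rcu_read_unlock();
 */
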
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1195 | static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk) |
| 1196 | { |
| 1197 | struct bpf_prog *old_prog; |
| 1198 | int err; |
| 1199 | |
| 1200 | if (bpf_prog_size(prog->len) > sysctl_optmem_max) |
| 1201 | return -ENOMEM; |
| 1202 | |
Craig Gallek | fa46349 | 2016-02-10 11:50:39 -0500 | [diff] [blame] | 1203 | if (sk_unhashed(sk) && sk->sk_reuseport) { |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1204 | err = reuseport_alloc(sk); |
| 1205 | if (err) |
| 1206 | return err; |
| 1207 | } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) { |
| 1208 | /* The socket wasn't bound with SO_REUSEPORT */ |
| 1209 | return -EINVAL; |
| 1210 | } |
| 1211 | |
| 1212 | old_prog = reuseport_attach_prog(sk, prog); |
| 1213 | if (old_prog) |
| 1214 | bpf_prog_destroy(old_prog); |
| 1215 | |
| 1216 | return 0; |
| 1217 | } |
| 1218 | |
| 1219 | static |
| 1220 | struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) |
| 1221 | { |
| 1222 | unsigned int fsize = bpf_classic_proglen(fprog); |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1223 | struct bpf_prog *prog; |
| 1224 | int err; |
| 1225 | |
| 1226 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
| 1227 | return ERR_PTR(-EPERM); |
| 1228 | |
| 1229 | /* Make sure new filter is there and in the right amounts. */ |
Daniel Borkmann | f7bd9e3 | 2016-06-10 21:19:07 +0200 | [diff] [blame] | 1230 | if (!bpf_check_basics_ok(fprog->filter, fprog->len)) |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1231 | return ERR_PTR(-EINVAL); |
| 1232 | |
Daniel Borkmann | f7bd9e3 | 2016-06-10 21:19:07 +0200 | [diff] [blame] | 1233 | prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1234 | if (!prog) |
| 1235 | return ERR_PTR(-ENOMEM); |
| 1236 | |
| 1237 | if (copy_from_user(prog->insns, fprog->filter, fsize)) { |
| 1238 | __bpf_prog_free(prog); |
| 1239 | return ERR_PTR(-EFAULT); |
| 1240 | } |
| 1241 | |
| 1242 | prog->len = fprog->len; |
| 1243 | |
| 1244 | err = bpf_prog_store_orig_filter(prog, fprog); |
| 1245 | if (err) { |
| 1246 | __bpf_prog_free(prog); |
| 1247 | return ERR_PTR(-ENOMEM); |
| 1248 | } |
| 1249 | |
| 1250 | /* bpf_prepare_filter() already takes care of freeing |
| 1251 | * memory in case something goes wrong. |
| 1252 | */ |
| 1253 | return bpf_prepare_filter(prog, NULL); |
| 1254 | } |
| 1255 | |
Pavel Emelyanov | 47e958e | 2007-10-17 21:22:42 -0700 | [diff] [blame] | 1256 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1257 | * sk_attach_filter - attach a socket filter |
| 1258 | * @fprog: the filter program |
| 1259 | * @sk: the socket to use |
| 1260 | * |
| 1261 | * Attach the user's filter code. We first run some sanity checks on |
| 1262 | * it to make sure it does not explode on us later. If an error |
| 1263 | * occurs or there is insufficient memory for the filter a negative |
|	1264	|  *	occurs or there is insufficient memory for the filter, a negative |
| 1265 | */ |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 1266 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | { |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1268 | struct bpf_prog *prog = __get_filter(fprog, sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | int err; |
| 1270 | |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1271 | if (IS_ERR(prog)) |
| 1272 | return PTR_ERR(prog); |
| 1273 | |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 1274 | err = __sk_attach_prog(prog, sk); |
Daniel Borkmann | 49b31e5 | 2015-03-02 12:25:51 +0100 | [diff] [blame] | 1275 | if (err < 0) { |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1276 | __bpf_prog_release(prog); |
Daniel Borkmann | 49b31e5 | 2015-03-02 12:25:51 +0100 | [diff] [blame] | 1277 | return err; |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 1278 | } |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 1279 | |
Pavel Emelyanov | d3904b7 | 2007-10-17 21:22:17 -0700 | [diff] [blame] | 1280 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1281 | } |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 1282 | EXPORT_SYMBOL_GPL(sk_attach_filter); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | |
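/* User-space sketch of the path into sk_attach_filter() via
 * setsockopt(); the accept-all filter below is hypothetical:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *	};
 *	struct sock_fprog bpf = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */
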
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1284 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 1285 | { |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1286 | struct bpf_prog *prog = __get_filter(fprog, sk); |
Daniel Borkmann | 49b31e5 | 2015-03-02 12:25:51 +0100 | [diff] [blame] | 1287 | int err; |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 1288 | |
Alexei Starovoitov | 198bf1b | 2014-12-10 20:14:55 -0800 | [diff] [blame] | 1289 | if (IS_ERR(prog)) |
| 1290 | return PTR_ERR(prog); |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 1291 | |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1292 | err = __reuseport_attach_prog(prog, sk); |
| 1293 | if (err < 0) { |
| 1294 | __bpf_prog_release(prog); |
| 1295 | return err; |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 1296 | } |
| 1297 | |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1298 | return 0; |
| 1299 | } |
| 1300 | |
| 1301 | static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) |
| 1302 | { |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1303 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
| 1304 | return ERR_PTR(-EPERM); |
| 1305 | |
Daniel Borkmann | 113214b | 2016-06-30 17:24:44 +0200 | [diff] [blame] | 1306 | return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); |
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1307 | } |
| 1308 | |
| 1309 | int sk_attach_bpf(u32 ufd, struct sock *sk) |
| 1310 | { |
| 1311 | struct bpf_prog *prog = __get_bpf(ufd, sk); |
| 1312 | int err; |
| 1313 | |
| 1314 | if (IS_ERR(prog)) |
| 1315 | return PTR_ERR(prog); |
| 1316 | |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 1317 | err = __sk_attach_prog(prog, sk); |
Daniel Borkmann | 49b31e5 | 2015-03-02 12:25:51 +0100 | [diff] [blame] | 1318 | if (err < 0) { |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 1319 | bpf_prog_put(prog); |
Daniel Borkmann | 49b31e5 | 2015-03-02 12:25:51 +0100 | [diff] [blame] | 1320 | return err; |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 1321 | } |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 1322 | |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 1323 | return 0; |
| 1324 | } |
| 1325 | |
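/* User-space sketch for the eBPF variant: the fd must come from a
 * prior BPF_PROG_LOAD of type BPF_PROG_TYPE_SOCKET_FILTER, which
 * __get_bpf() enforces below; attr contents elided:
 *
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
 *		   sizeof(prog_fd));
 */
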
Craig Gallek | 538950a | 2016-01-04 17:41:47 -0500 | [diff] [blame] | 1326 | int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) |
| 1327 | { |
| 1328 | struct bpf_prog *prog = __get_bpf(ufd, sk); |
| 1329 | int err; |
| 1330 | |
| 1331 | if (IS_ERR(prog)) |
| 1332 | return PTR_ERR(prog); |
| 1333 | |
| 1334 | err = __reuseport_attach_prog(prog, sk); |
| 1335 | if (err < 0) { |
| 1336 | bpf_prog_put(prog); |
| 1337 | return err; |
| 1338 | } |
| 1339 | |
| 1340 | return 0; |
| 1341 | } |
| 1342 | |
Daniel Borkmann | 21cafc1 | 2016-02-19 23:05:24 +0100 | [diff] [blame] | 1343 | struct bpf_scratchpad { |
| 1344 | union { |
| 1345 | __be32 diff[MAX_BPF_STACK / sizeof(__be32)]; |
| 1346 | u8 buff[MAX_BPF_STACK]; |
| 1347 | }; |
| 1348 | }; |
| 1349 | |
| 1350 | static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1351 | |
Daniel Borkmann | 5293efe | 2016-08-18 01:00:39 +0200 | [diff] [blame] | 1352 | static inline int __bpf_try_make_writable(struct sk_buff *skb, |
| 1353 | unsigned int write_len) |
| 1354 | { |
| 1355 | return skb_ensure_writable(skb, write_len); |
| 1356 | } |
| 1357 | |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1358 | static inline int bpf_try_make_writable(struct sk_buff *skb, |
| 1359 | unsigned int write_len) |
| 1360 | { |
Daniel Borkmann | 5293efe | 2016-08-18 01:00:39 +0200 | [diff] [blame] | 1361 | int err = __bpf_try_make_writable(skb, write_len); |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1362 | |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1363 | bpf_compute_data_end(skb); |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1364 | return err; |
| 1365 | } |
| 1366 | |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 1367 | static int bpf_try_make_head_writable(struct sk_buff *skb) |
| 1368 | { |
| 1369 | return bpf_try_make_writable(skb, skb_headlen(skb)); |
| 1370 | } |
| 1371 | |
Daniel Borkmann | a2bfe6b | 2016-08-05 00:11:11 +0200 | [diff] [blame] | 1372 | static inline void bpf_push_mac_rcsum(struct sk_buff *skb) |
| 1373 | { |
| 1374 | if (skb_at_tc_ingress(skb)) |
| 1375 | skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); |
| 1376 | } |
| 1377 | |
Daniel Borkmann | 8065694 | 2016-08-05 00:11:13 +0200 | [diff] [blame] | 1378 | static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) |
| 1379 | { |
| 1380 | if (skb_at_tc_ingress(skb)) |
| 1381 | skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); |
| 1382 | } |
| 1383 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1384 | BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, |
| 1385 | const void *, from, u32, len, u64, flags) |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 1386 | { |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 1387 | void *ptr; |
| 1388 | |
Daniel Borkmann | 8afd54c | 2016-03-04 15:15:03 +0100 | [diff] [blame] | 1389 | if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1390 | return -EINVAL; |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1391 | if (unlikely(offset > 0xffff)) |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 1392 | return -EFAULT; |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1393 | if (unlikely(bpf_try_make_writable(skb, offset + len))) |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 1394 | return -EFAULT; |
| 1395 | |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1396 | ptr = skb->data + offset; |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1397 | if (flags & BPF_F_RECOMPUTE_CSUM) |
Daniel Borkmann | 479ffcc | 2016-08-05 00:11:12 +0200 | [diff] [blame] | 1398 | __skb_postpull_rcsum(skb, ptr, len, offset); |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 1399 | |
| 1400 | memcpy(ptr, from, len); |
| 1401 | |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1402 | if (flags & BPF_F_RECOMPUTE_CSUM) |
Daniel Borkmann | 479ffcc | 2016-08-05 00:11:12 +0200 | [diff] [blame] | 1403 | __skb_postpush_rcsum(skb, ptr, len, offset); |
Daniel Borkmann | 8afd54c | 2016-03-04 15:15:03 +0100 | [diff] [blame] | 1404 | if (flags & BPF_F_INVALIDATE_HASH) |
| 1405 | skb_clear_hash(skb); |
Daniel Borkmann | f8ffad69 | 2016-01-07 15:50:23 +0100 | [diff] [blame] | 1406 | |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 1407 | return 0; |
| 1408 | } |
| 1409 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 1410 | static const struct bpf_func_proto bpf_skb_store_bytes_proto = { |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 1411 | .func = bpf_skb_store_bytes, |
| 1412 | .gpl_only = false, |
| 1413 | .ret_type = RET_INTEGER, |
| 1414 | .arg1_type = ARG_PTR_TO_CTX, |
| 1415 | .arg2_type = ARG_ANYTHING, |
| 1416 | .arg3_type = ARG_PTR_TO_STACK, |
| 1417 | .arg4_type = ARG_CONST_STACK_SIZE, |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1418 | .arg5_type = ARG_ANYTHING, |
| 1419 | }; |
| 1420 | |
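/* eBPF program sketch for the helper above, assuming an untagged
 * Ethernet + IPv4 frame: rewrite the TOS byte while keeping skb->csum
 * valid via BPF_F_RECOMPUTE_CSUM. Note this does not fix the IP header
 * checksum itself; that would need bpf_l3_csum_replace():
 *
 *	__u8 tos = 0x10;
 *
 *	bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, tos),
 *			    &tos, sizeof(tos), BPF_F_RECOMPUTE_CSUM);
 */
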
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1421 | BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, |
| 1422 | void *, to, u32, len) |
Daniel Borkmann | 05c74e5 | 2015-12-17 23:51:53 +0100 | [diff] [blame] | 1423 | { |
Daniel Borkmann | 05c74e5 | 2015-12-17 23:51:53 +0100 | [diff] [blame] | 1424 | void *ptr; |
| 1425 | |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1426 | if (unlikely(offset > 0xffff)) |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 1427 | goto err_clear; |
Daniel Borkmann | 05c74e5 | 2015-12-17 23:51:53 +0100 | [diff] [blame] | 1428 | |
| 1429 | ptr = skb_header_pointer(skb, offset, len, to); |
| 1430 | if (unlikely(!ptr)) |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 1431 | goto err_clear; |
Daniel Borkmann | 05c74e5 | 2015-12-17 23:51:53 +0100 | [diff] [blame] | 1432 | if (ptr != to) |
| 1433 | memcpy(to, ptr, len); |
| 1434 | |
| 1435 | return 0; |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 1436 | err_clear: |
| 1437 | memset(to, 0, len); |
| 1438 | return -EFAULT; |
Daniel Borkmann | 05c74e5 | 2015-12-17 23:51:53 +0100 | [diff] [blame] | 1439 | } |
| 1440 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 1441 | static const struct bpf_func_proto bpf_skb_load_bytes_proto = { |
Daniel Borkmann | 05c74e5 | 2015-12-17 23:51:53 +0100 | [diff] [blame] | 1442 | .func = bpf_skb_load_bytes, |
| 1443 | .gpl_only = false, |
| 1444 | .ret_type = RET_INTEGER, |
| 1445 | .arg1_type = ARG_PTR_TO_CTX, |
| 1446 | .arg2_type = ARG_ANYTHING, |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 1447 | .arg3_type = ARG_PTR_TO_RAW_STACK, |
Daniel Borkmann | 05c74e5 | 2015-12-17 23:51:53 +0100 | [diff] [blame] | 1448 | .arg4_type = ARG_CONST_STACK_SIZE, |
| 1449 | }; |
| 1450 | |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 1451 | BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) |
| 1452 | { |
|	1453	| 	/* The idea is the following: should the needed direct read/write |
|	1454	| 	 * test fail at runtime, we can pull in more data and redo the |
|	1455	| 	 * test, since implicitly all previous checks are invalidated here. |
|	1456	| 	 * |
|	1457	| 	 * Or, since we know how much we need to make read/writable, |
|	1458	| 	 * this can be done once at the program beginning for the direct |
|	1459	| 	 * access case. By this we overcome the limitation of only the |
|	1460	| 	 * current headroom being accessible. |
| 1461 | */ |
| 1462 | return bpf_try_make_writable(skb, len ? : skb_headlen(skb)); |
| 1463 | } |
| 1464 | |
| 1465 | static const struct bpf_func_proto bpf_skb_pull_data_proto = { |
| 1466 | .func = bpf_skb_pull_data, |
| 1467 | .gpl_only = false, |
| 1468 | .ret_type = RET_INTEGER, |
| 1469 | .arg1_type = ARG_PTR_TO_CTX, |
| 1470 | .arg2_type = ARG_ANYTHING, |
| 1471 | }; |
| 1472 | |
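/* eBPF program sketch of the retry pattern described above, using the
 * Ethernet header length as an assumed example:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + ETH_HLEN > data_end) {
 *		if (bpf_skb_pull_data(skb, ETH_HLEN))
 *			return TC_ACT_SHOT;
 *		// re-load: the pull invalidated all previous checks
 *		data = (void *)(long)skb->data;
 *		data_end = (void *)(long)skb->data_end;
 *		if (data + ETH_HLEN > data_end)
 *			return TC_ACT_SHOT;
 *	}
 */
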
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1473 | BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, |
| 1474 | u64, from, u64, to, u64, flags) |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1475 | { |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1476 | __sum16 *ptr; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1477 | |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1478 | if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) |
| 1479 | return -EINVAL; |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1480 | if (unlikely(offset > 0xffff || offset & 1)) |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1481 | return -EFAULT; |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1482 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1483 | return -EFAULT; |
| 1484 | |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1485 | ptr = (__sum16 *)(skb->data + offset); |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1486 | switch (flags & BPF_F_HDR_FIELD_MASK) { |
Daniel Borkmann | 8050c0f | 2016-03-04 15:15:02 +0100 | [diff] [blame] | 1487 | case 0: |
| 1488 | if (unlikely(from != 0)) |
| 1489 | return -EINVAL; |
| 1490 | |
| 1491 | csum_replace_by_diff(ptr, to); |
| 1492 | break; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1493 | case 2: |
| 1494 | csum_replace2(ptr, from, to); |
| 1495 | break; |
| 1496 | case 4: |
| 1497 | csum_replace4(ptr, from, to); |
| 1498 | break; |
| 1499 | default: |
| 1500 | return -EINVAL; |
| 1501 | } |
| 1502 | |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1503 | return 0; |
| 1504 | } |
| 1505 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 1506 | static const struct bpf_func_proto bpf_l3_csum_replace_proto = { |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1507 | .func = bpf_l3_csum_replace, |
| 1508 | .gpl_only = false, |
| 1509 | .ret_type = RET_INTEGER, |
| 1510 | .arg1_type = ARG_PTR_TO_CTX, |
| 1511 | .arg2_type = ARG_ANYTHING, |
| 1512 | .arg3_type = ARG_ANYTHING, |
| 1513 | .arg4_type = ARG_ANYTHING, |
| 1514 | .arg5_type = ARG_ANYTHING, |
| 1515 | }; |
| 1516 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1517 | BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, |
| 1518 | u64, from, u64, to, u64, flags) |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1519 | { |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1520 | bool is_pseudo = flags & BPF_F_PSEUDO_HDR; |
Daniel Borkmann | 2f72959 | 2016-02-19 23:05:26 +0100 | [diff] [blame] | 1521 | bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1522 | __sum16 *ptr; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1523 | |
Daniel Borkmann | 2f72959 | 2016-02-19 23:05:26 +0100 | [diff] [blame] | 1524 | if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR | |
| 1525 | BPF_F_HDR_FIELD_MASK))) |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1526 | return -EINVAL; |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1527 | if (unlikely(offset > 0xffff || offset & 1)) |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1528 | return -EFAULT; |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1529 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1530 | return -EFAULT; |
| 1531 | |
Daniel Borkmann | 0ed661d | 2016-08-11 21:38:37 +0200 | [diff] [blame] | 1532 | ptr = (__sum16 *)(skb->data + offset); |
Daniel Borkmann | 2f72959 | 2016-02-19 23:05:26 +0100 | [diff] [blame] | 1533 | if (is_mmzero && !*ptr) |
| 1534 | return 0; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1535 | |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1536 | switch (flags & BPF_F_HDR_FIELD_MASK) { |
Daniel Borkmann | 7d67234 | 2016-02-19 23:05:23 +0100 | [diff] [blame] | 1537 | case 0: |
| 1538 | if (unlikely(from != 0)) |
| 1539 | return -EINVAL; |
| 1540 | |
| 1541 | inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); |
| 1542 | break; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1543 | case 2: |
| 1544 | inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); |
| 1545 | break; |
| 1546 | case 4: |
| 1547 | inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); |
| 1548 | break; |
| 1549 | default: |
| 1550 | return -EINVAL; |
| 1551 | } |
| 1552 | |
Daniel Borkmann | 2f72959 | 2016-02-19 23:05:26 +0100 | [diff] [blame] | 1553 | if (is_mmzero && !*ptr) |
| 1554 | *ptr = CSUM_MANGLED_0; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1555 | return 0; |
| 1556 | } |
| 1557 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 1558 | static const struct bpf_func_proto bpf_l4_csum_replace_proto = { |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 1559 | .func = bpf_l4_csum_replace, |
| 1560 | .gpl_only = false, |
| 1561 | .ret_type = RET_INTEGER, |
| 1562 | .arg1_type = ARG_PTR_TO_CTX, |
| 1563 | .arg2_type = ARG_ANYTHING, |
| 1564 | .arg3_type = ARG_ANYTHING, |
| 1565 | .arg4_type = ARG_ANYTHING, |
| 1566 | .arg5_type = ARG_ANYTHING, |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 1567 | }; |
| 1568 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1569 | BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, |
| 1570 | __be32 *, to, u32, to_size, __wsum, seed) |
Daniel Borkmann | 7d67234 | 2016-02-19 23:05:23 +0100 | [diff] [blame] | 1571 | { |
Daniel Borkmann | 21cafc1 | 2016-02-19 23:05:24 +0100 | [diff] [blame] | 1572 | struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1573 | u32 diff_size = from_size + to_size; |
Daniel Borkmann | 7d67234 | 2016-02-19 23:05:23 +0100 | [diff] [blame] | 1574 | int i, j = 0; |
| 1575 | |
| 1576 | /* This is quite flexible, some examples: |
| 1577 | * |
| 1578 | * from_size == 0, to_size > 0, seed := csum --> pushing data |
| 1579 | * from_size > 0, to_size == 0, seed := csum --> pulling data |
| 1580 | * from_size > 0, to_size > 0, seed := 0 --> diffing data |
| 1581 | * |
| 1582 | * Even for diffing, from_size and to_size don't need to be equal. |
| 1583 | */ |
| 1584 | if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) || |
| 1585 | diff_size > sizeof(sp->diff))) |
| 1586 | return -EINVAL; |
| 1587 | |
| 1588 | for (i = 0; i < from_size / sizeof(__be32); i++, j++) |
| 1589 | sp->diff[j] = ~from[i]; |
| 1590 | for (i = 0; i < to_size / sizeof(__be32); i++, j++) |
| 1591 | sp->diff[j] = to[i]; |
| 1592 | |
| 1593 | return csum_partial(sp->diff, diff_size, seed); |
| 1594 | } |
| 1595 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 1596 | static const struct bpf_func_proto bpf_csum_diff_proto = { |
Daniel Borkmann | 7d67234 | 2016-02-19 23:05:23 +0100 | [diff] [blame] | 1597 | .func = bpf_csum_diff, |
| 1598 | .gpl_only = false, |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 1599 | .pkt_access = true, |
Daniel Borkmann | 7d67234 | 2016-02-19 23:05:23 +0100 | [diff] [blame] | 1600 | .ret_type = RET_INTEGER, |
| 1601 | .arg1_type = ARG_PTR_TO_STACK, |
| 1602 | .arg2_type = ARG_CONST_STACK_SIZE_OR_ZERO, |
| 1603 | .arg3_type = ARG_PTR_TO_STACK, |
| 1604 | .arg4_type = ARG_CONST_STACK_SIZE_OR_ZERO, |
| 1605 | .arg5_type = ARG_ANYTHING, |
| 1606 | }; |
| 1607 | |
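/* eBPF program sketch of the typical pairing with the replace helpers
 * when rewriting an IPv4 destination address; tcp_csum_off and
 * ip_daddr_off are assumed offsets for a plain TCP/IPv4 frame:
 *
 *	__wsum diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
 *
 *	bpf_l4_csum_replace(skb, tcp_csum_off, 0, diff,
 *			    BPF_F_PSEUDO_HDR);
 *	bpf_skb_store_bytes(skb, ip_daddr_off, &new_ip, 4, 0);
 *
 * with an analogous bpf_l3_csum_replace() call for the IPv4 header
 * checksum.
 */
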
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 1608 | BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum) |
| 1609 | { |
| 1610 | /* The interface is to be used in combination with bpf_csum_diff() |
| 1611 | * for direct packet writes. csum rotation for alignment as well |
| 1612 | * as emulating csum_sub() can be done from the eBPF program. |
| 1613 | */ |
| 1614 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
| 1615 | return (skb->csum = csum_add(skb->csum, csum)); |
| 1616 | |
| 1617 | return -ENOTSUPP; |
| 1618 | } |
| 1619 | |
| 1620 | static const struct bpf_func_proto bpf_csum_update_proto = { |
| 1621 | .func = bpf_csum_update, |
| 1622 | .gpl_only = false, |
| 1623 | .ret_type = RET_INTEGER, |
| 1624 | .arg1_type = ARG_PTR_TO_CTX, |
| 1625 | .arg2_type = ARG_ANYTHING, |
| 1626 | }; |
| 1627 | |
Daniel Borkmann | a70b506 | 2016-06-10 21:19:06 +0200 | [diff] [blame] | 1628 | static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) |
| 1629 | { |
Daniel Borkmann | a70b506 | 2016-06-10 21:19:06 +0200 | [diff] [blame] | 1630 | return dev_forward_skb(dev, skb); |
| 1631 | } |
| 1632 | |
Martin KaFai Lau | 4e3264d | 2016-11-09 15:36:33 -0800 | [diff] [blame] | 1633 | static inline int __bpf_rx_skb_no_mac(struct net_device *dev, |
| 1634 | struct sk_buff *skb) |
| 1635 | { |
| 1636 | int ret = ____dev_forward_skb(dev, skb); |
| 1637 | |
| 1638 | if (likely(!ret)) { |
| 1639 | skb->dev = dev; |
| 1640 | ret = netif_rx(skb); |
| 1641 | } |
| 1642 | |
| 1643 | return ret; |
| 1644 | } |
| 1645 | |
Daniel Borkmann | a70b506 | 2016-06-10 21:19:06 +0200 | [diff] [blame] | 1646 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) |
| 1647 | { |
| 1648 | int ret; |
| 1649 | |
| 1650 | if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { |
| 1651 | net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); |
| 1652 | kfree_skb(skb); |
| 1653 | return -ENETDOWN; |
| 1654 | } |
| 1655 | |
| 1656 | skb->dev = dev; |
| 1657 | |
| 1658 | __this_cpu_inc(xmit_recursion); |
| 1659 | ret = dev_queue_xmit(skb); |
| 1660 | __this_cpu_dec(xmit_recursion); |
| 1661 | |
| 1662 | return ret; |
| 1663 | } |
| 1664 | |
Martin KaFai Lau | 4e3264d | 2016-11-09 15:36:33 -0800 | [diff] [blame] | 1665 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, |
| 1666 | u32 flags) |
| 1667 | { |
| 1668 | /* skb->mac_len is not set on normal egress */ |
| 1669 | unsigned int mlen = skb->network_header - skb->mac_header; |
| 1670 | |
| 1671 | __skb_pull(skb, mlen); |
| 1672 | |
| 1673 | /* At ingress, the mac header has already been pulled once. |
|	1674	| 	 * At egress, skb_postpull_rcsum() has to be done in case |
|	1675	| 	 * the skb originated from ingress (i.e. a forwarded skb) |
| 1676 | * to ensure that rcsum starts at net header. |
| 1677 | */ |
| 1678 | if (!skb_at_tc_ingress(skb)) |
| 1679 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); |
| 1680 | skb_pop_mac_header(skb); |
| 1681 | skb_reset_mac_len(skb); |
| 1682 | return flags & BPF_F_INGRESS ? |
| 1683 | __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); |
| 1684 | } |
| 1685 | |
| 1686 | static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, |
| 1687 | u32 flags) |
| 1688 | { |
| 1689 | bpf_push_mac_rcsum(skb); |
| 1690 | return flags & BPF_F_INGRESS ? |
| 1691 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); |
| 1692 | } |
| 1693 | |
| 1694 | static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, |
| 1695 | u32 flags) |
| 1696 | { |
| 1697 | switch (dev->type) { |
| 1698 | case ARPHRD_TUNNEL: |
| 1699 | case ARPHRD_TUNNEL6: |
| 1700 | case ARPHRD_SIT: |
| 1701 | case ARPHRD_IPGRE: |
| 1702 | case ARPHRD_VOID: |
| 1703 | case ARPHRD_NONE: |
| 1704 | return __bpf_redirect_no_mac(skb, dev, flags); |
| 1705 | default: |
| 1706 | return __bpf_redirect_common(skb, dev, flags); |
| 1707 | } |
| 1708 | } |
| 1709 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1710 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) |
Alexei Starovoitov | 3896d65 | 2015-06-02 16:03:14 -0700 | [diff] [blame] | 1711 | { |
Alexei Starovoitov | 3896d65 | 2015-06-02 16:03:14 -0700 | [diff] [blame] | 1712 | struct net_device *dev; |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 1713 | struct sk_buff *clone; |
| 1714 | int ret; |
Alexei Starovoitov | 3896d65 | 2015-06-02 16:03:14 -0700 | [diff] [blame] | 1715 | |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1716 | if (unlikely(flags & ~(BPF_F_INGRESS))) |
| 1717 | return -EINVAL; |
| 1718 | |
Alexei Starovoitov | 3896d65 | 2015-06-02 16:03:14 -0700 | [diff] [blame] | 1719 | dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); |
| 1720 | if (unlikely(!dev)) |
| 1721 | return -EINVAL; |
| 1722 | |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 1723 | clone = skb_clone(skb, GFP_ATOMIC); |
| 1724 | if (unlikely(!clone)) |
Alexei Starovoitov | 3896d65 | 2015-06-02 16:03:14 -0700 | [diff] [blame] | 1725 | return -ENOMEM; |
| 1726 | |
Daniel Borkmann	|	36bbef5	|	2016-09-20 00:26:13 +0200	|	[diff] [blame]	|	1727	| 	/* For direct write, we need to keep the invariant that the skbs |
|	1728	| 	 * we're dealing with are uncloned. Should uncloning fail here, |
|	1729	| 	 * we need to free the just generated clone so that the skb |
|	1730	| 	 * becomes uncloned once again. |
| 1731 | */ |
| 1732 | ret = bpf_try_make_head_writable(skb); |
| 1733 | if (unlikely(ret)) { |
| 1734 | kfree_skb(clone); |
| 1735 | return -ENOMEM; |
| 1736 | } |
| 1737 | |
Martin KaFai Lau | 4e3264d | 2016-11-09 15:36:33 -0800 | [diff] [blame] | 1738 | return __bpf_redirect(clone, dev, flags); |
Alexei Starovoitov | 3896d65 | 2015-06-02 16:03:14 -0700 | [diff] [blame] | 1739 | } |
| 1740 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 1741 | static const struct bpf_func_proto bpf_clone_redirect_proto = { |
Alexei Starovoitov | 3896d65 | 2015-06-02 16:03:14 -0700 | [diff] [blame] | 1742 | .func = bpf_clone_redirect, |
| 1743 | .gpl_only = false, |
| 1744 | .ret_type = RET_INTEGER, |
| 1745 | .arg1_type = ARG_PTR_TO_CTX, |
| 1746 | .arg2_type = ARG_ANYTHING, |
| 1747 | .arg3_type = ARG_ANYTHING, |
| 1748 | }; |
| 1749 | |
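/* eBPF program sketch: mirror the packet to the ingress path of an
 * assumed dst_ifindex while the original packet continues on its way:
 *
 *	bpf_clone_redirect(skb, dst_ifindex, BPF_F_INGRESS);
 *	return TC_ACT_OK;
 */
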
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 1750 | struct redirect_info { |
| 1751 | u32 ifindex; |
| 1752 | u32 flags; |
| 1753 | }; |
| 1754 | |
| 1755 | static DEFINE_PER_CPU(struct redirect_info, redirect_info); |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1756 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1757 | BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) |
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 1758 | { |
| 1759 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); |
| 1760 | |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1761 | if (unlikely(flags & ~(BPF_F_INGRESS))) |
| 1762 | return TC_ACT_SHOT; |
| 1763 | |
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 1764 | ri->ifindex = ifindex; |
| 1765 | ri->flags = flags; |
Daniel Borkmann | 781c53b | 2016-01-11 01:16:38 +0100 | [diff] [blame] | 1766 | |
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 1767 | return TC_ACT_REDIRECT; |
| 1768 | } |
| 1769 | |
| 1770 | int skb_do_redirect(struct sk_buff *skb) |
| 1771 | { |
| 1772 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); |
| 1773 | struct net_device *dev; |
| 1774 | |
| 1775 | dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex); |
| 1776 | ri->ifindex = 0; |
| 1777 | if (unlikely(!dev)) { |
| 1778 | kfree_skb(skb); |
| 1779 | return -EINVAL; |
| 1780 | } |
| 1781 | |
Martin KaFai Lau | 4e3264d | 2016-11-09 15:36:33 -0800 | [diff] [blame] | 1782 | return __bpf_redirect(skb, dev, ri->flags); |
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 1783 | } |
| 1784 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 1785 | static const struct bpf_func_proto bpf_redirect_proto = { |
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 1786 | .func = bpf_redirect, |
| 1787 | .gpl_only = false, |
| 1788 | .ret_type = RET_INTEGER, |
| 1789 | .arg1_type = ARG_ANYTHING, |
| 1790 | .arg2_type = ARG_ANYTHING, |
| 1791 | }; |
| 1792 | |
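/* eBPF program sketch: in contrast to bpf_clone_redirect(), this
 * variant only records ifindex/flags per cpu and leaves the actual
 * work to the TC_ACT_REDIRECT handling, making it the cheaper choice
 * when the original packet can be given up:
 *
 *	return bpf_redirect(dst_ifindex, 0);
 */
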
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1793 | BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) |
Daniel Borkmann | 8d20aab | 2015-07-15 14:21:42 +0200 | [diff] [blame] | 1794 | { |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1795 | return task_get_classid(skb); |
Daniel Borkmann | 8d20aab | 2015-07-15 14:21:42 +0200 | [diff] [blame] | 1796 | } |
| 1797 | |
| 1798 | static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { |
| 1799 | .func = bpf_get_cgroup_classid, |
| 1800 | .gpl_only = false, |
| 1801 | .ret_type = RET_INTEGER, |
| 1802 | .arg1_type = ARG_PTR_TO_CTX, |
| 1803 | }; |
| 1804 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1805 | BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) |
Daniel Borkmann | c46646d | 2015-09-30 01:41:51 +0200 | [diff] [blame] | 1806 | { |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1807 | return dst_tclassid(skb); |
Daniel Borkmann | c46646d | 2015-09-30 01:41:51 +0200 | [diff] [blame] | 1808 | } |
| 1809 | |
| 1810 | static const struct bpf_func_proto bpf_get_route_realm_proto = { |
| 1811 | .func = bpf_get_route_realm, |
| 1812 | .gpl_only = false, |
| 1813 | .ret_type = RET_INTEGER, |
| 1814 | .arg1_type = ARG_PTR_TO_CTX, |
| 1815 | }; |
| 1816 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1817 | BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) |
Daniel Borkmann | 13c5c24 | 2016-07-03 01:28:47 +0200 | [diff] [blame] | 1818 | { |
| 1819 | /* If skb_clear_hash() was called due to mangling, we can |
| 1820 | * trigger SW recalculation here. Later access to hash |
| 1821 | * can then use the inline skb->hash via context directly |
| 1822 | * instead of calling this helper again. |
| 1823 | */ |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1824 | return skb_get_hash(skb); |
Daniel Borkmann | 13c5c24 | 2016-07-03 01:28:47 +0200 | [diff] [blame] | 1825 | } |
| 1826 | |
| 1827 | static const struct bpf_func_proto bpf_get_hash_recalc_proto = { |
| 1828 | .func = bpf_get_hash_recalc, |
| 1829 | .gpl_only = false, |
| 1830 | .ret_type = RET_INTEGER, |
| 1831 | .arg1_type = ARG_PTR_TO_CTX, |
| 1832 | }; |
| 1833 | |
Daniel Borkmann | 7a4b28c | 2016-09-23 01:28:37 +0200 | [diff] [blame] | 1834 | BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) |
| 1835 | { |
|	1836	| 	/* After all direct packet writes, this can be used once for |
| 1837 | * triggering a lazy recalc on next skb_get_hash() invocation. |
| 1838 | */ |
| 1839 | skb_clear_hash(skb); |
| 1840 | return 0; |
| 1841 | } |
| 1842 | |
| 1843 | static const struct bpf_func_proto bpf_set_hash_invalid_proto = { |
| 1844 | .func = bpf_set_hash_invalid, |
| 1845 | .gpl_only = false, |
| 1846 | .ret_type = RET_INTEGER, |
| 1847 | .arg1_type = ARG_PTR_TO_CTX, |
| 1848 | }; |
| 1849 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1850 | BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, |
| 1851 | u16, vlan_tci) |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 1852 | { |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1853 | int ret; |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 1854 | |
| 1855 | if (unlikely(vlan_proto != htons(ETH_P_8021Q) && |
| 1856 | vlan_proto != htons(ETH_P_8021AD))) |
| 1857 | vlan_proto = htons(ETH_P_8021Q); |
| 1858 | |
Daniel Borkmann | 8065694 | 2016-08-05 00:11:13 +0200 | [diff] [blame] | 1859 | bpf_push_mac_rcsum(skb); |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1860 | ret = skb_vlan_push(skb, vlan_proto, vlan_tci); |
Daniel Borkmann | 8065694 | 2016-08-05 00:11:13 +0200 | [diff] [blame] | 1861 | bpf_pull_mac_rcsum(skb); |
| 1862 | |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1863 | bpf_compute_data_end(skb); |
| 1864 | return ret; |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 1865 | } |
| 1866 | |
| 1867 | const struct bpf_func_proto bpf_skb_vlan_push_proto = { |
| 1868 | .func = bpf_skb_vlan_push, |
| 1869 | .gpl_only = false, |
| 1870 | .ret_type = RET_INTEGER, |
| 1871 | .arg1_type = ARG_PTR_TO_CTX, |
| 1872 | .arg2_type = ARG_ANYTHING, |
| 1873 | .arg3_type = ARG_ANYTHING, |
| 1874 | }; |
Alexei Starovoitov | 4d9c5c5 | 2015-07-20 20:34:19 -0700 | [diff] [blame] | 1875 | EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto); |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 1876 | |
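/* eBPF program sketch; VLAN id 100 is an arbitrary example, and
 * bpf_htons() stands for whatever endianness conversion the program
 * environment provides:
 *
 *	bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 100);
 */
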
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 1877 | BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 1878 | { |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1879 | int ret; |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 1880 | |
Daniel Borkmann | 8065694 | 2016-08-05 00:11:13 +0200 | [diff] [blame] | 1881 | bpf_push_mac_rcsum(skb); |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1882 | ret = skb_vlan_pop(skb); |
Daniel Borkmann | 8065694 | 2016-08-05 00:11:13 +0200 | [diff] [blame] | 1883 | bpf_pull_mac_rcsum(skb); |
| 1884 | |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 1885 | bpf_compute_data_end(skb); |
| 1886 | return ret; |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 1887 | } |
| 1888 | |
| 1889 | const struct bpf_func_proto bpf_skb_vlan_pop_proto = { |
| 1890 | .func = bpf_skb_vlan_pop, |
| 1891 | .gpl_only = false, |
| 1892 | .ret_type = RET_INTEGER, |
| 1893 | .arg1_type = ARG_PTR_TO_CTX, |
| 1894 | }; |
Alexei Starovoitov | 4d9c5c5 | 2015-07-20 20:34:19 -0700 | [diff] [blame] | 1895 | EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto); |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 1896 | |
Daniel Borkmann | 6578171 | 2016-06-28 12:18:27 +0200 | [diff] [blame] | 1897 | static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) |
| 1898 | { |
| 1899 | /* Caller already did skb_cow() with len as headroom, |
| 1900 | * so no need to do it here. |
| 1901 | */ |
| 1902 | skb_push(skb, len); |
| 1903 | memmove(skb->data, skb->data + len, off); |
| 1904 | memset(skb->data + off, 0, len); |
| 1905 | |
| 1906 | /* No skb_postpush_rcsum(skb, skb->data + off, len) |
| 1907 | * needed here as it does not change the skb->csum |
| 1908 | * result for checksum complete when summing over |
| 1909 | * zeroed blocks. |
| 1910 | */ |
| 1911 | return 0; |
| 1912 | } |
| 1913 | |
| 1914 | static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) |
| 1915 | { |
| 1916 | /* skb_ensure_writable() is not needed here, as we're |
| 1917 | * already working on an uncloned skb. |
| 1918 | */ |
| 1919 | if (unlikely(!pskb_may_pull(skb, off + len))) |
| 1920 | return -ENOMEM; |
| 1921 | |
| 1922 | skb_postpull_rcsum(skb, skb->data + off, len); |
| 1923 | memmove(skb->data + len, skb->data, off); |
| 1924 | __skb_pull(skb, len); |
| 1925 | |
| 1926 | return 0; |
| 1927 | } |
| 1928 | |
| 1929 | static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) |
| 1930 | { |
| 1931 | bool trans_same = skb->transport_header == skb->network_header; |
| 1932 | int ret; |
| 1933 | |
| 1934 | /* There's no need for __skb_push()/__skb_pull() pair to |
| 1935 | * get to the start of the mac header as we're guaranteed |
| 1936 | * to always start from here under eBPF. |
| 1937 | */ |
| 1938 | ret = bpf_skb_generic_push(skb, off, len); |
| 1939 | if (likely(!ret)) { |
| 1940 | skb->mac_header -= len; |
| 1941 | skb->network_header -= len; |
| 1942 | if (trans_same) |
| 1943 | skb->transport_header = skb->network_header; |
| 1944 | } |
| 1945 | |
| 1946 | return ret; |
| 1947 | } |
| 1948 | |
| 1949 | static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) |
| 1950 | { |
| 1951 | bool trans_same = skb->transport_header == skb->network_header; |
| 1952 | int ret; |
| 1953 | |
| 1954 | /* Same here, __skb_push()/__skb_pull() pair not needed. */ |
| 1955 | ret = bpf_skb_generic_pop(skb, off, len); |
| 1956 | if (likely(!ret)) { |
| 1957 | skb->mac_header += len; |
| 1958 | skb->network_header += len; |
| 1959 | if (trans_same) |
| 1960 | skb->transport_header = skb->network_header; |
| 1961 | } |
| 1962 | |
| 1963 | return ret; |
| 1964 | } |
| 1965 | |
| 1966 | static int bpf_skb_proto_4_to_6(struct sk_buff *skb) |
| 1967 | { |
| 1968 | const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); |
| 1969 | u32 off = skb->network_header - skb->mac_header; |
| 1970 | int ret; |
| 1971 | |
| 1972 | ret = skb_cow(skb, len_diff); |
| 1973 | if (unlikely(ret < 0)) |
| 1974 | return ret; |
| 1975 | |
| 1976 | ret = bpf_skb_net_hdr_push(skb, off, len_diff); |
| 1977 | if (unlikely(ret < 0)) |
| 1978 | return ret; |
| 1979 | |
| 1980 | if (skb_is_gso(skb)) { |
| 1981 | /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to |
| 1982 | * be changed into SKB_GSO_TCPV6. |
| 1983 | */ |
| 1984 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { |
| 1985 | skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; |
| 1986 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; |
| 1987 | } |
| 1988 | |
| 1989 | /* Due to IPv6 header, MSS needs to be downgraded. */ |
| 1990 | skb_shinfo(skb)->gso_size -= len_diff; |
| 1991 | /* Header must be checked, and gso_segs recomputed. */ |
| 1992 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; |
| 1993 | skb_shinfo(skb)->gso_segs = 0; |
| 1994 | } |
| 1995 | |
| 1996 | skb->protocol = htons(ETH_P_IPV6); |
| 1997 | skb_clear_hash(skb); |
| 1998 | |
| 1999 | return 0; |
| 2000 | } |
| 2001 | |
| 2002 | static int bpf_skb_proto_6_to_4(struct sk_buff *skb) |
| 2003 | { |
| 2004 | const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); |
| 2005 | u32 off = skb->network_header - skb->mac_header; |
| 2006 | int ret; |
| 2007 | |
| 2008 | ret = skb_unclone(skb, GFP_ATOMIC); |
| 2009 | if (unlikely(ret < 0)) |
| 2010 | return ret; |
| 2011 | |
| 2012 | ret = bpf_skb_net_hdr_pop(skb, off, len_diff); |
| 2013 | if (unlikely(ret < 0)) |
| 2014 | return ret; |
| 2015 | |
| 2016 | if (skb_is_gso(skb)) { |
| 2017 | /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to |
| 2018 | * be changed into SKB_GSO_TCPV4. |
| 2019 | */ |
| 2020 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { |
| 2021 | skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; |
| 2022 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; |
| 2023 | } |
| 2024 | |
| 2025 | /* Due to the smaller IPv4 header, the MSS can be increased. */
| 2026 | skb_shinfo(skb)->gso_size += len_diff; |
| 2027 | /* Header must be checked, and gso_segs recomputed. */ |
| 2028 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; |
| 2029 | skb_shinfo(skb)->gso_segs = 0; |
| 2030 | } |
| 2031 | |
| 2032 | skb->protocol = htons(ETH_P_IP); |
| 2033 | skb_clear_hash(skb); |
| 2034 | |
| 2035 | return 0; |
| 2036 | } |
| 2037 | |
| 2038 | static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) |
| 2039 | { |
| 2040 | __be16 from_proto = skb->protocol; |
| 2041 | |
| 2042 | if (from_proto == htons(ETH_P_IP) && |
| 2043 | to_proto == htons(ETH_P_IPV6)) |
| 2044 | return bpf_skb_proto_4_to_6(skb); |
| 2045 | |
| 2046 | if (from_proto == htons(ETH_P_IPV6) && |
| 2047 | to_proto == htons(ETH_P_IP)) |
| 2048 | return bpf_skb_proto_6_to_4(skb); |
| 2049 | |
| 2050 | return -ENOTSUPP; |
| 2051 | } |
| 2052 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2053 | BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, |
| 2054 | u64, flags) |
Daniel Borkmann | 6578171 | 2016-06-28 12:18:27 +0200 | [diff] [blame] | 2055 | { |
Daniel Borkmann | 6578171 | 2016-06-28 12:18:27 +0200 | [diff] [blame] | 2056 | int ret; |
| 2057 | |
| 2058 | if (unlikely(flags)) |
| 2059 | return -EINVAL; |
| 2060 | |
| 2061 | /* The general idea is that this helper does the basic groundwork
| 2062 |  * needed for changing the protocol, and the eBPF program fills in
| 2063 |  * the rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
| 2064 |  * and other helpers, rather than being passed a raw buffer here.
| 2065 |  *
| 2066 |  * The rationale is to keep this minimal and avoid the need to
| 2067 |  * deal with raw packet data. E.g. even if we passed buffers
| 2068 |  * here, the program would still need to call the
| 2069 |  * bpf_lX_csum_replace() helpers anyway. Plus, this way we also
| 2070 |  * keep the separation of concerns intact, since e.g.
| 2071 |  * bpf_skb_store_bytes() should only take care of stores.
| 2072 |  *
| 2073 |  * Currently, additional options and extension header space are
| 2074 |  * not supported, but the flags argument is reserved so we can
| 2075 |  * adapt that later. For offloads, we mark the packet as dodgy,
| 2076 |  * so that headers need to be verified first.
| 2077 |  */
| 2078 | ret = bpf_skb_proto_xlat(skb, proto); |
| 2079 | bpf_compute_data_end(skb); |
| 2080 | return ret; |
| 2081 | } |
| 2082 | |
| 2083 | static const struct bpf_func_proto bpf_skb_change_proto_proto = { |
| 2084 | .func = bpf_skb_change_proto, |
| 2085 | .gpl_only = false, |
| 2086 | .ret_type = RET_INTEGER, |
| 2087 | .arg1_type = ARG_PTR_TO_CTX, |
| 2088 | .arg2_type = ARG_ANYTHING, |
| 2089 | .arg3_type = ARG_ANYTHING, |
| 2090 | }; |
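
/* Hedged sketch of the intended division of labour from a tc program's
 * point of view: the helper above does the groundwork, the program
 * rewrites the ethertype and network header itself. The nh6 contents
 * and offsets are assumptions for illustration, not a tested
 * conversion.
 *
 *   const __be16 p6 = __constant_htons(ETH_P_IPV6);
 *   struct ipv6hdr nh6 = {};   // to be filled from the old v4 header
 *
 *   if (bpf_skb_change_proto(skb, p6, 0))
 *           return TC_ACT_SHOT;
 *   if (bpf_skb_store_bytes(skb, offsetof(struct ethhdr, h_proto),
 *                           &p6, sizeof(p6), 0) ||
 *       bpf_skb_store_bytes(skb, ETH_HLEN, &nh6, sizeof(nh6), 0))
 *           return TC_ACT_SHOT;
 *   return TC_ACT_OK;
 */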
| 2091 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2092 | BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) |
Daniel Borkmann | d2485c4 | 2016-06-28 12:18:28 +0200 | [diff] [blame] | 2093 | { |
Daniel Borkmann | d2485c4 | 2016-06-28 12:18:28 +0200 | [diff] [blame] | 2094 | /* We only allow a restricted subset to be changed for now. */ |
Daniel Borkmann | 45c7fff | 2016-08-18 01:00:38 +0200 | [diff] [blame] | 2095 | if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || |
| 2096 | !skb_pkt_type_ok(pkt_type))) |
Daniel Borkmann | d2485c4 | 2016-06-28 12:18:28 +0200 | [diff] [blame] | 2097 | return -EINVAL; |
| 2098 | |
| 2099 | skb->pkt_type = pkt_type; |
| 2100 | return 0; |
| 2101 | } |
| 2102 | |
| 2103 | static const struct bpf_func_proto bpf_skb_change_type_proto = { |
| 2104 | .func = bpf_skb_change_type, |
| 2105 | .gpl_only = false, |
| 2106 | .ret_type = RET_INTEGER, |
| 2107 | .arg1_type = ARG_PTR_TO_CTX, |
| 2108 | .arg2_type = ARG_ANYTHING, |
| 2109 | }; |
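
/* Example use, sketched: re-classify an ingress skb so the stack
 * treats it as addressed to the local host. Only pkt_type values
 * accepted by skb_pkt_type_ok() can be set; anything else fails
 * with -EINVAL as shown above.
 *
 *   if (bpf_skb_change_type(skb, PACKET_HOST))
 *           return TC_ACT_SHOT;
 */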
| 2110 | |
Daniel Borkmann | 5293efe | 2016-08-18 01:00:39 +0200 | [diff] [blame] | 2111 | static u32 __bpf_skb_min_len(const struct sk_buff *skb) |
| 2112 | { |
| 2113 | u32 min_len = skb_network_offset(skb); |
| 2114 | |
| 2115 | if (skb_transport_header_was_set(skb)) |
| 2116 | min_len = skb_transport_offset(skb); |
| 2117 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 2118 | min_len = skb_checksum_start_offset(skb) + |
| 2119 | skb->csum_offset + sizeof(__sum16); |
| 2120 | return min_len; |
| 2121 | } |
| 2122 | |
| 2123 | static u32 __bpf_skb_max_len(const struct sk_buff *skb) |
| 2124 | { |
Daniel Borkmann | 6088b58 | 2016-09-09 02:45:28 +0200 | [diff] [blame] | 2125 | return skb->dev->mtu + skb->dev->hard_header_len; |
Daniel Borkmann | 5293efe | 2016-08-18 01:00:39 +0200 | [diff] [blame] | 2126 | } |
| 2127 | |
| 2128 | static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) |
| 2129 | { |
| 2130 | unsigned int old_len = skb->len; |
| 2131 | int ret; |
| 2132 | |
| 2133 | ret = __skb_grow_rcsum(skb, new_len); |
| 2134 | if (!ret) |
| 2135 | memset(skb->data + old_len, 0, new_len - old_len); |
| 2136 | return ret; |
| 2137 | } |
| 2138 | |
| 2139 | static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) |
| 2140 | { |
| 2141 | return __skb_trim_rcsum(skb, new_len); |
| 2142 | } |
| 2143 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2144 | BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, |
| 2145 | u64, flags) |
Daniel Borkmann | 5293efe | 2016-08-18 01:00:39 +0200 | [diff] [blame] | 2146 | { |
Daniel Borkmann | 5293efe | 2016-08-18 01:00:39 +0200 | [diff] [blame] | 2147 | u32 max_len = __bpf_skb_max_len(skb); |
| 2148 | u32 min_len = __bpf_skb_min_len(skb); |
Daniel Borkmann | 5293efe | 2016-08-18 01:00:39 +0200 | [diff] [blame] | 2149 | int ret; |
| 2150 | |
| 2151 | if (unlikely(flags || new_len > max_len || new_len < min_len)) |
| 2152 | return -EINVAL; |
| 2153 | if (skb->encapsulation) |
| 2154 | return -ENOTSUPP; |
| 2155 | |
| 2156 | /* The basic idea of this helper is that it performs the work
| 2157 |  * needed to either grow or trim an skb, and the eBPF program
| 2158 |  * rewrites the rest via helpers like bpf_skb_store_bytes(),
| 2159 |  * bpf_lX_csum_replace() and others, rather than being passed a
| 2160 |  * raw buffer here. This is a slow-path helper, intended for
| 2161 |  * replies with control messages.
| 2162 |  *
| 2163 |  * Like in bpf_skb_change_proto(), we want to keep this rather
| 2164 |  * minimal and free of protocol specifics, so that concerns stay
| 2165 |  * separated, e.g. bpf_skb_store_bytes() should be the only
| 2166 |  * helper responsible for writing buffers.
| 2167 |  *
| 2168 |  * It's really expected to be a slow-path operation here, for
| 2169 |  * control message replies, so we're implicitly linearizing,
| 2170 |  * uncloning and dropping offloads from the skb by this.
| 2171 |  */
| 2172 | ret = __bpf_try_make_writable(skb, skb->len); |
| 2173 | if (!ret) { |
| 2174 | if (new_len > skb->len) |
| 2175 | ret = bpf_skb_grow_rcsum(skb, new_len); |
| 2176 | else if (new_len < skb->len) |
| 2177 | ret = bpf_skb_trim_rcsum(skb, new_len); |
| 2178 | if (!ret && skb_is_gso(skb)) |
| 2179 | skb_gso_reset(skb); |
| 2180 | } |
| 2181 | |
| 2182 | bpf_compute_data_end(skb); |
| 2183 | return ret; |
| 2184 | } |
| 2185 | |
| 2186 | static const struct bpf_func_proto bpf_skb_change_tail_proto = { |
| 2187 | .func = bpf_skb_change_tail, |
| 2188 | .gpl_only = false, |
| 2189 | .ret_type = RET_INTEGER, |
| 2190 | .arg1_type = ARG_PTR_TO_CTX, |
| 2191 | .arg2_type = ARG_ANYTHING, |
| 2192 | .arg3_type = ARG_ANYTHING, |
| 2193 | }; |
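
/* Hedged usage sketch: trim a packet down to a small control reply
 * and write the new payload. struct ctrl_reply and the offsets are
 * assumptions for illustration.
 *
 *   struct ctrl_reply rep = {};   // hypothetical payload
 *   __u32 new_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(rep);
 *
 *   if (bpf_skb_change_tail(skb, new_len, 0))
 *           return TC_ACT_SHOT;
 *   if (bpf_skb_store_bytes(skb, new_len - sizeof(rep), &rep,
 *                           sizeof(rep), BPF_F_RECOMPUTE_CSUM))
 *           return TC_ACT_SHOT;
 */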
| 2194 | |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 2195 | bool bpf_helper_changes_skb_data(void *func) |
| 2196 | { |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 2197 | if (func == bpf_skb_vlan_push || |
| 2198 | func == bpf_skb_vlan_pop || |
| 2199 | func == bpf_skb_store_bytes || |
| 2200 | func == bpf_skb_change_proto || |
| 2201 | func == bpf_skb_change_tail || |
| 2202 | func == bpf_skb_pull_data || |
Daniel Borkmann | c1133c6 | 2017-05-25 01:05:07 +0200 | [diff] [blame] | 2203 | func == bpf_clone_redirect || |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 2204 | func == bpf_l3_csum_replace || |
| 2205 | func == bpf_l4_csum_replace) |
Daniel Borkmann | 3697649 | 2016-02-19 23:05:25 +0100 | [diff] [blame] | 2206 | return true; |
| 2207 | |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 2208 | return false; |
| 2209 | } |
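
/* The consequence for program authors, sketched with the usual tc
 * conventions: every helper listed above may move packet memory, so
 * the verifier invalidates previously derived packet pointers and the
 * program must re-read them from the context afterwards.
 *
 *   if (bpf_skb_pull_data(skb, 64))
 *           return TC_ACT_SHOT;
 *   data     = (void *)(long)skb->data;      // must be re-read here
 *   data_end = (void *)(long)skb->data_end;
 *   if (data + 64 > data_end)
 *           return TC_ACT_SHOT;
 */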
| 2210 | |
Daniel Borkmann | 555c8a8 | 2016-07-14 18:08:05 +0200 | [diff] [blame] | 2211 | static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, |
Daniel Borkmann | aa7145c | 2016-07-22 01:19:42 +0200 | [diff] [blame] | 2212 | unsigned long off, unsigned long len) |
Daniel Borkmann | 555c8a8 | 2016-07-14 18:08:05 +0200 | [diff] [blame] | 2213 | { |
Daniel Borkmann | aa7145c | 2016-07-22 01:19:42 +0200 | [diff] [blame] | 2214 | void *ptr = skb_header_pointer(skb, off, len, dst_buff); |
Daniel Borkmann | 555c8a8 | 2016-07-14 18:08:05 +0200 | [diff] [blame] | 2215 | |
| 2216 | if (unlikely(!ptr)) |
| 2217 | return len; |
| 2218 | if (ptr != dst_buff) |
| 2219 | memcpy(dst_buff, ptr, len); |
| 2220 | |
| 2221 | return 0; |
| 2222 | } |
| 2223 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2224 | BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, |
| 2225 | u64, flags, void *, meta, u64, meta_size) |
Daniel Borkmann | 555c8a8 | 2016-07-14 18:08:05 +0200 | [diff] [blame] | 2226 | { |
Daniel Borkmann | 555c8a8 | 2016-07-14 18:08:05 +0200 | [diff] [blame] | 2227 | u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; |
Daniel Borkmann | 555c8a8 | 2016-07-14 18:08:05 +0200 | [diff] [blame] | 2228 | |
| 2229 | if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) |
| 2230 | return -EINVAL; |
| 2231 | if (unlikely(skb_size > skb->len)) |
| 2232 | return -EFAULT; |
| 2233 | |
| 2234 | return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, |
| 2235 | bpf_skb_copy); |
| 2236 | } |
| 2237 | |
| 2238 | static const struct bpf_func_proto bpf_skb_event_output_proto = { |
| 2239 | .func = bpf_skb_event_output, |
| 2240 | .gpl_only = true, |
| 2241 | .ret_type = RET_INTEGER, |
| 2242 | .arg1_type = ARG_PTR_TO_CTX, |
| 2243 | .arg2_type = ARG_CONST_MAP_PTR, |
| 2244 | .arg3_type = ARG_ANYTHING, |
| 2245 | .arg4_type = ARG_PTR_TO_STACK, |
| 2246 | .arg5_type = ARG_CONST_STACK_SIZE, |
| 2247 | }; |
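
/* Sketch of the flags encoding used here (map and sizes assumed): the
 * lower 32 bits select the perf event index, e.g. BPF_F_CURRENT_CPU,
 * while the upper 32 bits request that many packet bytes be appended
 * to the sample after the meta data.
 *
 *   __u32 cap = skb->len < 256 ? skb->len : 256;
 *   __u64 flags = BPF_F_CURRENT_CPU | ((__u64)cap << 32);
 *
 *   bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
 */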
| 2248 | |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2249 | static unsigned short bpf_tunnel_key_af(u64 flags) |
| 2250 | { |
| 2251 | return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET; |
| 2252 | } |
| 2253 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2254 | BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to, |
| 2255 | u32, size, u64, flags) |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2256 | { |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2257 | const struct ip_tunnel_info *info = skb_tunnel_info(skb); |
| 2258 | u8 compat[sizeof(struct bpf_tunnel_key)]; |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2259 | void *to_orig = to; |
| 2260 | int err; |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2261 | |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2262 | if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { |
| 2263 | err = -EINVAL; |
| 2264 | goto err_clear; |
| 2265 | } |
| 2266 | if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { |
| 2267 | err = -EPROTO; |
| 2268 | goto err_clear; |
| 2269 | } |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2270 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2271 | err = -EINVAL; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2272 | switch (size) { |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2273 | case offsetof(struct bpf_tunnel_key, tunnel_label): |
Daniel Borkmann | c0e760c | 2016-03-30 00:02:00 +0200 | [diff] [blame] | 2274 | case offsetof(struct bpf_tunnel_key, tunnel_ext): |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2275 | goto set_compat; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2276 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): |
| 2277 | /* Fix up deprecated structure layouts here, so we have
| 2278 |  * a common path later on.
| 2279 |  */
| 2280 | if (ip_tunnel_info_af(info) != AF_INET) |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2281 | goto err_clear; |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2282 | set_compat: |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2283 | to = (struct bpf_tunnel_key *)compat; |
| 2284 | break; |
| 2285 | default: |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2286 | goto err_clear; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2287 | } |
| 2288 | } |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2289 | |
| 2290 | to->tunnel_id = be64_to_cpu(info->key.tun_id); |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2291 | to->tunnel_tos = info->key.tos; |
| 2292 | to->tunnel_ttl = info->key.ttl; |
| 2293 | |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2294 | if (flags & BPF_F_TUNINFO_IPV6) { |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2295 | memcpy(to->remote_ipv6, &info->key.u.ipv6.src, |
| 2296 | sizeof(to->remote_ipv6)); |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2297 | to->tunnel_label = be32_to_cpu(info->key.label); |
| 2298 | } else { |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2299 | to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2300 | } |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2301 | |
| 2302 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2303 | memcpy(to_orig, to, size); |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2304 | |
| 2305 | return 0; |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2306 | err_clear: |
| 2307 | memset(to_orig, 0, size); |
| 2308 | return err; |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2309 | } |
| 2310 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 2311 | static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2312 | .func = bpf_skb_get_tunnel_key, |
| 2313 | .gpl_only = false, |
| 2314 | .ret_type = RET_INTEGER, |
| 2315 | .arg1_type = ARG_PTR_TO_CTX, |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2316 | .arg2_type = ARG_PTR_TO_RAW_STACK, |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2317 | .arg3_type = ARG_CONST_STACK_SIZE, |
| 2318 | .arg4_type = ARG_ANYTHING, |
| 2319 | }; |
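
/* Possible ingress use on a collect-metadata tunnel device, hedged:
 * the full current struct size has to be passed, smaller sizes only
 * work for the deprecated compat layouts handled above. The VNI value
 * is an example.
 *
 *   struct bpf_tunnel_key key;
 *
 *   if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
 *           return TC_ACT_SHOT;
 *   if (key.tunnel_id != 42)
 *           return TC_ACT_SHOT;
 *   return TC_ACT_OK;
 */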
| 2320 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2321 | BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size) |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2322 | { |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2323 | const struct ip_tunnel_info *info = skb_tunnel_info(skb); |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2324 | int err; |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2325 | |
| 2326 | if (unlikely(!info || |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2327 | !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { |
| 2328 | err = -ENOENT; |
| 2329 | goto err_clear; |
| 2330 | } |
| 2331 | if (unlikely(size < info->options_len)) { |
| 2332 | err = -ENOMEM; |
| 2333 | goto err_clear; |
| 2334 | } |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2335 | |
| 2336 | ip_tunnel_info_opts_get(to, info); |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2337 | if (size > info->options_len) |
| 2338 | memset(to + info->options_len, 0, size - info->options_len); |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2339 | |
| 2340 | return info->options_len; |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2341 | err_clear: |
| 2342 | memset(to, 0, size); |
| 2343 | return err; |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2344 | } |
| 2345 | |
| 2346 | static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { |
| 2347 | .func = bpf_skb_get_tunnel_opt, |
| 2348 | .gpl_only = false, |
| 2349 | .ret_type = RET_INTEGER, |
| 2350 | .arg1_type = ARG_PTR_TO_CTX, |
Daniel Borkmann | 074f528 | 2016-04-13 00:10:52 +0200 | [diff] [blame] | 2351 | .arg2_type = ARG_PTR_TO_RAW_STACK, |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2352 | .arg3_type = ARG_CONST_STACK_SIZE, |
| 2353 | }; |
| 2354 | |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2355 | static struct metadata_dst __percpu *md_dst; |
| 2356 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2357 | BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, |
| 2358 | const struct bpf_tunnel_key *, from, u32, size, u64, flags) |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2359 | { |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2360 | struct metadata_dst *md = this_cpu_ptr(md_dst); |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2361 | u8 compat[sizeof(struct bpf_tunnel_key)]; |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2362 | struct ip_tunnel_info *info; |
| 2363 | |
Daniel Borkmann | 2208087 | 2016-03-04 15:15:05 +0100 | [diff] [blame] | 2364 | if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | |
| 2365 | BPF_F_DONT_FRAGMENT))) |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2366 | return -EINVAL; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2367 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { |
| 2368 | switch (size) { |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2369 | case offsetof(struct bpf_tunnel_key, tunnel_label): |
Daniel Borkmann | c0e760c | 2016-03-30 00:02:00 +0200 | [diff] [blame] | 2370 | case offsetof(struct bpf_tunnel_key, tunnel_ext): |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2371 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): |
| 2372 | /* Fix up deprecated structure layouts here, so we have
| 2373 |  * a common path later on.
| 2374 |  */
| 2375 | memcpy(compat, from, size); |
| 2376 | memset(compat + size, 0, sizeof(compat) - size); |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2377 | from = (const struct bpf_tunnel_key *) compat; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2378 | break; |
| 2379 | default: |
| 2380 | return -EINVAL; |
| 2381 | } |
| 2382 | } |
Daniel Borkmann | c0e760c | 2016-03-30 00:02:00 +0200 | [diff] [blame] | 2383 | if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || |
| 2384 | from->tunnel_ext)) |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2385 | return -EINVAL; |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2386 | |
| 2387 | skb_dst_drop(skb); |
| 2388 | dst_hold((struct dst_entry *) md); |
| 2389 | skb_dst_set(skb, (struct dst_entry *) md); |
| 2390 | |
| 2391 | info = &md->u.tun_info; |
| 2392 | info->mode = IP_TUNNEL_INFO_TX; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2393 | |
Daniel Borkmann | db3c613 | 2016-03-04 15:15:07 +0100 | [diff] [blame] | 2394 | info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; |
Daniel Borkmann | 2208087 | 2016-03-04 15:15:05 +0100 | [diff] [blame] | 2395 | if (flags & BPF_F_DONT_FRAGMENT) |
| 2396 | info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; |
| 2397 | |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2398 | info->key.tun_id = cpu_to_be64(from->tunnel_id); |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2399 | info->key.tos = from->tunnel_tos; |
| 2400 | info->key.ttl = from->tunnel_ttl; |
| 2401 | |
| 2402 | if (flags & BPF_F_TUNINFO_IPV6) { |
| 2403 | info->mode |= IP_TUNNEL_INFO_IPV6; |
| 2404 | memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, |
| 2405 | sizeof(from->remote_ipv6)); |
Daniel Borkmann | 4018ab1 | 2016-03-09 03:00:05 +0100 | [diff] [blame] | 2406 | info->key.label = cpu_to_be32(from->tunnel_label) & |
| 2407 | IPV6_FLOWLABEL_MASK; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2408 | } else { |
| 2409 | info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); |
Daniel Borkmann | 2da897e | 2016-02-23 02:05:26 +0100 | [diff] [blame] | 2410 | if (flags & BPF_F_ZERO_CSUM_TX) |
| 2411 | info->key.tun_flags &= ~TUNNEL_CSUM; |
Daniel Borkmann | c6c3345 | 2016-01-11 01:16:39 +0100 | [diff] [blame] | 2412 | } |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2413 | |
| 2414 | return 0; |
| 2415 | } |
| 2416 | |
Daniel Borkmann | 577c50a | 2016-03-04 15:15:04 +0100 | [diff] [blame] | 2417 | static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2418 | .func = bpf_skb_set_tunnel_key, |
| 2419 | .gpl_only = false, |
| 2420 | .ret_type = RET_INTEGER, |
| 2421 | .arg1_type = ARG_PTR_TO_CTX, |
| 2422 | .arg2_type = ARG_PTR_TO_STACK, |
| 2423 | .arg3_type = ARG_CONST_STACK_SIZE, |
| 2424 | .arg4_type = ARG_ANYTHING, |
| 2425 | }; |
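
/* The egress counterpart, again as a sketch with example values: this
 * populates the per-CPU metadata dst allocated below, which the tunnel
 * device then consumes on transmit.
 *
 *   struct bpf_tunnel_key key = {};
 *
 *   key.tunnel_id   = 42;
 *   key.remote_ipv4 = 0xac100164;   // 172.16.1.100, host byte order
 *   key.tunnel_ttl  = 64;
 *   if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *                              BPF_F_ZERO_CSUM_TX))
 *           return TC_ACT_SHOT;
 */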
| 2426 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2427 | BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb, |
| 2428 | const u8 *, from, u32, size) |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2429 | { |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2430 | struct ip_tunnel_info *info = skb_tunnel_info(skb); |
| 2431 | const struct metadata_dst *md = this_cpu_ptr(md_dst); |
| 2432 | |
| 2433 | if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) |
| 2434 | return -EINVAL; |
Daniel Borkmann | fca5fdf | 2016-03-16 01:42:51 +0100 | [diff] [blame] | 2435 | if (unlikely(size > IP_TUNNEL_OPTS_MAX)) |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2436 | return -ENOMEM; |
| 2437 | |
| 2438 | ip_tunnel_info_opts_set(info, from, size); |
| 2439 | |
| 2440 | return 0; |
| 2441 | } |
| 2442 | |
| 2443 | static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { |
| 2444 | .func = bpf_skb_set_tunnel_opt, |
| 2445 | .gpl_only = false, |
| 2446 | .ret_type = RET_INTEGER, |
| 2447 | .arg1_type = ARG_PTR_TO_CTX, |
| 2448 | .arg2_type = ARG_PTR_TO_STACK, |
| 2449 | .arg3_type = ARG_CONST_STACK_SIZE, |
| 2450 | }; |
| 2451 | |
| 2452 | static const struct bpf_func_proto * |
| 2453 | bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2454 | { |
| 2455 | if (!md_dst) { |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2456 | /* A race is not possible, since this is called from the
| 2457 |  * verifier, which holds the verifier mutex.
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2458 | */ |
Daniel Borkmann | fca5fdf | 2016-03-16 01:42:51 +0100 | [diff] [blame] | 2459 | md_dst = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX, |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2460 | GFP_KERNEL); |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2461 | if (!md_dst) |
| 2462 | return NULL; |
| 2463 | } |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2464 | |
| 2465 | switch (which) { |
| 2466 | case BPF_FUNC_skb_set_tunnel_key: |
| 2467 | return &bpf_skb_set_tunnel_key_proto; |
| 2468 | case BPF_FUNC_skb_set_tunnel_opt: |
| 2469 | return &bpf_skb_set_tunnel_opt_proto; |
| 2470 | default: |
| 2471 | return NULL; |
| 2472 | } |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2473 | } |
| 2474 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2475 | BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map, |
| 2476 | u32, idx) |
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2477 | { |
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2478 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 2479 | struct cgroup *cgrp; |
| 2480 | struct sock *sk; |
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2481 | |
Daniel Borkmann | 2d48c5f | 2016-09-23 01:28:35 +0200 | [diff] [blame] | 2482 | sk = skb_to_full_sk(skb); |
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2483 | if (!sk || !sk_fullsock(sk)) |
| 2484 | return -ENOENT; |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2485 | if (unlikely(idx >= array->map.max_entries)) |
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2486 | return -E2BIG; |
| 2487 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2488 | cgrp = READ_ONCE(array->ptrs[idx]); |
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2489 | if (unlikely(!cgrp)) |
| 2490 | return -EAGAIN; |
| 2491 | |
Daniel Borkmann | 54fd9c2 | 2016-08-18 01:00:41 +0200 | [diff] [blame] | 2492 | return sk_under_cgroup_hierarchy(sk, cgrp); |
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2493 | } |
| 2494 | |
Daniel Borkmann | 747ea55 | 2016-08-12 22:17:17 +0200 | [diff] [blame] | 2495 | static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { |
| 2496 | .func = bpf_skb_under_cgroup, |
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2497 | .gpl_only = false, |
| 2498 | .ret_type = RET_INTEGER, |
| 2499 | .arg1_type = ARG_PTR_TO_CTX, |
| 2500 | .arg2_type = ARG_CONST_MAP_PTR, |
| 2501 | .arg3_type = ARG_ANYTHING, |
| 2502 | }; |
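
/* Possible use from a tc program, with an assumed map layout: check
 * whether the skb's full socket sits below the cgroup stored at index
 * 0 of a BPF_MAP_TYPE_CGROUP_ARRAY map.
 *
 *   int ret = bpf_skb_under_cgroup(skb, &cgroup_map, 0);
 *
 *   if (ret < 0)
 *           return TC_ACT_UNSPEC;   // no full socket or bad index
 *   return ret == 1 ? TC_ACT_OK : TC_ACT_SHOT;
 */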
Martin KaFai Lau | 4a482f3 | 2016-06-30 10:28:44 -0700 | [diff] [blame] | 2503 | |
Daniel Borkmann | 4de1696 | 2016-08-18 01:00:40 +0200 | [diff] [blame] | 2504 | static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, |
| 2505 | unsigned long off, unsigned long len) |
| 2506 | { |
| 2507 | memcpy(dst_buff, src_buff + off, len); |
| 2508 | return 0; |
| 2509 | } |
| 2510 | |
Daniel Borkmann | f3694e0 | 2016-09-09 02:45:31 +0200 | [diff] [blame] | 2511 | BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, |
| 2512 | u64, flags, void *, meta, u64, meta_size) |
Daniel Borkmann | 4de1696 | 2016-08-18 01:00:40 +0200 | [diff] [blame] | 2513 | { |
Daniel Borkmann | 4de1696 | 2016-08-18 01:00:40 +0200 | [diff] [blame] | 2514 | u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; |
Daniel Borkmann | 4de1696 | 2016-08-18 01:00:40 +0200 | [diff] [blame] | 2515 | |
| 2516 | if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) |
| 2517 | return -EINVAL; |
| 2518 | if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data))) |
| 2519 | return -EFAULT; |
| 2520 | |
| 2521 | return bpf_event_output(map, flags, meta, meta_size, xdp, xdp_size, |
| 2522 | bpf_xdp_copy); |
| 2523 | } |
| 2524 | |
| 2525 | static const struct bpf_func_proto bpf_xdp_event_output_proto = { |
| 2526 | .func = bpf_xdp_event_output, |
| 2527 | .gpl_only = true, |
| 2528 | .ret_type = RET_INTEGER, |
| 2529 | .arg1_type = ARG_PTR_TO_CTX, |
| 2530 | .arg2_type = ARG_CONST_MAP_PTR, |
| 2531 | .arg3_type = ARG_ANYTHING, |
| 2532 | .arg4_type = ARG_PTR_TO_STACK, |
| 2533 | .arg5_type = ARG_CONST_STACK_SIZE, |
| 2534 | }; |
| 2535 | |
Daniel Borkmann | d4052c4 | 2015-03-01 12:31:45 +0100 | [diff] [blame] | 2536 | static const struct bpf_func_proto * |
| 2537 | sk_filter_func_proto(enum bpf_func_id func_id) |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 2538 | { |
| 2539 | switch (func_id) { |
| 2540 | case BPF_FUNC_map_lookup_elem: |
| 2541 | return &bpf_map_lookup_elem_proto; |
| 2542 | case BPF_FUNC_map_update_elem: |
| 2543 | return &bpf_map_update_elem_proto; |
| 2544 | case BPF_FUNC_map_delete_elem: |
| 2545 | return &bpf_map_delete_elem_proto; |
Daniel Borkmann | 03e69b5 | 2015-03-14 02:27:16 +0100 | [diff] [blame] | 2546 | case BPF_FUNC_get_prandom_u32: |
| 2547 | return &bpf_get_prandom_u32_proto; |
Daniel Borkmann | c04167c | 2015-03-14 02:27:17 +0100 | [diff] [blame] | 2548 | case BPF_FUNC_get_smp_processor_id: |
Daniel Borkmann | 80b48c4 | 2016-06-28 12:18:26 +0200 | [diff] [blame] | 2549 | return &bpf_get_raw_smp_processor_id_proto; |
Alexei Starovoitov | 04fd61a | 2015-05-19 16:59:03 -0700 | [diff] [blame] | 2550 | case BPF_FUNC_tail_call: |
| 2551 | return &bpf_tail_call_proto; |
Daniel Borkmann | 17ca8cb | 2015-05-29 23:23:06 +0200 | [diff] [blame] | 2552 | case BPF_FUNC_ktime_get_ns: |
| 2553 | return &bpf_ktime_get_ns_proto; |
Alexei Starovoitov | 0756ea3 | 2015-06-12 19:39:13 -0700 | [diff] [blame] | 2554 | case BPF_FUNC_trace_printk: |
Alexei Starovoitov | 1be7f75 | 2015-10-07 22:23:21 -0700 | [diff] [blame] | 2555 | if (capable(CAP_SYS_ADMIN)) |
| 2556 | return bpf_get_trace_printk_proto(); |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 2557 | default: |
| 2558 | return NULL; |
| 2559 | } |
| 2560 | } |
| 2561 | |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 2562 | static const struct bpf_func_proto * |
| 2563 | tc_cls_act_func_proto(enum bpf_func_id func_id) |
| 2564 | { |
| 2565 | switch (func_id) { |
| 2566 | case BPF_FUNC_skb_store_bytes: |
| 2567 | return &bpf_skb_store_bytes_proto; |
Daniel Borkmann | 05c74e5 | 2015-12-17 23:51:53 +0100 | [diff] [blame] | 2568 | case BPF_FUNC_skb_load_bytes: |
| 2569 | return &bpf_skb_load_bytes_proto; |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 2570 | case BPF_FUNC_skb_pull_data: |
| 2571 | return &bpf_skb_pull_data_proto; |
Daniel Borkmann | 7d67234 | 2016-02-19 23:05:23 +0100 | [diff] [blame] | 2572 | case BPF_FUNC_csum_diff: |
| 2573 | return &bpf_csum_diff_proto; |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 2574 | case BPF_FUNC_csum_update: |
| 2575 | return &bpf_csum_update_proto; |
Alexei Starovoitov | 91bc4822 | 2015-04-01 17:12:13 -0700 | [diff] [blame] | 2576 | case BPF_FUNC_l3_csum_replace: |
| 2577 | return &bpf_l3_csum_replace_proto; |
| 2578 | case BPF_FUNC_l4_csum_replace: |
| 2579 | return &bpf_l4_csum_replace_proto; |
Alexei Starovoitov | 3896d65 | 2015-06-02 16:03:14 -0700 | [diff] [blame] | 2580 | case BPF_FUNC_clone_redirect: |
| 2581 | return &bpf_clone_redirect_proto; |
Daniel Borkmann | 8d20aab | 2015-07-15 14:21:42 +0200 | [diff] [blame] | 2582 | case BPF_FUNC_get_cgroup_classid: |
| 2583 | return &bpf_get_cgroup_classid_proto; |
Alexei Starovoitov | 4e10df9 | 2015-07-20 20:34:18 -0700 | [diff] [blame] | 2584 | case BPF_FUNC_skb_vlan_push: |
| 2585 | return &bpf_skb_vlan_push_proto; |
| 2586 | case BPF_FUNC_skb_vlan_pop: |
| 2587 | return &bpf_skb_vlan_pop_proto; |
Daniel Borkmann | 6578171 | 2016-06-28 12:18:27 +0200 | [diff] [blame] | 2588 | case BPF_FUNC_skb_change_proto: |
| 2589 | return &bpf_skb_change_proto_proto; |
Daniel Borkmann | d2485c4 | 2016-06-28 12:18:28 +0200 | [diff] [blame] | 2590 | case BPF_FUNC_skb_change_type: |
| 2591 | return &bpf_skb_change_type_proto; |
Daniel Borkmann | 5293efe | 2016-08-18 01:00:39 +0200 | [diff] [blame] | 2592 | case BPF_FUNC_skb_change_tail: |
| 2593 | return &bpf_skb_change_tail_proto; |
Alexei Starovoitov | d3aa45c | 2015-07-30 15:36:57 -0700 | [diff] [blame] | 2594 | case BPF_FUNC_skb_get_tunnel_key: |
| 2595 | return &bpf_skb_get_tunnel_key_proto; |
| 2596 | case BPF_FUNC_skb_set_tunnel_key: |
Daniel Borkmann | 14ca075 | 2016-03-04 15:15:06 +0100 | [diff] [blame] | 2597 | return bpf_get_skb_set_tunnel_proto(func_id); |
| 2598 | case BPF_FUNC_skb_get_tunnel_opt: |
| 2599 | return &bpf_skb_get_tunnel_opt_proto; |
| 2600 | case BPF_FUNC_skb_set_tunnel_opt: |
| 2601 | return bpf_get_skb_set_tunnel_proto(func_id); |
Alexei Starovoitov | 27b29f6 | 2015-09-15 23:05:43 -0700 | [diff] [blame] | 2602 | case BPF_FUNC_redirect: |
| 2603 | return &bpf_redirect_proto; |
Daniel Borkmann | c46646d | 2015-09-30 01:41:51 +0200 | [diff] [blame] | 2604 | case BPF_FUNC_get_route_realm: |
| 2605 | return &bpf_get_route_realm_proto; |
Daniel Borkmann | 13c5c24 | 2016-07-03 01:28:47 +0200 | [diff] [blame] | 2606 | case BPF_FUNC_get_hash_recalc: |
| 2607 | return &bpf_get_hash_recalc_proto; |
Daniel Borkmann | 7a4b28c | 2016-09-23 01:28:37 +0200 | [diff] [blame] | 2608 | case BPF_FUNC_set_hash_invalid: |
| 2609 | return &bpf_set_hash_invalid_proto; |
Daniel Borkmann | bd570ff | 2016-04-18 21:01:24 +0200 | [diff] [blame] | 2610 | case BPF_FUNC_perf_event_output: |
Daniel Borkmann | 555c8a8 | 2016-07-14 18:08:05 +0200 | [diff] [blame] | 2611 | return &bpf_skb_event_output_proto; |
Daniel Borkmann | 80b48c4 | 2016-06-28 12:18:26 +0200 | [diff] [blame] | 2612 | case BPF_FUNC_get_smp_processor_id: |
| 2613 | return &bpf_get_smp_processor_id_proto; |
Daniel Borkmann | 747ea55 | 2016-08-12 22:17:17 +0200 | [diff] [blame] | 2614 | case BPF_FUNC_skb_under_cgroup: |
| 2615 | return &bpf_skb_under_cgroup_proto; |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 2616 | default: |
| 2617 | return sk_filter_func_proto(func_id); |
| 2618 | } |
| 2619 | } |
| 2620 | |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 2621 | static const struct bpf_func_proto * |
| 2622 | xdp_func_proto(enum bpf_func_id func_id) |
| 2623 | { |
Daniel Borkmann | 4de1696 | 2016-08-18 01:00:40 +0200 | [diff] [blame] | 2624 | switch (func_id) { |
| 2625 | case BPF_FUNC_perf_event_output: |
| 2626 | return &bpf_xdp_event_output_proto; |
Daniel Borkmann | 669dc4d | 2016-09-23 01:28:36 +0200 | [diff] [blame] | 2627 | case BPF_FUNC_get_smp_processor_id: |
| 2628 | return &bpf_get_smp_processor_id_proto; |
Daniel Borkmann | 4de1696 | 2016-08-18 01:00:40 +0200 | [diff] [blame] | 2629 | default: |
| 2630 | return sk_filter_func_proto(func_id); |
| 2631 | } |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 2632 | } |
| 2633 | |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2634 | static bool __is_valid_access(int off, int size, enum bpf_access_type type) |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 2635 | { |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 2636 | if (off < 0 || off >= sizeof(struct __sk_buff)) |
| 2637 | return false; |
Daniel Borkmann | 4936e35 | 2016-05-13 19:08:26 +0200 | [diff] [blame] | 2638 | /* The verifier guarantees that size > 0. */ |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 2639 | if (off % size != 0) |
| 2640 | return false; |
Daniel Borkmann | 4936e35 | 2016-05-13 19:08:26 +0200 | [diff] [blame] | 2641 | if (size != sizeof(__u32)) |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 2642 | return false; |
| 2643 | |
| 2644 | return true; |
| 2645 | } |
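
/* Effect on programs, sketched: __sk_buff fields may only be read as
 * naturally aligned 32-bit words, so of the two accesses below only
 * the first one passes the verifier.
 *
 *   __u32 len   = skb->len;                     // ok: 4-byte access
 *   __u16 proto = *(__u16 *)&skb->protocol;     // rejected: size != 4
 */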
| 2646 | |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2647 | static bool sk_filter_is_valid_access(int off, int size, |
Alexei Starovoitov | 19de99f | 2016-06-15 18:25:38 -0700 | [diff] [blame] | 2648 | enum bpf_access_type type, |
| 2649 | enum bpf_reg_type *reg_type) |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2650 | { |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 2651 | switch (off) { |
| 2652 | case offsetof(struct __sk_buff, tc_classid): |
| 2653 | case offsetof(struct __sk_buff, data): |
| 2654 | case offsetof(struct __sk_buff, data_end): |
Daniel Borkmann | 045efa8 | 2015-09-15 23:05:42 -0700 | [diff] [blame] | 2655 | return false; |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 2656 | } |
Daniel Borkmann | 045efa8 | 2015-09-15 23:05:42 -0700 | [diff] [blame] | 2657 | |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2658 | if (type == BPF_WRITE) { |
| 2659 | switch (off) { |
| 2660 | case offsetof(struct __sk_buff, cb[0]) ... |
Daniel Borkmann | 4936e35 | 2016-05-13 19:08:26 +0200 | [diff] [blame] | 2661 | offsetof(struct __sk_buff, cb[4]): |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2662 | break; |
| 2663 | default: |
| 2664 | return false; |
| 2665 | } |
| 2666 | } |
| 2667 | |
| 2668 | return __is_valid_access(off, size, type); |
| 2669 | } |
| 2670 | |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 2671 | static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, |
| 2672 | const struct bpf_prog *prog) |
| 2673 | { |
| 2674 | struct bpf_insn *insn = insn_buf; |
| 2675 | |
| 2676 | if (!direct_write) |
| 2677 | return 0; |
| 2678 | |
| 2679 | /* if (!skb->cloned) |
| 2680 | * goto start; |
| 2681 | * |
| 2682 |  * (Fast path; otherwise we conservatively assume the skb
| 2683 |  * might be a clone and do the rest in the helper.)
| 2684 | */ |
| 2685 | *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); |
| 2686 | *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); |
| 2687 | *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); |
| 2688 | |
| 2689 | /* ret = bpf_skb_pull_data(skb, 0); */ |
| 2690 | *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); |
| 2691 | *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); |
| 2692 | *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, |
| 2693 | BPF_FUNC_skb_pull_data); |
| 2694 | /* if (!ret) |
| 2695 | * goto restore; |
| 2696 | * return TC_ACT_SHOT; |
| 2697 | */ |
| 2698 | *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); |
| 2699 | *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, TC_ACT_SHOT); |
| 2700 | *insn++ = BPF_EXIT_INSN(); |
| 2701 | |
| 2702 | /* restore: */ |
| 2703 | *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); |
| 2704 | /* start: */ |
| 2705 | *insn++ = prog->insnsi[0]; |
| 2706 | |
| 2707 | return insn - insn_buf; |
| 2708 | } |
| 2709 | |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2710 | static bool tc_cls_act_is_valid_access(int off, int size, |
Alexei Starovoitov | 19de99f | 2016-06-15 18:25:38 -0700 | [diff] [blame] | 2711 | enum bpf_access_type type, |
| 2712 | enum bpf_reg_type *reg_type) |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2713 | { |
| 2714 | if (type == BPF_WRITE) { |
| 2715 | switch (off) { |
| 2716 | case offsetof(struct __sk_buff, mark): |
| 2717 | case offsetof(struct __sk_buff, tc_index): |
Daniel Borkmann | 754f1e6 | 2015-09-30 01:41:52 +0200 | [diff] [blame] | 2718 | case offsetof(struct __sk_buff, priority): |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2719 | case offsetof(struct __sk_buff, cb[0]) ... |
Daniel Borkmann | 09c37a2 | 2016-03-16 01:42:49 +0100 | [diff] [blame] | 2720 | offsetof(struct __sk_buff, cb[4]): |
| 2721 | case offsetof(struct __sk_buff, tc_classid): |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2722 | break; |
| 2723 | default: |
| 2724 | return false; |
| 2725 | } |
| 2726 | } |
Alexei Starovoitov | 19de99f | 2016-06-15 18:25:38 -0700 | [diff] [blame] | 2727 | |
| 2728 | switch (off) { |
| 2729 | case offsetof(struct __sk_buff, data): |
| 2730 | *reg_type = PTR_TO_PACKET; |
| 2731 | break; |
| 2732 | case offsetof(struct __sk_buff, data_end): |
| 2733 | *reg_type = PTR_TO_PACKET_END; |
| 2734 | break; |
| 2735 | } |
| 2736 | |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2737 | return __is_valid_access(off, size, type); |
| 2738 | } |
| 2739 | |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 2740 | static bool __is_valid_xdp_access(int off, int size, |
| 2741 | enum bpf_access_type type) |
| 2742 | { |
| 2743 | if (off < 0 || off >= sizeof(struct xdp_md)) |
| 2744 | return false; |
| 2745 | if (off % size != 0) |
| 2746 | return false; |
Daniel Borkmann | 6088b58 | 2016-09-09 02:45:28 +0200 | [diff] [blame] | 2747 | if (size != sizeof(__u32)) |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 2748 | return false; |
| 2749 | |
| 2750 | return true; |
| 2751 | } |
| 2752 | |
| 2753 | static bool xdp_is_valid_access(int off, int size, |
| 2754 | enum bpf_access_type type, |
| 2755 | enum bpf_reg_type *reg_type) |
| 2756 | { |
| 2757 | if (type == BPF_WRITE) |
| 2758 | return false; |
| 2759 | |
| 2760 | switch (off) { |
| 2761 | case offsetof(struct xdp_md, data): |
| 2762 | *reg_type = PTR_TO_PACKET; |
| 2763 | break; |
| 2764 | case offsetof(struct xdp_md, data_end): |
| 2765 | *reg_type = PTR_TO_PACKET_END; |
| 2766 | break; |
| 2767 | } |
| 2768 | |
| 2769 | return __is_valid_xdp_access(off, size, type); |
| 2770 | } |
| 2771 | |
| 2772 | void bpf_warn_invalid_xdp_action(u32 act) |
| 2773 | { |
| 2774 | WARN_ONCE(1, "Illegal XDP return value %u, expect packet loss\n", act); |
| 2775 | } |
| 2776 | EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); |
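
/* For reference, a minimal XDP program that never triggers the above
 * warning; SEC() placement and loader are assumed to follow the usual
 * samples/bpf conventions.
 *
 *   SEC("xdp")
 *   int xdp_pass_all(struct xdp_md *ctx)
 *   {
 *           return XDP_PASS;   // or XDP_DROP, XDP_TX, XDP_ABORTED
 *   }
 */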
| 2777 | |
Daniel Borkmann | 374fb54 | 2016-09-09 02:45:30 +0200 | [diff] [blame] | 2778 | static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg, |
| 2779 | int src_reg, int ctx_off, |
| 2780 | struct bpf_insn *insn_buf, |
| 2781 | struct bpf_prog *prog) |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 2782 | { |
| 2783 | struct bpf_insn *insn = insn_buf; |
| 2784 | |
| 2785 | switch (ctx_off) { |
| 2786 | case offsetof(struct __sk_buff, len): |
| 2787 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); |
| 2788 | |
| 2789 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, |
| 2790 | offsetof(struct sk_buff, len)); |
| 2791 | break; |
| 2792 | |
Daniel Borkmann | 0b8c707 | 2015-03-19 19:38:27 +0100 | [diff] [blame] | 2793 | case offsetof(struct __sk_buff, protocol): |
| 2794 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); |
| 2795 | |
| 2796 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, |
| 2797 | offsetof(struct sk_buff, protocol)); |
| 2798 | break; |
| 2799 | |
Michal Sekletar | 27cd545 | 2015-03-24 14:48:41 +0100 | [diff] [blame] | 2800 | case offsetof(struct __sk_buff, vlan_proto): |
| 2801 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2); |
| 2802 | |
| 2803 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, |
| 2804 | offsetof(struct sk_buff, vlan_proto)); |
| 2805 | break; |
| 2806 | |
Daniel Borkmann | bcad571 | 2015-04-03 20:52:24 +0200 | [diff] [blame] | 2807 | case offsetof(struct __sk_buff, priority): |
| 2808 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4); |
| 2809 | |
Daniel Borkmann | 754f1e6 | 2015-09-30 01:41:52 +0200 | [diff] [blame] | 2810 | if (type == BPF_WRITE) |
| 2811 | *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, |
| 2812 | offsetof(struct sk_buff, priority)); |
| 2813 | else |
| 2814 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, |
| 2815 | offsetof(struct sk_buff, priority)); |
Daniel Borkmann | bcad571 | 2015-04-03 20:52:24 +0200 | [diff] [blame] | 2816 | break; |
| 2817 | |
Alexei Starovoitov | 37e82c2 | 2015-05-27 15:30:39 -0700 | [diff] [blame] | 2818 | case offsetof(struct __sk_buff, ingress_ifindex): |
| 2819 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4); |
| 2820 | |
| 2821 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, |
| 2822 | offsetof(struct sk_buff, skb_iif)); |
| 2823 | break; |
| 2824 | |
| 2825 | case offsetof(struct __sk_buff, ifindex): |
| 2826 | BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); |
| 2827 | |
Daniel Borkmann | f035a51 | 2016-09-09 02:45:29 +0200 | [diff] [blame] | 2828 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), |
Alexei Starovoitov | 37e82c2 | 2015-05-27 15:30:39 -0700 | [diff] [blame] | 2829 | dst_reg, src_reg, |
| 2830 | offsetof(struct sk_buff, dev)); |
| 2831 | *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1); |
| 2832 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg, |
| 2833 | offsetof(struct net_device, ifindex)); |
| 2834 | break; |
| 2835 | |
Daniel Borkmann | ba7591d | 2015-08-01 00:46:29 +0200 | [diff] [blame] | 2836 | case offsetof(struct __sk_buff, hash): |
| 2837 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); |
| 2838 | |
| 2839 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, |
| 2840 | offsetof(struct sk_buff, hash)); |
| 2841 | break; |
| 2842 | |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 2843 | case offsetof(struct __sk_buff, mark): |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2844 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); |
| 2845 | |
| 2846 | if (type == BPF_WRITE) |
| 2847 | *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, |
| 2848 | offsetof(struct sk_buff, mark)); |
| 2849 | else |
| 2850 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, |
| 2851 | offsetof(struct sk_buff, mark)); |
| 2852 | break; |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 2853 | |
| 2854 | case offsetof(struct __sk_buff, pkt_type): |
| 2855 | return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn); |
| 2856 | |
| 2857 | case offsetof(struct __sk_buff, queue_mapping): |
| 2858 | return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn); |
Alexei Starovoitov | c249739 | 2015-03-16 18:06:02 -0700 | [diff] [blame] | 2859 | |
Alexei Starovoitov | c249739 | 2015-03-16 18:06:02 -0700 | [diff] [blame] | 2860 | case offsetof(struct __sk_buff, vlan_present): |
| 2861 | return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, |
| 2862 | dst_reg, src_reg, insn); |
| 2863 | |
| 2864 | case offsetof(struct __sk_buff, vlan_tci): |
| 2865 | return convert_skb_access(SKF_AD_VLAN_TAG, |
| 2866 | dst_reg, src_reg, insn); |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2867 | |
| 2868 | case offsetof(struct __sk_buff, cb[0]) ... |
Daniel Borkmann | 6088b58 | 2016-09-09 02:45:28 +0200 | [diff] [blame] | 2869 | offsetof(struct __sk_buff, cb[4]): |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2870 | BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); |
| 2871 | |
Alexei Starovoitov | ff936a0 | 2015-10-07 10:55:41 -0700 | [diff] [blame] | 2872 | prog->cb_access = 1; |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2873 | ctx_off -= offsetof(struct __sk_buff, cb[0]); |
| 2874 | ctx_off += offsetof(struct sk_buff, cb); |
| 2875 | ctx_off += offsetof(struct qdisc_skb_cb, data); |
| 2876 | if (type == BPF_WRITE) |
| 2877 | *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off); |
| 2878 | else |
| 2879 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off); |
| 2880 | break; |
| 2881 | |
Daniel Borkmann | 045efa8 | 2015-09-15 23:05:42 -0700 | [diff] [blame] | 2882 | case offsetof(struct __sk_buff, tc_classid): |
| 2883 | ctx_off -= offsetof(struct __sk_buff, tc_classid); |
| 2884 | ctx_off += offsetof(struct sk_buff, cb); |
| 2885 | ctx_off += offsetof(struct qdisc_skb_cb, tc_classid); |
Daniel Borkmann | 09c37a2 | 2016-03-16 01:42:49 +0100 | [diff] [blame] | 2886 | if (type == BPF_WRITE) |
| 2887 | *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off); |
| 2888 | else |
| 2889 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off); |
Daniel Borkmann | 045efa8 | 2015-09-15 23:05:42 -0700 | [diff] [blame] | 2890 | break; |
| 2891 | |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 2892 | case offsetof(struct __sk_buff, data): |
Daniel Borkmann | f035a51 | 2016-09-09 02:45:29 +0200 | [diff] [blame] | 2893 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 2894 | dst_reg, src_reg, |
| 2895 | offsetof(struct sk_buff, data)); |
| 2896 | break; |
| 2897 | |
| 2898 | case offsetof(struct __sk_buff, data_end): |
| 2899 | ctx_off -= offsetof(struct __sk_buff, data_end); |
| 2900 | ctx_off += offsetof(struct sk_buff, cb); |
| 2901 | ctx_off += offsetof(struct bpf_skb_data_end, data_end); |
Daniel Borkmann | f035a51 | 2016-09-09 02:45:29 +0200 | [diff] [blame] | 2902 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), dst_reg, src_reg, |
| 2903 | ctx_off); |
Alexei Starovoitov | db58ba4 | 2016-05-05 19:49:12 -0700 | [diff] [blame] | 2904 | break; |
| 2905 | |
Alexei Starovoitov | d691f9e | 2015-06-04 10:11:54 -0700 | [diff] [blame] | 2906 | case offsetof(struct __sk_buff, tc_index): |
| 2907 | #ifdef CONFIG_NET_SCHED |
| 2908 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2); |
| 2909 | |
| 2910 | if (type == BPF_WRITE) |
| 2911 | *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, |
| 2912 | offsetof(struct sk_buff, tc_index)); |
| 2913 | else |
| 2914 | *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, |
| 2915 | offsetof(struct sk_buff, tc_index)); |
| 2916 | break; |
| 2917 | #else |
| 2918 | if (type == BPF_WRITE) |
| 2919 | *insn++ = BPF_MOV64_REG(dst_reg, dst_reg); |
| 2920 | else |
| 2921 | *insn++ = BPF_MOV64_IMM(dst_reg, 0); |
| 2922 | break; |
| 2923 | #endif |
Alexei Starovoitov | 9bac3d6 | 2015-03-13 11:57:42 -0700 | [diff] [blame] | 2924 | } |
| 2925 | |
| 2926 | return insn - insn_buf; |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 2927 | } |
| 2928 | |
Daniel Borkmann | 374fb54 | 2016-09-09 02:45:30 +0200 | [diff] [blame] | 2929 | static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, int dst_reg, |
| 2930 | int src_reg, int ctx_off, |
| 2931 | struct bpf_insn *insn_buf, |
| 2932 | struct bpf_prog *prog) |
| 2933 | { |
| 2934 | struct bpf_insn *insn = insn_buf; |
| 2935 | |
| 2936 | switch (ctx_off) { |
| 2937 | case offsetof(struct __sk_buff, ifindex): |
| 2938 | BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); |
| 2939 | |
| 2940 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), |
| 2941 | dst_reg, src_reg, |
| 2942 | offsetof(struct sk_buff, dev)); |
| 2943 | *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg, |
| 2944 | offsetof(struct net_device, ifindex)); |
| 2945 | break; |
| 2946 | default: |
| 2947 | return sk_filter_convert_ctx_access(type, dst_reg, src_reg, |
| 2948 | ctx_off, insn_buf, prog); |
| 2949 | } |
| 2950 | |
| 2951 | return insn - insn_buf; |
| 2952 | } |
| 2953 | |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 2954 | static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg, |
| 2955 | int src_reg, int ctx_off, |
| 2956 | struct bpf_insn *insn_buf, |
| 2957 | struct bpf_prog *prog) |
| 2958 | { |
| 2959 | struct bpf_insn *insn = insn_buf; |
| 2960 | |
| 2961 | switch (ctx_off) { |
| 2962 | case offsetof(struct xdp_md, data): |
Daniel Borkmann | f035a51 | 2016-09-09 02:45:29 +0200 | [diff] [blame] | 2963 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data), |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 2964 | dst_reg, src_reg, |
| 2965 | offsetof(struct xdp_buff, data)); |
| 2966 | break; |
| 2967 | case offsetof(struct xdp_md, data_end): |
Daniel Borkmann | f035a51 | 2016-09-09 02:45:29 +0200 | [diff] [blame] | 2968 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 2969 | dst_reg, src_reg, |
| 2970 | offsetof(struct xdp_buff, data_end)); |
| 2971 | break; |
| 2972 | } |
| 2973 | |
| 2974 | return insn - insn_buf; |
| 2975 | } |
| 2976 | |
Daniel Borkmann | d4052c4 | 2015-03-01 12:31:45 +0100 | [diff] [blame] | 2977 | static const struct bpf_verifier_ops sk_filter_ops = { |
Daniel Borkmann | 4936e35 | 2016-05-13 19:08:26 +0200 | [diff] [blame] | 2978 | .get_func_proto = sk_filter_func_proto, |
| 2979 | .is_valid_access = sk_filter_is_valid_access, |
Daniel Borkmann | 374fb54 | 2016-09-09 02:45:30 +0200 | [diff] [blame] | 2980 | .convert_ctx_access = sk_filter_convert_ctx_access, |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 2981 | }; |
| 2982 | |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 2983 | static const struct bpf_verifier_ops tc_cls_act_ops = { |
Daniel Borkmann | 4936e35 | 2016-05-13 19:08:26 +0200 | [diff] [blame] | 2984 | .get_func_proto = tc_cls_act_func_proto, |
| 2985 | .is_valid_access = tc_cls_act_is_valid_access, |
Daniel Borkmann | 374fb54 | 2016-09-09 02:45:30 +0200 | [diff] [blame] | 2986 | .convert_ctx_access = tc_cls_act_convert_ctx_access, |
Daniel Borkmann | 36bbef5 | 2016-09-20 00:26:13 +0200 | [diff] [blame] | 2987 | .gen_prologue = tc_cls_act_prologue, |
Alexei Starovoitov | 608cd71 | 2015-03-26 19:53:57 -0700 | [diff] [blame] | 2988 | }; |
| 2989 | |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 2990 | static const struct bpf_verifier_ops xdp_ops = { |
| 2991 | .get_func_proto = xdp_func_proto, |
| 2992 | .is_valid_access = xdp_is_valid_access, |
| 2993 | .convert_ctx_access = xdp_convert_ctx_access, |
| 2994 | }; |
| 2995 | |
Daniel Borkmann | d4052c4 | 2015-03-01 12:31:45 +0100 | [diff] [blame] | 2996 | static struct bpf_prog_type_list sk_filter_type __read_mostly = { |
Daniel Borkmann | 4936e35 | 2016-05-13 19:08:26 +0200 | [diff] [blame] | 2997 | .ops = &sk_filter_ops, |
| 2998 | .type = BPF_PROG_TYPE_SOCKET_FILTER, |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 2999 | }; |
| 3000 | |
Daniel Borkmann | 96be432 | 2015-03-01 12:31:46 +0100 | [diff] [blame] | 3001 | static struct bpf_prog_type_list sched_cls_type __read_mostly = { |
Daniel Borkmann | 4936e35 | 2016-05-13 19:08:26 +0200 | [diff] [blame] | 3002 | .ops = &tc_cls_act_ops, |
| 3003 | .type = BPF_PROG_TYPE_SCHED_CLS, |
Daniel Borkmann | 96be432 | 2015-03-01 12:31:46 +0100 | [diff] [blame] | 3004 | }; |
| 3005 | |
Daniel Borkmann | 94caee8 | 2015-03-20 15:11:11 +0100 | [diff] [blame] | 3006 | static struct bpf_prog_type_list sched_act_type __read_mostly = { |
Daniel Borkmann | 4936e35 | 2016-05-13 19:08:26 +0200 | [diff] [blame] | 3007 | .ops = &tc_cls_act_ops, |
| 3008 | .type = BPF_PROG_TYPE_SCHED_ACT, |
Daniel Borkmann | 94caee8 | 2015-03-20 15:11:11 +0100 | [diff] [blame] | 3009 | }; |
| 3010 | |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 3011 | static struct bpf_prog_type_list xdp_type __read_mostly = { |
| 3012 | .ops = &xdp_ops, |
| 3013 | .type = BPF_PROG_TYPE_XDP, |
| 3014 | }; |
| 3015 | |
Daniel Borkmann | d4052c4 | 2015-03-01 12:31:45 +0100 | [diff] [blame] | 3016 | static int __init register_sk_filter_ops(void) |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 3017 | { |
Daniel Borkmann | d4052c4 | 2015-03-01 12:31:45 +0100 | [diff] [blame] | 3018 | bpf_register_prog_type(&sk_filter_type); |
Daniel Borkmann | 96be432 | 2015-03-01 12:31:46 +0100 | [diff] [blame] | 3019 | bpf_register_prog_type(&sched_cls_type); |
Daniel Borkmann | 94caee8 | 2015-03-20 15:11:11 +0100 | [diff] [blame] | 3020 | bpf_register_prog_type(&sched_act_type); |
Brenden Blanco | 6a773a1 | 2016-07-19 12:16:47 -0700 | [diff] [blame] | 3021 | bpf_register_prog_type(&xdp_type); |
Daniel Borkmann | 96be432 | 2015-03-01 12:31:46 +0100 | [diff] [blame] | 3022 | |
Alexei Starovoitov | 89aa075 | 2014-12-01 15:06:35 -0800 | [diff] [blame] | 3023 | return 0; |
| 3024 | } |
Daniel Borkmann | d4052c4 | 2015-03-01 12:31:45 +0100 | [diff] [blame] | 3025 | late_initcall(register_sk_filter_ops); |
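Registration is deliberately simple: each bpf_prog_type_list above is linked into a global list that the bpf(2) syscall consults when a program is loaded with a given BPF_PROG_TYPE_* value, and late_initcall() defers this until the rest of the system is up. A further program type would follow the same pattern; the foo names below are hypothetical:

/* Hypothetical: foo_ops and BPF_PROG_TYPE_FOO do not exist here. */
static struct bpf_prog_type_list foo_type __read_mostly = {
	.ops	= &foo_ops,
	.type	= BPF_PROG_TYPE_FOO,
};

static int __init register_foo_ops(void)
{
	bpf_register_prog_type(&foo_type);
	return 0;
}
late_initcall(register_foo_ops);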
| 3026 | |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 3027 | int sk_detach_filter(struct sock *sk) |
Pavel Emelyanov | 55b3332 | 2007-10-17 21:21:26 -0700 | [diff] [blame] | 3028 | { |
| 3029 | int ret = -ENOENT; |
| 3030 | struct sk_filter *filter; |
| 3031 | |
Vincent Bernat | d59577b | 2013-01-16 22:55:49 +0100 | [diff] [blame] | 3032 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
| 3033 | return -EPERM; |
| 3034 | |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 3035 | filter = rcu_dereference_protected(sk->sk_filter, |
| 3036 | lockdep_sock_is_held(sk)); |
Pavel Emelyanov | 55b3332 | 2007-10-17 21:21:26 -0700 | [diff] [blame] | 3037 | if (filter) { |
Stephen Hemminger | a9b3cd7 | 2011-08-01 16:19:00 +0000 | [diff] [blame] | 3038 | RCU_INIT_POINTER(sk->sk_filter, NULL); |
Eric Dumazet | 46bcf14 | 2010-12-06 09:29:43 -0800 | [diff] [blame] | 3039 | sk_filter_uncharge(sk, filter); |
Pavel Emelyanov | 55b3332 | 2007-10-17 21:21:26 -0700 | [diff] [blame] | 3040 | ret = 0; |
| 3041 | } |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3042 | |
Pavel Emelyanov | 55b3332 | 2007-10-17 21:21:26 -0700 | [diff] [blame] | 3043 | return ret; |
| 3044 | } |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 3045 | EXPORT_SYMBOL_GPL(sk_detach_filter); |
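From user space this path is reached through setsockopt() with SO_DETACH_FILTER; the option value itself is ignored. A minimal sketch:

#include <stdio.h>
#include <sys/socket.h>

/* Detach whatever classic BPF filter is attached to fd. Expect
 * ENOENT when no filter is attached, and EPERM when the socket was
 * locked with SO_LOCK_FILTER (SOCK_FILTER_LOCKED above).
 */
static int detach_filter(int fd)
{
	int dummy = 0;

	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
		       &dummy, sizeof(dummy)) < 0) {
		perror("SO_DETACH_FILTER");
		return -1;
	}
	return 0;
}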
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3046 | |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3047 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, |
| 3048 | unsigned int len) |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3049 | { |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3050 | struct sock_fprog_kern *fprog; |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3051 | struct sk_filter *filter; |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3052 | int ret = 0; |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3053 | |
| 3054 | lock_sock(sk); |
| 3055 | filter = rcu_dereference_protected(sk->sk_filter, |
Hannes Frederic Sowa | 8ced425 | 2016-04-05 17:10:16 +0200 | [diff] [blame] | 3056 | lockdep_sock_is_held(sk)); |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3057 | if (!filter) |
| 3058 | goto out; |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3059 | |
| 3060 | /* We're copying the filter that was originally attached,
Daniel Borkmann | 93d08b6 | 2015-10-02 12:06:03 +0200 | [diff] [blame] | 3061 | * so no conversion/decode is needed anymore. eBPF programs
| 3062 | * that have no original program cannot be dumped through this.
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3063 | */ |
Daniel Borkmann | 93d08b6 | 2015-10-02 12:06:03 +0200 | [diff] [blame] | 3064 | ret = -EACCES; |
Alexei Starovoitov | 7ae457c | 2014-07-30 20:34:16 -0700 | [diff] [blame] | 3065 | fprog = filter->prog->orig_prog; |
Daniel Borkmann | 93d08b6 | 2015-10-02 12:06:03 +0200 | [diff] [blame] | 3066 | if (!fprog) |
| 3067 | goto out; |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3068 | |
| 3069 | ret = fprog->len; |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3070 | if (!len) |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3071 | /* User space is only enquiring about the number of filter blocks. */
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3072 | goto out; |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3073 | |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3074 | ret = -EINVAL; |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3075 | if (len < fprog->len) |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3076 | goto out; |
| 3077 | |
| 3078 | ret = -EFAULT; |
Alexei Starovoitov | 009937e | 2014-07-30 20:34:13 -0700 | [diff] [blame] | 3079 | if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3080 | goto out; |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3081 | |
Daniel Borkmann | a3ea269 | 2014-03-28 18:58:19 +0100 | [diff] [blame] | 3082 | /* The API expects the number of filter blocks to be
| 3083 | * returned here, not the number of bytes.
| 3084 | */ |
| 3085 | ret = fprog->len; |
Pavel Emelyanov | a8fc927 | 2012-11-01 02:01:48 +0000 | [diff] [blame] | 3086 | out: |
| 3087 | release_sock(sk); |
| 3088 | return ret; |
| 3089 | } |
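The matching user-space interface is getsockopt() with SO_GET_FILTER, and the kernel code above dictates a two-pass dance: a first call with an optlen of zero only reports the number of filter blocks, and a second call with a large enough buffer copies the original program out. Note that optlen is counted in sock_filter blocks, not bytes. A sketch:

#include <stdlib.h>
#include <sys/socket.h>
#include <linux/filter.h>

/* Dump the classic BPF program attached to fd; returns NULL on error.
 * EACCES means an eBPF-only program with no classic original;
 * EINVAL means the supplied buffer was too small.
 */
static struct sock_filter *dump_filter(int fd, socklen_t *nblocks)
{
	struct sock_filter *insns;
	socklen_t len = 0;

	/* Pass 1: learn the block count. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len) < 0)
		return NULL;

	insns = calloc(len, sizeof(*insns));
	if (!insns)
		return NULL;

	/* Pass 2: copy out the original sock_filter array. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &len) < 0) {
		free(insns);
		return NULL;
	}

	*nblocks = len;
	return insns;
}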