/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);

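/* Illustrative note (not part of the kernel API): a classic filter's
 * return value is the number of bytes to keep. E.g. a program ending in
 *
 *	BPF_STMT(BPF_RET | BPF_K, 96)
 *
 * makes sk_filter() trim the skb to at most 96 bytes via pskb_trim(),
 * while a return value of 0 drops the packet with -EPERM.
 */
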
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
}

static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

/* note that this only generates 32-bit random numbers */
static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	return prandom_u32();
}

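/* The helpers above follow the eBPF calling convention: up to five u64
 * arguments arrive in registers R1-R5, so 'ctx' is R1 (the skb), 'a' is
 * R2 and 'x' is R3, with r4/r5 unused. convert_bpf_extensions() below
 * emits the register moves and a BPF_EMIT_CALL() to reach them.
 */
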
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

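/* Worked example (derived from the cases above): for
 * SKF_AD_VLAN_TAG_PRESENT with dst_reg == BPF_REG_A and
 * src_reg == BPF_REG_CTX, the emitted sequence is
 *
 *	A = *(u16 *) (CTX + offsetof(struct sk_buff, vlan_tci));
 *	A >>= 12;
 *	A &= 1;
 *
 * and the returned count of 3 lets the caller advance its insn cursor.
 */
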
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
		BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(__get_random_u32);
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

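/* Illustration: the classic ancillary load
 *
 *	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL)
 *
 * is rewritten by convert_bpf_extensions() into two eBPF insns:
 *
 *	A = *(u16 *) (CTX + offsetof(struct sk_buff, protocol));
 *	A = ntohs(A);
 */
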
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF instructions to 'bpf_insn' style
 * extended BPF instructions. Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 *
 * User BPF's register A is mapped to our BPF register 6, user BPF
 * register X is mapped to BPF register 7; frame pointer is always
 * register 10; Context 'void *ctx' is stored in register 1, that is,
 * for socket filters: ctx == 'struct sk_buff *', for seccomp:
 * ctx == 'struct seccomp_data *'.
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_insn *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	if (new_insn)
		*new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	new_insn++;

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_X;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K, RET_A are remapped into 2 insns. */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			*insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
						BPF_K : BPF_X, BPF_REG_0,
						BPF_REG_A, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

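/* Example of the jump split performed above: the classic conditional
 *
 *	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 2, 5)
 *
 * has two non-fallthrough targets, while an eBPF jump has only one, so
 * it becomes "if (A == K) goto jt_target" followed by an unconditional
 * "goto jf_target", with both offsets resolved through addrs[].
 */
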
/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell if not previously written, and we check all branches to be
 * sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

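/* E.g. check_load_and_stores() rejects this two-insn filter with
 * -EINVAL, because mem[0] is read before any BPF_ST/BPF_STX wrote it:
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 */
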
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

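/* A minimal program that passes bpf_check_classic() is the classic
 * "accept everything" filter:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 */
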
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by bpf_prog_realloc().
		 */
		goto out_err_free;

	bpf_prog_select_runtime(fp);

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = false;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

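/* Sketch of in-kernel usage (see e.g. the team driver for a real
 * caller); names here are illustrative only:
 *
 *	struct sock_fprog_kern fprog = { .len = len, .filter = insns };
 *	struct bpf_prog *fp;
 *
 *	if (bpf_prog_create(&fp, &fprog))
 *		return -EINVAL;
 *	...run it, e.g. via SK_RUN_FILTER()...
 *	bpf_prog_destroy(fp);
 */
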
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;
	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	prog = bpf_prog_alloc(bpf_fsize, 0);
	if (!prog)
		return -ENOMEM;

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return -EFAULT;
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return -ENOMEM;
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	prog = bpf_prepare_filter(prog, NULL);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

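/* Userspace reaches sk_attach_filter() via setsockopt(2), e.g.:
 *
 *	struct sock_filter code[] = { ... };
 *	struct sock_fprog bpf = {
 *		.len = ARRAY_SIZE(code),
 *		.filter = code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */
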
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get(ufd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

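/* The eBPF counterpart: userspace loads a BPF_PROG_TYPE_SOCKET_FILTER
 * program via the bpf(2) syscall and attaches the returned fd with
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd));
 *
 * which lands in sk_attach_bpf() above.
 */
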
Alexei Starovoitov91bc48222015-04-01 17:12:13 -07001242#define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1)
1243
1244static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
Alexei Starovoitov608cd712015-03-26 19:53:57 -07001245{
1246 struct sk_buff *skb = (struct sk_buff *) (long) r1;
Alexei Starovoitova1661512015-04-15 12:55:45 -07001247 int offset = (int) r2;
Alexei Starovoitov608cd712015-03-26 19:53:57 -07001248 void *from = (void *) (long) r3;
1249 unsigned int len = (unsigned int) r4;
1250 char buf[16];
1251 void *ptr;
1252
1253 /* bpf verifier guarantees that:
1254 * 'from' pointer points to bpf program stack
1255 * 'len' bytes of it were initialized
1256 * 'len' > 0
1257 * 'skb' is a valid pointer to 'struct sk_buff'
1258 *
1259 * so check for invalid 'offset' and too large 'len'
1260 */
Alexei Starovoitova1661512015-04-15 12:55:45 -07001261 if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
Alexei Starovoitov608cd712015-03-26 19:53:57 -07001262 return -EFAULT;
1263
Alexei Starovoitova1661512015-04-15 12:55:45 -07001264 if (unlikely(skb_cloned(skb) &&
Alexei Starovoitov34312052015-06-04 10:11:53 -07001265 !skb_clone_writable(skb, offset + len)))
Alexei Starovoitov608cd712015-03-26 19:53:57 -07001266 return -EFAULT;
1267
1268 ptr = skb_header_pointer(skb, offset, len, buf);
1269 if (unlikely(!ptr))
1270 return -EFAULT;
1271
Alexei Starovoitov91bc48222015-04-01 17:12:13 -07001272 if (BPF_RECOMPUTE_CSUM(flags))
1273 skb_postpull_rcsum(skb, ptr, len);
Alexei Starovoitov608cd712015-03-26 19:53:57 -07001274
1275 memcpy(ptr, from, len);
1276
1277 if (ptr == buf)
1278 /* skb_store_bits cannot return -EFAULT here */
1279 skb_store_bits(skb, offset, ptr, len);
1280
Alexei Starovoitov91bc48222015-04-01 17:12:13 -07001281 if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
Alexei Starovoitov608cd712015-03-26 19:53:57 -07001282 skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
1283 return 0;
1284}
1285
1286const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1287 .func = bpf_skb_store_bytes,
1288 .gpl_only = false,
1289 .ret_type = RET_INTEGER,
1290 .arg1_type = ARG_PTR_TO_CTX,
1291 .arg2_type = ARG_ANYTHING,
1292 .arg3_type = ARG_PTR_TO_STACK,
1293 .arg4_type = ARG_CONST_STACK_SIZE,
Alexei Starovoitov91bc48222015-04-01 17:12:13 -07001294 .arg5_type = ARG_ANYTHING,
1295};
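
/* Usage sketch (from a BPF program, illustrative only): overwrite the
 * destination MAC address in place. Flag bit 0 (BPF_RECOMPUTE_CSUM)
 * would additionally keep skb->csum valid on CHECKSUM_COMPLETE
 * devices; it is unnecessary here since the MAC header is normally
 * outside the checksummed area.
 *
 *	__u8 dmac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	bpf_skb_store_bytes(skb, offsetof(struct ethhdr, h_dest),
 *			    dmac, ETH_ALEN, 0);
 */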

#define BPF_HEADER_FIELD_SIZE(flags)	((flags) & 0x0f)
#define BPF_IS_PSEUDO_HEADER(flags)	((flags) & 0x10)

static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	int offset = (int) r2;
	__sum16 sum, *ptr;

	if (unlikely((u32) offset > 0xffff))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + sizeof(sum))))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
	if (unlikely(!ptr))
		return -EFAULT;

	switch (BPF_HEADER_FIELD_SIZE(flags)) {
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	if (ptr == &sum)
		/* skb_store_bits() is guaranteed not to return -EFAULT here */
		skb_store_bits(skb, offset, ptr, sizeof(sum));

	return 0;
}

const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
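
/* Usage sketch (from a BPF program, illustrative only): after storing
 * a new IPv4 source address, repair the IP header checksum in place.
 * The low flag bits carry the width of the rewritten field, 4 bytes
 * here, selecting csum_replace4() above.
 *
 *	bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
 *			    old_saddr, new_saddr, sizeof(new_saddr));
 */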

static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1;
	u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
	int offset = (int) r2;
	__sum16 sum, *ptr;

	if (unlikely((u32) offset > 0xffff))
		return -EFAULT;

	if (unlikely(skb_cloned(skb) &&
		     !skb_clone_writable(skb, offset + sizeof(sum))))
		return -EFAULT;

	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
	if (unlikely(!ptr))
		return -EFAULT;

	switch (BPF_HEADER_FIELD_SIZE(flags)) {
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (ptr == &sum)
		/* skb_store_bits() is guaranteed not to return -EFAULT here */
		skb_store_bits(skb, offset, ptr, sizeof(sum));

	return 0;
}

const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};
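
/* Usage sketch (from a BPF program, illustrative only): TCP and UDP
 * checksums cover a pseudo header that includes the IP addresses, so
 * an address rewrite must pass the pseudo header flag (0x10) OR'ed
 * into the field size. tcp_csum_off is assumed to be computed by the
 * program, e.g. ETH_HLEN + the IP header length + offsetof(struct
 * tcphdr, check).
 *
 *	bpf_l4_csum_replace(skb, tcp_csum_off, old_saddr, new_saddr,
 *			    0x10 | sizeof(new_saddr));
 */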

#define BPF_IS_REDIRECT_INGRESS(flags)	((flags) & 1)

static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	if (unlikely(!(dev->flags & IFF_UP)))
		return -EINVAL;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!skb2))
		return -ENOMEM;

	if (BPF_IS_REDIRECT_INGRESS(flags))
		return dev_forward_skb(dev, skb2);

	skb2->dev = dev;
	return dev_queue_xmit(skb2);
}

const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};
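
/* Usage sketch (from a tc/BPF program, illustrative only): mirror each
 * matched packet out of ifindex 2 while the original skb continues on
 * its way. Flag bit 0 would redirect the clone to the ingress path of
 * the target device via dev_forward_skb() instead.
 *
 *	bpf_clone_redirect(skb, 2, 0);
 *	return TC_ACT_OK;
 */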

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}
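
/* Usage sketch (illustrative only): a socket filter exercising the map
 * helpers exposed above, counting packets per IP protocol in an array
 * map. my_map is an assumed BPF_MAP_TYPE_ARRAY defined by the program,
 * and load_byte() an assumed byte-access wrapper.
 *
 *	u32 proto = load_byte(skb, ETH_HLEN +
 *			      offsetof(struct iphdr, protocol));
 *	long *value = bpf_map_lookup_elem(&my_map, &proto);
 *	if (value)
 *		__sync_fetch_and_add(value, 1);
 */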

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	default:
		return sk_filter_func_proto(func_id);
	}
}

static bool __is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* check bounds */
	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	/* all __sk_buff fields are __u32 */
	if (size != 4)
		return false;

	return true;
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]):
			break;
		default:
			return false;
		}
	}

	return __is_valid_access(off, size, type);
}

static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, mark):
		case offsetof(struct __sk_buff, tc_index):
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]):
			break;
		default:
			return false;
		}
	}
	return __is_valid_access(off, size, type);
}
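
/* For instance, under the two checks above a socket filter may read
 * __sk_buff::mark but only tc programs may also write it, while the
 * scratch space cb[] is writable for both and skb->len for neither:
 *
 *	__u32 mark = skb->mark;		// BPF_READ: allowed for both
 *	skb->mark = mark | 1;		// BPF_WRITE: tc_cls_act only
 *	skb->cb[1] = 0;			// BPF_WRITE: allowed for both
 *	skb->len = 0;			// BPF_WRITE: always rejected
 */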

static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
				      int src_reg, int ctx_off,
				      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (ctx_off) {
	case offsetof(struct __sk_buff, len):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, len));
		break;

	case offsetof(struct __sk_buff, protocol):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, protocol));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_proto));
		break;

	case offsetof(struct __sk_buff, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, priority));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, skb_iif));
		break;

	case offsetof(struct __sk_buff, ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);

		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
				      dst_reg, src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
				      offsetof(struct net_device, ifindex));
		break;

	case offsetof(struct __sk_buff, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
					      offsetof(struct sk_buff, mark));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, queue_mapping):
		return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_present):
		return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, vlan_tci):
		return convert_skb_access(SKF_AD_VLAN_TAG,
					  dst_reg, src_reg, insn);

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetof(struct __sk_buff, cb[4]):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);

		ctx_off -= offsetof(struct __sk_buff, cb[0]);
		ctx_off += offsetof(struct sk_buff, cb);
		ctx_off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
					      offsetof(struct sk_buff, tc_index));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
					      offsetof(struct sk_buff, tc_index));
		break;
#else
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(dst_reg, 0);
		break;
#endif
	}

	return insn - insn_buf;
}
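
/* As an illustration, a program load of __sk_buff::ifindex, i.e.
 * BPF_LDX_MEM(BPF_W, dst, ctx, offsetof(struct __sk_buff, ifindex)),
 * is rewritten by the case above into roughly (pseudo-code, assuming
 * a 64-bit skb->dev pointer):
 *
 *	dst = *(u64 *)(ctx + offsetof(struct sk_buff, dev));
 *	if (dst == 0)
 *		goto past_next_insn;
 *	dst = *(u32 *)(dst + offsetof(struct net_device, ifindex));
 */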

static const struct bpf_verifier_ops sk_filter_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_net_convert_ctx_access,
};

static const struct bpf_verifier_ops tc_cls_act_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= bpf_net_convert_ctx_access,
};

static struct bpf_prog_type_list sk_filter_type __read_mostly = {
	.ops	= &sk_filter_ops,
	.type	= BPF_PROG_TYPE_SOCKET_FILTER,
};

static struct bpf_prog_type_list sched_cls_type __read_mostly = {
	.ops	= &tc_cls_act_ops,
	.type	= BPF_PROG_TYPE_SCHED_CLS,
};

static struct bpf_prog_type_list sched_act_type __read_mostly = {
	.ops	= &tc_cls_act_ops,
	.type	= BPF_PROG_TYPE_SCHED_ACT,
};

static int __init register_sk_filter_ops(void)
{
	bpf_register_prog_type(&sk_filter_type);
	bpf_register_prog_type(&sched_cls_type);
	bpf_register_prog_type(&sched_act_type);

	return 0;
}
late_initcall(register_sk_filter_ops);

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
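
/* Usage sketch (illustrative only, not part of this file): user space
 * detaches whatever filter is currently attached; the option value is
 * ignored.
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0) < 0)
 *		perror("SO_DETACH_FILTER");	// ENOENT if none attached
 */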

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that was originally attached,
	 * so no conversion/decode is needed anymore.
	 */
	fprog = filter->prog->orig_prog;

	ret = fprog->len;
	if (!len)
		/* User space only asks for the number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API asks us to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}
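
/* Usage sketch (illustrative only, not part of this file): user space
 * retrieves the originally attached classic filter in two steps, first
 * asking for the block count with a zero length, then for the program
 * itself. Note that the length is counted in filter blocks, not bytes.
 *
 *	socklen_t nblocks = 0;
 *
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &nblocks);
 *	struct sock_filter *insns = calloc(nblocks, sizeof(*insns));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &nblocks);
 */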