/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1ULL << 31)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	INT_MAX
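
/* Worked illustration of the limits above (numbers chosen here for
 * illustration, not taken from any particular program): with
 * umax_value <= BPF_MAX_VAR_OFF = 2^31, and off and size both ints
 * (magnitude < 2^31), the sum umax_value + (int)off + (int)size is bounded in
 * magnitude by 3 * 2^31 < 2^33, nowhere near the 2^64 wrap-around point, so
 * the verifier's offset arithmetic stays exact.
 */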

enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
};
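
/* Illustrative sketch of the liveness tracking above (a made-up
 * two-instruction fragment, not taken from a real program):
 *
 *	r0 = *(u32 *)(r1 + 0)	// reads r1: r1 becomes REG_LIVE_READ, so the
 *				// value r1 had in the parent state matters;
 *				// writes r0: r0 becomes REG_LIVE_WRITTEN
 *	r2 = 0			// writes r2 before any read in this branch,
 *				// screening off the parent's r2: later reads
 *				// of r2 no longer mark the parent's copy live
 */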

struct bpf_reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == PTR_TO_PACKET */
		u16 range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 */
	u32 id;
	/* Ordering of fields matters. See states_equal() */
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	/* This field must be last, for states_equal() reasons. */
	enum bpf_reg_liveness live;
};
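
/* Illustrative sketch only: a made-up helper (not part of the verifier API)
 * showing how the fields above combine to describe a register holding the
 * known constant @imm. var_off pins every bit and all four bounds collapse
 * to a single point; the verifier's own helpers in kernel/bpf/verifier.c do
 * the equivalent internally.
 */
static inline void bpf_reg_state_example_const(struct bpf_reg_state *reg,
					       u64 imm)
{
	*reg = (struct bpf_reg_state){ .type = SCALAR_VALUE };
	reg->var_off = tnum_const(imm);	/* all 64 bits known exactly */
	reg->smin_value = (s64)imm;	/* signed range is the single point imm */
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;		/* unsigned range likewise */
	reg->umax_value = imm;
	/* off/id are 0 and live is REG_LIVE_NONE from the zero-initializer;
	 * range/map_ptr in the union are meaningless for SCALAR_VALUE.
	 */
}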

enum bpf_stack_slot_type {
	STACK_INVALID,		/* nothing was stored in this stack slot */
	STACK_SPILL,		/* register spilled into stack */
	STACK_MISC		/* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_verifier_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
	struct bpf_verifier_state *parent;
};
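
/* Illustration of how the two stack-tracking arrays cooperate (assumed
 * indexing, roughly what check_stack_write() in kernel/bpf/verifier.c does):
 * spilling a register with *(u64 *)(r10 - 8) = r6 marks the eight bytes
 * stack_slot_type[MAX_BPF_STACK - 8 .. MAX_BPF_STACK - 1] as STACK_SPILL and
 * copies r6's full bpf_reg_state into
 * spilled_regs[(MAX_BPF_STACK - 8) / BPF_REG_SIZE], so a later fill
 * r6 = *(u64 *)(r10 - 8) can restore the tracked type and bounds.
 */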

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
};

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
	};
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	int converted_op_size; /* the valid value width after perceived conversion */
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

struct bpf_verifier_env;
struct bpf_ext_analyzer_ops {
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	struct bpf_verifier_state cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
	void *analyzer_priv; /* pointer to external analyzer's private data */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
	bool seen_direct_write;
	bool varlen_map_value_access;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
};

int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
		 void *priv);
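
/* Usage sketch for the external analyzer interface above (caller-side code,
 * with names invented for illustration; a real user would be e.g. a driver
 * validating a program for hardware offload):
 *
 *	static int my_insn_hook(struct bpf_verifier_env *env,
 *				int insn_idx, int prev_insn_idx)
 *	{
 *		struct my_priv *priv = env->analyzer_priv;
 *
 *		// inspect env->cur_state.regs[] for the instruction at
 *		// insn_idx; a non-zero return aborts verification
 *		return 0;
 *	}
 *
 *	static const struct bpf_ext_analyzer_ops my_ops = {
 *		.insn_hook	= my_insn_hook,
 *	};
 *
 *	err = bpf_analyzer(prog, &my_ops, priv);
 *
 * bpf_analyzer() walks @prog with the verifier machinery and calls
 * ->insn_hook() for each instruction it visits.
 */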

#endif /* _LINUX_BPF_VERIFIER_H */