Daniel Borkmann | 41d6e33 | 2015-12-02 00:25:36 +0100 | [diff] [blame] | 1 | #ifndef __BPF_API__ |
| 2 | #define __BPF_API__ |
| 3 | |
| 4 | /* Note: |
| 5 | * |
| 6 | * This file can be included into eBPF kernel programs. It contains |
| 7 | * a couple of useful helper functions, map/section ABI (bpf_elf.h), |
| 8 | * misc macros and some eBPF specific LLVM built-ins. |
| 9 | */ |
| 10 | |
| 11 | #include <stdint.h> |
| 12 | |
| 13 | #include <linux/pkt_cls.h> |
| 14 | #include <linux/bpf.h> |
| 15 | #include <linux/filter.h> |
| 16 | |
| 17 | #include <asm/byteorder.h> |
| 18 | |
| 19 | #include "bpf_elf.h" |
| 20 | |
/** Misc macros. */

/* Turn a macro argument into a string literal, e.g. __stringify(42) -> "42". */
#ifndef __stringify
# define __stringify(X)		#X
#endif

/* Suppress -Wunused warnings on declarations that may go unreferenced
 * (every BPF_FUNC declaration below is tagged with this).
 */
#ifndef __maybe_unused
# define __maybe_unused		__attribute__((__unused__))
#endif

/* Byte offset of MEMBER within TYPE, via the compiler built-in. */
#ifndef offsetof
# define offsetof(TYPE, MEMBER)	__builtin_offsetof(TYPE, MEMBER)
#endif

/* Branch-prediction hints: tell the compiler which way the condition
 * is expected to go. !!(X) normalizes X to 0/1.
 */
#ifndef likely
# define likely(X)		__builtin_expect(!!(X), 1)
#endif

#ifndef unlikely
# define unlikely(X)		__builtin_expect(!!(X), 0)
#endif

/* Host/network byte-order conversion, mapped to the __constant_* forms
 * from asm/byteorder.h. NOTE(review): the __constant_* variants are
 * intended for compile-time constants — confirm before relying on them
 * for runtime values.
 */
#ifndef htons
# define htons(X)		__constant_htons((X))
#endif

#ifndef ntohs
# define ntohs(X)		__constant_ntohs((X))
#endif

#ifndef htonl
# define htonl(X)		__constant_htonl((X))
#endif

#ifndef ntohl
# define ntohl(X)		__constant_ntohl((X))
#endif

/* Force inlining of helpers; presumably required because eBPF programs
 * of this vintage cannot make real function calls — TODO confirm.
 */
#ifndef __inline__
# define __inline__		__attribute__((always_inline))
#endif
| 62 | |
Daniel Borkmann | 41d6e33 | 2015-12-02 00:25:36 +0100 | [diff] [blame] | 63 | /** Section helper macros. */ |
| 64 | |
| 65 | #ifndef __section |
| 66 | # define __section(NAME) \ |
| 67 | __attribute__((section(NAME), used)) |
| 68 | #endif |
| 69 | |
| 70 | #ifndef __section_tail |
| 71 | # define __section_tail(ID, KEY) \ |
| 72 | __section(__stringify(ID) "/" __stringify(KEY)) |
| 73 | #endif |
| 74 | |
| 75 | #ifndef __section_cls_entry |
| 76 | # define __section_cls_entry \ |
| 77 | __section(ELF_SECTION_CLASSIFIER) |
| 78 | #endif |
| 79 | |
| 80 | #ifndef __section_act_entry |
| 81 | # define __section_act_entry \ |
| 82 | __section(ELF_SECTION_ACTION) |
| 83 | #endif |
| 84 | |
| 85 | #ifndef __section_license |
| 86 | # define __section_license \ |
| 87 | __section(ELF_SECTION_LICENSE) |
| 88 | #endif |
| 89 | |
| 90 | #ifndef __section_maps |
| 91 | # define __section_maps \ |
| 92 | __section(ELF_SECTION_MAPS) |
| 93 | #endif |
| 94 | |
| 95 | /** Declaration helper macros. */ |
| 96 | |
| 97 | #ifndef BPF_LICENSE |
| 98 | # define BPF_LICENSE(NAME) \ |
| 99 | char ____license[] __section_license = NAME |
| 100 | #endif |
| 101 | |
Daniel Borkmann | 41d6e33 | 2015-12-02 00:25:36 +0100 | [diff] [blame] | 102 | /** Classifier helper */ |
| 103 | |
| 104 | #ifndef BPF_H_DEFAULT |
| 105 | # define BPF_H_DEFAULT -1 |
| 106 | #endif |
| 107 | |
Daniel Borkmann | 92a3699 | 2016-02-07 02:11:50 +0100 | [diff] [blame] | 108 | /** BPF helper functions for tc. Individual flags are in linux/bpf.h */ |
Daniel Borkmann | 41d6e33 | 2015-12-02 00:25:36 +0100 | [diff] [blame] | 109 | |
| 110 | #ifndef BPF_FUNC |
| 111 | # define BPF_FUNC(NAME, ...) \ |
| 112 | (* NAME)(__VA_ARGS__) __maybe_unused = (void *) BPF_FUNC_##NAME |
| 113 | #endif |
| 114 | |
/* Map access/manipulation */

/* Look up the value stored under 'key'; NULL when absent. */
static void *BPF_FUNC(map_lookup_elem, void *map, const void *key);
/* Insert/update 'key' -> 'value'; 'flags' selects create/update
 * semantics (see BPF_ANY and friends in linux/bpf.h).
 */
static int BPF_FUNC(map_update_elem, void *map, const void *key,
		    const void *value, uint32_t flags);
static int BPF_FUNC(map_delete_elem, void *map, const void *key);

/* Time access */
static uint64_t BPF_FUNC(ktime_get_ns);

/* Debugging */

/* FIXME: __attribute__ ((format(printf, 1, 3))) not possible unless
 * llvm bug https://llvm.org/bugs/show_bug.cgi?id=26243 gets resolved.
 * It would require ____fmt to be made const, which generates a reloc
 * entry (non-map).
 */
static void BPF_FUNC(trace_printk, const char *fmt, int fmt_size, ...);

/* printf-like tracing into the kernel trace pipe. The format string is
 * copied into a stack array because trace_printk needs it in the BPF
 * program's own memory (see FIXME above).
 */
#ifndef printt
# define printt(fmt, ...)						\
	({								\
		char ____fmt[] = fmt;					\
		trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__);	\
	})
#endif

/* Random numbers */
static uint32_t BPF_FUNC(get_prandom_u32);

/* Tail calls */

/* Jump into the program stored at 'index' in the program-array 'map';
 * on success, control does not return to the caller.
 */
static void BPF_FUNC(tail_call, struct __sk_buff *skb, void *map,
		     uint32_t index);

/* System helpers */
static uint32_t BPF_FUNC(get_smp_processor_id);

/* Packet misc meta data */
static uint32_t BPF_FUNC(get_cgroup_classid, struct __sk_buff *skb);
static uint32_t BPF_FUNC(get_route_realm, struct __sk_buff *skb);

/* Packet redirection */

/* Redirect the current packet to 'ifindex'; clone_redirect sends a
 * clone and leaves the original packet in place.
 */
static int BPF_FUNC(redirect, int ifindex, uint32_t flags);
static int BPF_FUNC(clone_redirect, struct __sk_buff *skb, int ifindex,
		    uint32_t flags);

/* Packet manipulation */
static int BPF_FUNC(skb_load_bytes, struct __sk_buff *skb, uint32_t off,
		    void *to, uint32_t len);
static int BPF_FUNC(skb_store_bytes, struct __sk_buff *skb, uint32_t off,
		    const void *from, uint32_t len, uint32_t flags);

/* Incremental IP/L4 checksum fixup after rewriting a packet field;
 * csum_diff computes the delta between two buffers (seeded).
 */
static int BPF_FUNC(l3_csum_replace, struct __sk_buff *skb, uint32_t off,
		    uint32_t from, uint32_t to, uint32_t flags);
static int BPF_FUNC(l4_csum_replace, struct __sk_buff *skb, uint32_t off,
		    uint32_t from, uint32_t to, uint32_t flags);
static int BPF_FUNC(csum_diff, const void *from, uint32_t from_size,
		    const void *to, uint32_t to_size, uint32_t seed);

/* Packet vlan encap/decap */
static int BPF_FUNC(skb_vlan_push, struct __sk_buff *skb, uint16_t proto,
		    uint16_t vlan_tci);
static int BPF_FUNC(skb_vlan_pop, struct __sk_buff *skb);

/* Packet tunnel encap/decap */
static int BPF_FUNC(skb_get_tunnel_key, struct __sk_buff *skb,
		    struct bpf_tunnel_key *to, uint32_t size, uint32_t flags);
static int BPF_FUNC(skb_set_tunnel_key, struct __sk_buff *skb,
		    const struct bpf_tunnel_key *from, uint32_t size,
		    uint32_t flags);

/* Tunnel metadata options (raw byte blob alongside the tunnel key). */
static int BPF_FUNC(skb_get_tunnel_opt, struct __sk_buff *skb,
		    void *to, uint32_t size);
static int BPF_FUNC(skb_set_tunnel_opt, struct __sk_buff *skb,
		    const void *from, uint32_t size);
| 189 | |
Daniel Borkmann | 92a3699 | 2016-02-07 02:11:50 +0100 | [diff] [blame] | 190 | /** LLVM built-ins, mem*() routines work for constant size */ |
Daniel Borkmann | 41d6e33 | 2015-12-02 00:25:36 +0100 | [diff] [blame] | 191 | |
| 192 | #ifndef lock_xadd |
| 193 | # define lock_xadd(ptr, val) ((void) __sync_fetch_and_add(ptr, val)) |
| 194 | #endif |
| 195 | |
Daniel Borkmann | 92a3699 | 2016-02-07 02:11:50 +0100 | [diff] [blame] | 196 | #ifndef memset |
| 197 | # define memset(s, c, n) __builtin_memset((s), (c), (n)) |
| 198 | #endif |
| 199 | |
| 200 | #ifndef memcpy |
| 201 | # define memcpy(d, s, n) __builtin_memcpy((d), (s), (n)) |
| 202 | #endif |
| 203 | |
| 204 | #ifndef memmove |
| 205 | # define memmove(d, s, n) __builtin_memmove((d), (s), (n)) |
| 206 | #endif |
| 207 | |
| 208 | /* FIXME: __builtin_memcmp() is not yet fully useable unless llvm bug |
| 209 | * https://llvm.org/bugs/show_bug.cgi?id=26218 gets resolved. Also |
| 210 | * this one would generate a reloc entry (non-map), otherwise. |
| 211 | */ |
| 212 | #if 0 |
| 213 | #ifndef memcmp |
| 214 | # define memcmp(a, b, n) __builtin_memcmp((a), (b), (n)) |
| 215 | #endif |
| 216 | #endif |
| 217 | |
Daniel Borkmann | 41d6e33 | 2015-12-02 00:25:36 +0100 | [diff] [blame] | 218 | unsigned long long load_byte(void *skb, unsigned long long off) |
| 219 | asm ("llvm.bpf.load.byte"); |
| 220 | |
| 221 | unsigned long long load_half(void *skb, unsigned long long off) |
| 222 | asm ("llvm.bpf.load.half"); |
| 223 | |
| 224 | unsigned long long load_word(void *skb, unsigned long long off) |
| 225 | asm ("llvm.bpf.load.word"); |
| 226 | |
| 227 | #endif /* __BPF_API__ */ |