/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup together with their attach flags:
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list holds
	 * either zero or one element;
	 * when BPF_F_ALLOW_MULTI it can hold up to BPF_CGROUP_MAX_PROGS entries
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for the effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
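
/*
 * Illustrative sketch only (not part of the original header): a hypothetical
 * in-kernel caller that already holds references to a cgroup and a verified
 * program could attach it in multi-prog mode roughly as below; the helper
 * name example_attach_egress() is made up for the example.
 *
 *	static int example_attach_egress(struct cgroup *cgrp,
 *					 struct bpf_prog *prog)
 *	{
 *		// cgroup_bpf_attach() takes cgroup_mutex internally
 *		return cgroup_bpf_attach(cgrp, prog, BPF_CGROUP_INET_EGRESS,
 *					 BPF_F_ALLOW_MULTI);
 *	}
 */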

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   enum bpf_attach_type type);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
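
/*
 * Illustrative sketch only: the program-run path is expected to populate the
 * per-cpu storage pointers before invoking the attached programs, along the
 * lines below. example_run_one() is a made-up name; the real invocation goes
 * through the prog-array run macros in linux/bpf.h.
 *
 *	static u32 example_run_one(struct bpf_prog_list *pl, void *ctx)
 *	{
 *		// make this prog's cgroup storage visible to BPF helpers
 *		bpf_cgroup_storage_set(pl->storage);
 *		return BPF_PROG_RUN(pl->prog, ctx);
 *	}
 */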

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		\
						    BPF_CGROUP_INET_INGRESS); \
									\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		\
		typeof(sk) __sk = sk_to_full_sk(sk);			\
		if (sk_fullsock(__sk))					\
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	\
						BPF_CGROUP_INET_EGRESS); \
	}								\
	__ret;								\
})
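
/*
 * Illustrative sketch only: a hypothetical egress call site would run the
 * filter and drop the packet on a non-zero verdict. example_xmit() and the
 * error handling shown here are assumptions for the example, not taken from
 * the real callers of these macros.
 *
 *	static int example_xmit(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *
 *		if (ret) {
 *			kfree_skb(skb);
 *			return ret;
 *		}
 *		return 0;
 *	}
 */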

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled) {					\
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		\
	}								\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  NULL);	\
	__ret;								\
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled) {					\
		lock_sock(sk);						\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  t_ctx);	\
		release_sock(sk);					\
	}								\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			\
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	\
		if (__sk && sk_fullsock(__sk))				\
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	\
								 sock_ops, \
							BPF_CGROUP_SOCK_OPS); \
	}								\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	\
							  BPF_CGROUP_DEVICE); \
									\
	__ret;								\
})
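
/*
 * Illustrative sketch only: a hypothetical device-access check, e.g. before
 * opening a character device, could gate the operation on the verdict of the
 * attached BPF_CGROUP_DEVICE programs. example_may_open_chrdev() is a made-up
 * helper for the example.
 *
 *	static int example_may_open_chrdev(u32 major, u32 minor, short access)
 *	{
 *		// non-zero (-EPERM) means a cgroup program denied the access
 *		return BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(BPF_DEVCG_DEV_CHAR,
 *							 major, minor, access);
 *	}
 */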

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       BPF_CGROUP_SYSCTL); \
	__ret;								\
})
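
/*
 * Illustrative sketch only: a hypothetical sysctl access path could consult
 * the attached BPF_CGROUP_SYSCTL programs before allowing the read or write.
 * example_sysctl_permission() is a made-up helper for the example.
 *
 *	static int example_sysctl_permission(struct ctl_table_header *head,
 *					     struct ctl_table *table, int write)
 *	{
 *		// returns 0 when allowed, -EPERM when a program rejected it
 *		return BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write);
 *	}
 */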

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */