/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;

#ifdef CONFIG_CGROUP_BPF

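/*
 * Global static key: the cgroup-bpf hooks below stay patched out as
 * NOPs until at least one program is attached somewhere in the system.
 */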
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

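/*
 * Per-cpu pointers to the storages of the program currently running on
 * this cpu, one slot per storage type.  They are published by
 * bpf_cgroup_storage_set() just before a program runs, so that the
 * bpf_get_local_storage() helper can find them.
 */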
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

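/* RCU-freed buffer holding the data of a shared cgroup storage. */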
struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

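/*
 * One storage instance, keyed by (cgroup, attach type).  Depending on
 * the map type it owns either a shared buffer or a per-cpu allocation.
 */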
struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

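/* One attached program, plus its storages, on a cgroup's per-type list. */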
struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup and their attach flags.
	 * When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list has
	 * either zero or one element; with BPF_F_ALLOW_MULTI it can
	 * hold up to BPF_CGROUP_MAX_PROGS entries.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};

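/* Called by the cgroup core when a cgroup is created or released. */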
void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

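/*
 * Low-level attach/detach/query; the caller must hold cgroup_mutex.
 * Most callers want the cgroup_bpf_*() wrappers below instead.
 */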
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

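/*
 * Runtime entry points: run the effective programs of @type for the
 * relevant cgroup.  Normally invoked through the BPF_CGROUP_RUN_*
 * wrappers below, which compile to almost nothing while no program is
 * attached anywhere in the system.
 */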
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

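/* Map a cgroup storage map to its storage type (shared vs. per-cpu). */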
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

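/*
 * Publish @storage through the per-cpu pointers above; expected to be
 * called immediately before a program runs (see BPF_PROG_RUN_ARRAY in
 * linux/bpf.h) so that bpf_get_local_storage() returns the right data.
 */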
static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}

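/*
 * Storage life cycle: allocate and free instances, link/unlink them to
 * a cgroup, and bind/release a storage map to/from a program.
 */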
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

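/*
 * The egress hook runs only when @sk is the socket owning @skb; request
 * sockets are first mapped to the corresponding full socket, since
 * programs may only run on full sockets.
 */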
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

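/*
 * sock_addr hooks: run on bind/connect/sendmsg with the user-supplied
 * sockaddr.  The _LOCK variant takes the socket lock around the run.
 */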
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})


#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled &&	       \
					    (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

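/*
 * sock_ops hook: request sockets are mapped to their full (listener)
 * socket; anything that is still not a full socket is skipped.
 */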
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

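/* Device access check, run for the BPF_CGROUP_DEVICE attach type. */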
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})
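
/* Attach/detach/query helpers shared with the bpf(2) syscall code. */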
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else /* !CONFIG_CGROUP_BPF */

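/*
 * CONFIG_CGROUP_BPF=n: no-op stubs, so that call sites need no #ifdefs.
 * The attach/detach/query stubs fail with -EINVAL and all run macros
 * evaluate to 0.
 */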
struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */