/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
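
/*
 * cgroup_bpf_enabled is backed by a static key, so the run-time hooks
 * below compile down to a single patched-out branch until a cgroup BPF
 * program is actually attached somewhere on the system.
 */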

DECLARE_PER_CPU(void *, bpf_cgroup_storage);

struct bpf_cgroup_storage_map;

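/*
 * Per-cgroup storage for BPF_MAP_TYPE_CGROUP_STORAGE maps. The
 * bpf_storage_buffer holds the RCU-protected data; in
 * bpf_cgroup_storage, @map points back to the owning storage map and
 * @key names the (cgroup, attach type) pair the buffer belongs to,
 * while @list, @node and @rcu are the map's bookkeeping hooks.
 */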
struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	struct bpf_storage_buffer *buf;
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* Array of effective programs for this cgroup (RCU-protected) */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* Programs attached directly to this cgroup, plus their attach
	 * flags. When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list
	 * holds zero or one element; with BPF_F_ALLOW_MULTI it can hold
	 * up to BPF_CGROUP_MAX_PROGS entries.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* Temporary storage for the effective prog array, used by
	 * prog attach/detach
	 */
	struct bpf_prog_array __rcu *inactive;
};
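
/*
 * A rough sketch of how effective[] relates to progs[] (illustrative,
 * not lifted from the sources): with BPF_F_ALLOW_MULTI, a cgroup's
 * effective array is its ancestors' programs followed by its own, e.g.
 *
 *	root  (progs: A)	-> effective: [A]
 *	  child (progs: B, C)	-> effective: [A, B, C]
 *
 * With BPF_F_ALLOW_OVERRIDE, a program attached in the child replaces
 * the inherited one in the child's effective array instead.
 */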

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
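
/*
 * For illustration only, a simplified sketch of the BPF_PROG_ATTACH
 * syscall path (the real call site, with full lookup and error
 * handling, lives in kernel/bpf/syscall.c):
 *
 *	cgrp = cgroup_get_from_fd(attr->target_fd);
 *	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
 *				attr->attach_flags);
 */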

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

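/*
 * Publish @storage's data area in the bpf_cgroup_storage per-cpu slot
 * before a program runs; the bpf_get_local_storage() helper reads it
 * back from there. The caller is expected to run the program on the
 * same CPU without being preempted, so the slot cannot change under it.
 */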
static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
{
	struct bpf_storage_buffer *buf;

	if (!storage)
		return;

	buf = READ_ONCE(storage->buf);
	this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
}

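/*
 * Rough lifecycle of a cgroup storage buffer, as the API below
 * suggests (a hedged reading, not a quote from the sources): storage is
 * allocated when a program that uses a cgroup storage map is attached,
 * linked to its (cgroup, attach type) pair for the duration of the
 * attachment, and unlinked and freed on detach; assign/release bind the
 * map to the program for the program's lifetime.
 */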
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})
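
/*
 * Illustrative ingress call site (a sketch; the real caller sits in the
 * network receive path in net/core):
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;	(the program rejected the packet)
 */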

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
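
/*
 * The egress wrapper only runs for locally originated traffic (sk must
 * match skb->sk) and upgrades request/timewait sockets to the full
 * socket via sk_to_full_sk(), since only a full socket carries the
 * cgroup association the filter runs against.
 */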

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
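
/*
 * Sketch of the socket-creation hook at work (illustrative; the real
 * call sites are the per-family create paths, e.g. inet_create()):
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
 *	if (err) {
 *		sk_common_release(sk);
 *		return err;
 *	}
 */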

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
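
/*
 * The _LOCK variant takes the socket lock around the program run:
 * sock_addr programs may rewrite the address in @uaddr (and the sendmsg
 * hooks additionally get @t_ctx), so the socket must be stable while
 * they execute.
 */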

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled &&	       \
					    sk->sk_prot->pre_connect)
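
/*
 * Illustrative use of BPF_CGROUP_PRE_CONNECT_ENABLED() (a simplified
 * sketch of the af_inet connect path):
 *
 *	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
 *		err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
 *		if (err)
 *			goto out;
 *	}
 */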

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
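
/*
 * TCP reaches this wrapper through a small helper (tcp_call_bpf() in
 * include/net/tcp.h), letting sock_ops programs supply values such as
 * the initial RTO or initial receive window at various points in a
 * connection's lifetime.
 */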

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	       \
							  BPF_CGROUP_DEVICE);  \
									       \
	__ret;								       \
})
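
/*
 * Sketch of the device-access check (illustrative; the real caller is
 * the device cgroup code under security/):
 *
 *	err = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
 *	if (err)
 *		return -EPERM;
 */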

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

#else /* !CONFIG_CGROUP_BPF */

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */