/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

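/*
 * Editor's note: cgroup_bpf_enabled is a static key, so every
 * BPF_CGROUP_RUN_* hook below compiles to a no-op branch until the
 * first cgroup program is attached; kernels built with
 * CONFIG_CGROUP_BPF but not using it pay almost nothing on these
 * hot paths.
 */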
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup, plus their attach flags;
	 * when flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list
	 * has either zero or one element,
	 * when BPF_F_ALLOW_MULTI, the list can hold up to
	 * BPF_CGROUP_MAX_PROGS entries
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
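
/*
 * Illustrative sketch (editor's reading, not kernel code): for a given
 * attach type the "effective" array collects the cgroup's own programs
 * together with those inherited from its ancestors, e.g.
 *
 *	cgroup A: attach prog_a with BPF_F_ALLOW_MULTI
 *	`- cgroup B: attach prog_b with BPF_F_ALLOW_MULTI
 *	   B's effective[type] then holds both prog_a and prog_b
 *
 * whereas with BPF_F_ALLOW_OVERRIDE, a program attached to B overrides
 * the inherited one in B's effective array.
 */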

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

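/*
 * Sketch of the expected call path (editor's orientation note): the
 * BPF_PROG_ATTACH, BPF_PROG_DETACH and BPF_PROG_QUERY syscall commands
 * resolve the target cgroup from the fd in bpf_attr and then call the
 * cgroup_bpf_*() wrappers above, which take cgroup_mutex and delegate
 * to the __cgroup_bpf_*() helpers.
 */
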
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

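/*
 * Editor's summary of the convention used by the run helpers above:
 * they return 0 when the operation is allowed and a negative errno
 * (typically -EPERM) when an attached program rejects it, so the
 * BPF_CGROUP_RUN_* macros below can feed "if (err) return err;" style
 * error paths directly.
 */
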
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})
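
/*
 * Usage sketch (hypothetical call site; the real callers sit in the
 * inet receive path): a non-zero result means an attached program
 * rejected the packet, which the caller maps to a drop:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		goto drop;
 */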

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
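
/*
 * On egress, sk may not be a full socket (e.g. a request socket during
 * the handshake); sk_to_full_sk() maps it to the corresponding full
 * socket and sk_fullsock() guards the run, so attached programs only
 * ever see full sockets.
 */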

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type);   \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)	{					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type);   \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
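
/*
 * Editor's reading of the two variants: the _LOCK form takes the socket
 * lock around the program run, for call sites that do not already hold
 * it. Attached programs may inspect and even rewrite *uaddr, so the run
 * has to be serialized against other socket state changes.
 */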

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled &&	       \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
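
/*
 * Sketch of the expected pre-connect wiring (hypothetical protocol;
 * the function name below is illustrative, not from this header): a
 * protocol that wants the connect hooks points sk_prot->pre_connect at
 * a small function that just runs the cgroup program, so an attached
 * program can veto or rewrite the destination:
 *
 *	static int example_v4_pre_connect(struct sock *sk,
 *					  struct sockaddr *uaddr,
 *					  int addr_len)
 *	{
 *		return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
 *	}
 *
 * The connect(2) path consults BPF_CGROUP_PRE_CONNECT_ENABLED(sk) to
 * decide whether to call it at all.
 */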

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);  \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
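
/*
 * Editor's note on intent: BPF_CGROUP_RUN_PROG_SOCK_OPS is driven from
 * the TCP code at connection events, with the specific event encoded in
 * struct bpf_sock_ops_kern; see the TCP callers for the authoritative
 * list of operations.
 */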

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	       \
							  BPF_CGROUP_DEVICE);  \
									       \
	__ret;								       \
})
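
/*
 * Usage sketch (hypothetical call site; the real caller is the device
 * cgroup permission check): gate access to a character device on the
 * attached BPF_CGROUP_DEVICE programs, e.g.
 *
 *	err = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(BPF_DEVCG_DEV_CHAR,
 *						MAJOR(dev), MINOR(dev),
 *						BPF_DEVCG_ACC_READ);
 *	if (err)
 *		return err;
 */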
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */