#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

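/*
 * Note: cgroup_bpf_enabled is a static key that is bumped from the attach
 * path (see __cgroup_bpf_update() in kernel/bpf/cgroup.c), so the
 * BPF_CGROUP_RUN_PROG_*() wrappers below cost only a patched-out jump on
 * systems that never attach cgroup BPF programs.
 */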
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct cgroup_bpf {
	/*
	 * Store two sets of bpf_prog pointers, one for programs that are
	 * pinned directly to this cgroup, and one for those that are effective
	 * when this cgroup is accessed.
	 */
	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
	struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
	bool disallow_override[MAX_BPF_ATTACH_TYPE];
};
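
/*
 * Rough semantics of the arrays above (__cgroup_bpf_update() in
 * kernel/bpf/cgroup.c is the authoritative reference): prog[type] is the
 * program attached directly to this cgroup, effective[type] is the program
 * actually run for sockets in this cgroup (inherited from the nearest
 * ancestor when nothing is attached here), and disallow_override[type] is
 * set when a program was attached without BPF_F_ALLOW_OVERRIDE, which
 * prevents descendant cgroups from installing their own program for that
 * attach type.
 */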

void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);

int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
			struct bpf_prog *prog, enum bpf_attach_type type,
			bool overridable);

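/*
 * cgroup_bpf_update() is what the bpf(2) BPF_PROG_ATTACH / BPF_PROG_DETACH
 * commands end up calling (see kernel/bpf/syscall.c). A minimal userspace
 * sketch of attaching an egress filter, assuming cgroup_fd is an open fd
 * for the cgroup directory and prog_fd refers to a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_OVERRIDE;    (children may override)
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)))
 *		perror("BPF_PROG_ATTACH");
 */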
/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, bool overridable);

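/*
 * Slow-path helpers, implemented in kernel/bpf/cgroup.c. Roughly: each one
 * resolves the socket's cgroup, dereferences the RCU-protected
 * effective[type] program and runs it, returning 0 to allow the operation
 * or -EPERM to reject it. They are only reached through the
 * cgroup_bpf_enabled-guarded wrappers further down.
 */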
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

/*
 * Wrappers for the __cgroup_bpf_run_filter_*() helpers above, guarded by
 * cgroup_bpf_enabled.
 */
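/*
 * Ingress: run on every skb destined for a local socket, from the receive
 * path (see sk_filter_trim_cap() in net/core/filter.c); a non-zero result
 * makes the caller drop the packet.
 */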
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

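/*
 * Egress: run from the IPv4/IPv6 output path (ip_finish_output() and
 * ip6_finish_output() at the time these hooks were added). The
 * sk && sk == skb->sk check restricts the hook to locally generated traffic
 * whose owning socket is known, and sk_to_full_sk()/sk_fullsock() map a
 * request_sock back to the full (listener) socket before the program runs.
 */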
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

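/*
 * Socket creation: run once per AF_INET/AF_INET6 socket at creation time
 * (see inet_create() and inet6_create()), letting a program deny socket
 * creation for tasks in the cgroup.
 */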
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk,			       \
						 BPF_CGROUP_INET_SOCK_CREATE); \
	}								       \
	__ret;								       \
})

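/*
 * sock_ops: run from TCP at various connection events through
 * tcp_call_bpf() (see include/net/tcp.h); (sock_ops)->sk may be a
 * request_sock, hence the sk_to_full_sk()/sk_fullsock() dance before the
 * program runs.
 */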
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
				      struct cgroup *parent) {}

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */