#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
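
/*
 * cgroup_bpf_enabled is a static key, so the BPF_CGROUP_RUN_PROG_*()
 * hooks below compile down to a patched jump and cost nearly nothing
 * while no cgroup-bpf program is attached anywhere; the key is
 * presumably bumped by the attach path, so the filter calls only run
 * once at least one program exists.
 */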

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};

struct bpf_prog_array;

Daniel Mack30070982016-11-23 16:52:26 +010024struct cgroup_bpf {
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -070025 /* array of effective progs in this cgroup */
26 struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
27
28 /* attached progs to this cgroup and attach flags
29 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
30 * have either zero or one element
31 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
Daniel Mack30070982016-11-23 16:52:26 +010032 */
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -070033 struct list_head progs[MAX_BPF_ATTACH_TYPE];
34 u32 flags[MAX_BPF_ATTACH_TYPE];
35
36 /* temp storage for effective prog array used by prog_attach/detach */
37 struct bpf_prog_array __rcu *inactive;
Daniel Mack30070982016-11-23 16:52:26 +010038};
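
/*
 * Illustrative sketch only (the authoritative composition rules live in
 * kernel/bpf/cgroup.c): for a hierarchy A -> A/B with progs attached for
 * the same attach type, the expectation is roughly
 *
 *	BPF_F_ALLOW_MULTI on A and B:	B's effective[] runs A's prog,
 *					then B's prog
 *	BPF_F_ALLOW_OVERRIDE on A,
 *	overriding attach on B:		B's effective[] runs only B's prog
 *	flags == 0 on A:		attaching anything to B is rejected
 *
 * i.e. progs[] and flags[] describe what is attached here, while
 * effective[] caches what actually runs for this cgroup.
 */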

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
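
/*
 * Illustrative use of the locked wrappers; the helper names and fields
 * below are assumptions about the BPF_PROG_ATTACH syscall path in
 * kernel/bpf/syscall.c, not definitions from this header:
 *
 *	cgrp = cgroup_get_from_fd(attr->target_fd);
 *	prog = bpf_prog_get_type(attr->attach_bpf_fd,
 *				 BPF_PROG_TYPE_CGROUP_SKB);
 *	err = cgroup_bpf_attach(cgrp, prog, BPF_CGROUP_INET_INGRESS,
 *				attr->attach_flags);
 *
 * cgroup_bpf_attach()/cgroup_bpf_detach() themselves only take
 * cgroup_mutex around the corresponding __cgroup_bpf_*() call.
 */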

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

/* Wrappers for the __cgroup_bpf_run_filter_*() calls above, guarded by
 * cgroup_bpf_enabled.
 */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})
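
/*
 * The return convention here is assumed to be: 0 means the packet may
 * pass, a negative error means the caller should drop it. The ingress
 * hook is expected to sit on the socket receive path once the owning
 * socket is known; the exact call site is an assumption, not something
 * this header mandates.
 */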

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
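
/*
 * On egress, skb->sk can be a request or timewait socket, so
 * sk_to_full_sk() is presumably used to reach the full socket that
 * carries the cgroup association before the sk_fullsock() check, and
 * the sk == skb->sk test skips packets that are only being forwarded
 * rather than generated by a local socket.
 */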

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk,			       \
						 BPF_CGROUP_INET_SOCK_CREATE); \
	}								       \
	__ret;								       \
})
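
/*
 * Illustrative call site, an assumption rather than part of this
 * header: the SOCK_CREATE hook fits at the end of socket creation, e.g.
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
 *	if (err) {
 *		sk_common_release(sk);
 *		return err;
 *	}
 *
 * giving the attached program a chance to veto new sockets for tasks
 * in the cgroup.
 */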

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);  \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
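
/*
 * Illustrative caller sketch: a TCP helper would fill a
 * struct bpf_sock_ops_kern on the stack and pass its address, roughly
 *
 *	struct bpf_sock_ops_kern sock_ops;
 *
 *	memset(&sock_ops, 0, sizeof(sock_ops));
 *	sock_ops.sk = sk;
 *	sock_ops.op = op;
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 *
 * The ->op field is an assumption about bpf_sock_ops_kern; only ->sk is
 * actually referenced by the macro above.
 */
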
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */