#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

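/*
 * cgroup_bpf_enabled_key is a jump label: it is flipped on when the first
 * program is attached, so until then the BPF_CGROUP_RUN_PROG_*() wrappers
 * below compile down to a patched-out branch with no runtime cost.
 */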
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct cgroup_bpf {
	/*
	 * Store two sets of bpf_prog pointers, one for programs that are
	 * pinned directly to this cgroup, and one for those that are effective
	 * when this cgroup is accessed.
	 */
	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
	struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
	bool disallow_override[MAX_BPF_ATTACH_TYPE];
};

void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);

int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
			struct bpf_prog *prog, enum bpf_attach_type type,
			bool overridable);

/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, bool overridable);
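/*
 * Illustrative only (the real call sites live in the bpf(2) syscall code,
 * not in this header): attach and detach are expected to look roughly like
 *
 *	err = cgroup_bpf_update(cgrp, prog, BPF_CGROUP_INET_EGRESS, true);
 *	...
 *	err = cgroup_bpf_update(cgrp, NULL, BPF_CGROUP_INET_EGRESS, true);
 *
 * where a NULL prog detaches whatever is attached for that type.
 */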

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})
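/*
 * Sketch of the intended ingress call site (in the mainline tree this runs
 * from the socket receive path, e.g. sk_filter_trim_cap()); a non-zero
 * return value drops the packet:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 */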

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk,			       \
						 BPF_CGROUP_INET_SOCK_CREATE); \
	}								       \
	__ret;								       \
})
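/*
 * Illustrative usage at socket creation, modeled on inet_create(): run the
 * BPF_CGROUP_INET_SOCK_CREATE program once the socket is initialized and
 * abort creation on a non-zero return ("out_err" is a hypothetical label):
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
 *	if (err)
 *		goto out_err;
 */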

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
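/*
 * Illustrative caller (along the lines of tcp_call_bpf() on the TCP side):
 * fill in a struct bpf_sock_ops_kern and invoke the wrapper; the field
 * names here reflect that struct as declared elsewhere:
 *
 *	struct bpf_sock_ops_kern sock_ops;
 *
 *	memset(&sock_ops, 0, sizeof(sock_ops));
 *	sock_ops.sk = sk;
 *	sock_ops.op = op;
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */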
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
				      struct cgroup *parent) {}

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */