/* source blob: ace92fce296d107e38b0e53dfb03e8165e45742e */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

/* Forward declarations only; a pointer suffices, so no heavy includes. */
struct sock;
struct cgroup;
struct sk_buff;

#ifdef CONFIG_CGROUP_BPF

/*
 * Static key flipped on when at least one cgroup-BPF program is attached,
 * so the per-packet hooks below cost a single patched branch when unused.
 */
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
/* Per-cgroup BPF program state, embedded in struct cgroup. */
struct cgroup_bpf {
	/*
	 * Store two sets of bpf_prog pointers, one for programs that are
	 * pinned directly to this cgroup, and one for those that are effective
	 * when this cgroup is accessed.
	 */
	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
	struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE];
	/*
	 * Per attach type: when set, descendants may not replace this
	 * program (set when attached with overridable == false — see
	 * __cgroup_bpf_update()).
	 */
	bool disallow_override[MAX_BPF_ATTACH_TYPE];
};
26
/* Release BPF program references held by @cgrp (called on cgroup teardown). */
void cgroup_bpf_put(struct cgroup *cgrp);
/* Propagate @parent's effective programs into newly created @cgrp. */
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);

/*
 * Attach/detach @prog for @type on @cgrp; @overridable controls whether
 * descendant cgroups may later replace it.  Caller must hold cgroup_mutex.
 * Returns 0 or a negative errno.
 */
int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
			struct bpf_prog *prog, enum bpf_attach_type type,
			bool overridable);

/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, bool overridable);

/*
 * Run the effective program of @type for @sk's cgroup against @skb.
 * Returns 0 to allow the packet, negative errno to drop it.
 */
int __cgroup_bpf_run_filter(struct sock *sk,
			    struct sk_buff *skb,
			    enum bpf_attach_type type);
/* Wrappers for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb)			      \
({									      \
	int __ret = 0;							      \
	/* Static branch: zero cost unless a program is attached. */	      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter(sk, skb,		      \
						BPF_CGROUP_INET_INGRESS);     \
									      \
	__ret;								      \
})
52
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb)				      \
({									      \
	int __ret = 0;							      \
	/* Only run when skb is still owned by its originating socket. */     \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		      \
		/* Map request/timewait minisocks to the full socket. */      \
		typeof(sk) __sk = sk_to_full_sk(sk);			      \
		if (sk_fullsock(__sk))					      \
			__ret = __cgroup_bpf_run_filter(__sk, skb,	      \
						BPF_CGROUP_INET_EGRESS);      \
	}								      \
	__ret;								      \
})
64
#else /* !CONFIG_CGROUP_BPF */

/* No-op stubs so callers compile unchanged when cgroup-BPF is disabled. */
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
				      struct cgroup *parent) {}

/* Always "allow": evaluate to 0 without touching sk/skb. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */