bpf: prepare bpf_int_jit_compile/bpf_prog_select_runtime apis
Since the blinding is strictly only called from inside the eBPF JITs,
we need to change the signatures of bpf_int_jit_compile() and
bpf_prog_select_runtime() first, to prepare for the fact that the
eBPF program we're dealing with can change underneath us. Hence,
both now return the latest prog so that call sites pick it up
instead of holding on to a stale pointer. No functional change in
this patch.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
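For context only (not part of the patch), the call-site pattern this
prepares for looks roughly as sketched below. The wrapper jit_attempt()
is a hypothetical helper used purely for illustration; in the kernel the
actual consumer of the returned prog is bpf_prog_select_runtime():

	/*
	 * Sketch only, assuming a later patch lets the JIT hand back a
	 * (possibly different) bpf_prog, e.g. due to constant blinding.
	 * jit_attempt() is a hypothetical wrapper, not kernel code.
	 */
	static struct bpf_prog *jit_attempt(struct bpf_prog *fp)
	{
		/* Continue with the returned pointer, never the old one. */
		fp = bpf_int_jit_compile(fp);

		if (!fp->jited)
			pr_debug("eBPF JIT did not engage, using interpreter\n");

		return fp;
	}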
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ef35e86..dd42880 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -762,7 +762,7 @@
/* Nothing to do here. We support Internal BPF. */
}
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header;
struct jit_ctx ctx;
@@ -770,14 +770,14 @@
u8 *image_ptr;
if (!bpf_jit_enable)
- return;
+ return prog;
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL)
- return;
+ return prog;
/* 1. Initial fake pass to compute ctx->idx. */
@@ -828,6 +828,7 @@
prog->jited = 1;
out:
kfree(ctx.offset);
+ return prog;
}
void bpf_jit_free(struct bpf_prog *prog)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 3c0bfc1..fcf301a 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1262,18 +1262,19 @@
/*
* Compile eBPF program "fp"
*/
-void bpf_int_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
struct bpf_binary_header *header;
struct bpf_jit jit;
int pass;
if (!bpf_jit_enable)
- return;
+ return fp;
+
memset(&jit, 0, sizeof(jit));
jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
if (jit.addrs == NULL)
- return;
+ return fp;
/*
* Three initial passes:
* - 1/2: Determine clobbered registers
@@ -1305,6 +1306,7 @@
}
free_addrs:
kfree(jit.addrs);
+ return fp;
}
/*
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f5bfd4f..6b2d23e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1073,7 +1073,7 @@
{
}
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header = NULL;
int proglen, oldproglen = 0;
@@ -1084,11 +1084,11 @@
int i;
if (!bpf_jit_enable)
- return;
+ return prog;
addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
if (!addrs)
- return;
+ return prog;
/* Before first pass, make a rough estimation of addrs[]
* each bpf instruction is translated to less than 64 bytes
@@ -1140,6 +1140,7 @@
}
out:
kfree(addrs);
+ return prog;
}
void bpf_jit_free(struct bpf_prog *fp)