syscall_filter: Add support for <, <=, >, >=
This change introduces four new comparison operators.
Bug: 111726641
Test: make tests
Test: echo 'read: arg1 < 0xff' | ./parse_seccomp_policy --dump - | \
./libseccomp/tools/scmp_bpf_disasm
Test: echo 'read: arg1 <= 0xff' | ./parse_seccomp_policy --dump - | \
./libseccomp/tools/scmp_bpf_disasm
Test: echo 'read: arg1 > 0xff' | ./parse_seccomp_policy --dump - | \
./libseccomp/tools/scmp_bpf_disasm
Test: echo 'read: arg1 >= 0xff' | ./parse_seccomp_policy --dump - | \
./libseccomp/tools/scmp_bpf_disasm
Change-Id: If6a1752d688748e9f0d0ad4902c3ae2982881b2e
diff --git a/bpf.c b/bpf.c
index ab26b9d..f4b980b 100644
--- a/bpf.c
+++ b/bpf.c
@@ -59,7 +59,7 @@
}
#endif
-/* Size-aware equality comparison. */
+/* Size-aware comparisons. */
size_t bpf_comp_jeq32(struct sock_filter *filter, unsigned long c,
unsigned char jt, unsigned char jf)
{
@@ -90,6 +90,63 @@
}
#endif
+size_t bpf_comp_jgt32(struct sock_filter *filter, unsigned long c,
+ unsigned char jt, unsigned char jf)
+{
+ unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
+ set_bpf_jump(filter, BPF_JMP + BPF_JGT + BPF_K, lo, jt, jf);
+ return 1U;
+}
+
+size_t bpf_comp_jge32(struct sock_filter *filter, unsigned long c,
+ unsigned char jt, unsigned char jf)
+{
+ unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
+ set_bpf_jump(filter, BPF_JMP + BPF_JGE + BPF_K, lo, jt, jf);
+ return 1U;
+}
+
+/*
+ * On 64 bits, we have to do up to three 32-bit comparisons.
+ * We jump true when the |hi| comparison is true *or* |hi| is equal and the
+ * |lo| comparison is true.
+ */
+#if defined(BITS64)
+size_t bpf_comp_jgt64(struct sock_filter *filter, uint64_t c, unsigned char jt,
+ unsigned char jf)
+{
+ unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
+ unsigned int hi = (unsigned int)(c >> 32);
+
+ struct sock_filter *curr_block = filter;
+
+ /* bpf_load_arg leaves |hi| in A. */
+ curr_block += bpf_comp_jgt32(curr_block, hi, SKIPN(3) + jt, NEXT);
+ curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
+ set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
+ curr_block += bpf_comp_jgt32(curr_block, lo, jt, jf);
+
+ return curr_block - filter;
+}
+
+size_t bpf_comp_jge64(struct sock_filter *filter, uint64_t c, unsigned char jt,
+ unsigned char jf)
+{
+ unsigned int lo = (unsigned int)(c & 0xFFFFFFFF);
+ unsigned int hi = (unsigned int)(c >> 32);
+
+ struct sock_filter *curr_block = filter;
+
+ /* bpf_load_arg leaves |hi| in A. */
+ curr_block += bpf_comp_jgt32(curr_block, hi, SKIPN(3) + jt, NEXT);
+ curr_block += bpf_comp_jeq32(curr_block, hi, NEXT, SKIPN(2) + jf);
+ set_bpf_stmt(curr_block++, BPF_LD + BPF_MEM, 0); /* swap in |lo| */
+ curr_block += bpf_comp_jge32(curr_block, lo, jt, jf);
+
+ return curr_block - filter;
+}
+#endif
+
/* Size-aware bitwise AND. */
size_t bpf_comp_jset32(struct sock_filter *filter, unsigned long mask,
unsigned char jt, unsigned char jf)
@@ -133,11 +190,26 @@
return bpf_comp_jset(filter, negative_mask, jf, jt);
}
+static size_t bpf_arg_comp_len(int op)
+{
+ /* The comparisons that use gt/ge internally may have extra opcodes. */
+ switch (op) {
+ case LT:
+ case LE:
+ case GT:
+ case GE:
+ return BPF_ARG_GT_GE_COMP_LEN + 1;
+ default:
+ return BPF_ARG_COMP_LEN + 1;
+ }
+}
+
size_t bpf_arg_comp(struct sock_filter **pfilter, int op, int argidx,
unsigned long c, unsigned int label_id)
{
+ size_t filter_len = bpf_arg_comp_len(op);
struct sock_filter *filter =
- calloc(BPF_ARG_COMP_LEN + 1, sizeof(struct sock_filter));
+ calloc(filter_len, sizeof(struct sock_filter));
struct sock_filter *curr_block = filter;
size_t (*comp_function)(struct sock_filter * filter, unsigned long k,
unsigned char jt, unsigned char jf);
@@ -156,6 +228,22 @@
comp_function = bpf_comp_jeq;
flip = 1;
break;
+ case LT:
+ comp_function = bpf_comp_jge;
+ flip = 1;
+ break;
+ case LE:
+ comp_function = bpf_comp_jgt;
+ flip = 1;
+ break;
+ case GT:
+ comp_function = bpf_comp_jgt;
+ flip = 0;
+ break;
+ case GE:
+ comp_function = bpf_comp_jge;
+ flip = 0;
+ break;
case SET:
comp_function = bpf_comp_jset;
flip = 0;