From: Alexei Starovoitov <ast@fb.com>
Date: Wed, 22 Nov 2017 16:42:05 -0800
Subject: bpf: fix branch pruning logic
Origin: https://git.kernel.org/linus/c131187db2d3fa2f8bf32fdf4e9a4ef805168467

when the verifier detects that register contains a runtime constant
and it's compared with another constant it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.

Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
 include/linux/bpf_verifier.h |  2 +-
 kernel/bpf/verifier.c        | 27 +++++++++++++++++++++++++++
 2 files changed, 28 insertions(+), 1 deletion(-)

--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -110,7 +110,7 @@ struct bpf_insn_aux_data {
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-	int converted_op_size; /* the valid value width after perceived conversion */
+	bool seen; /* this insn was processed by the verifier */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3695,6 +3695,7 @@ static int do_check(struct bpf_verifier_
 		if (err)
 			return err;
 
+		env->insn_aux_data[insn_idx].seen = true;
 		if (class == BPF_ALU || class == BPF_ALU64) {
 			err = check_alu_op(env, insn);
 			if (err)
@@ -3885,6 +3886,7 @@ process_bpf_exit:
 					return err;
 
 				insn_idx++;
+				env->insn_aux_data[insn_idx].seen = true;
 			} else {
 				verbose(env, "invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -4067,6 +4069,7 @@ static int adjust_insn_aux_data(struct b
 				u32 off, u32 cnt)
 {
 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+	int i;
 
 	if (cnt == 1)
 		return 0;
@@ -4076,6 +4079,8 @@ static int adjust_insn_aux_data(struct b
 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
 	memcpy(new_data + off + cnt - 1, old_data + off,
 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+	for (i = off; i < off + cnt - 1; i++)
+		new_data[i].seen = true;
 	env->insn_aux_data = new_data;
 	vfree(old_data);
 	return 0;
@@ -4094,6 +4099,25 @@ static struct bpf_prog *bpf_patch_insn_d
 	return new_prog;
 }
 
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+	struct bpf_insn *insn = env->prog->insnsi;
+	const int insn_cnt = env->prog->len;
+	int i;
+
+	for (i = 0; i < insn_cnt; i++) {
+		if (aux_data[i].seen)
+			continue;
+		memcpy(insn + i, &nop, sizeof(nop));
+	}
+}
+
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -4410,6 +4434,9 @@ skip_full_check:
 	free_states(env);
 
 	if (ret == 0)
+		sanitize_dead_code(env);
+
+	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
 