bpf/verifier: Fix multiple security issues (Closes: #883558)

Ben Hutchings 2017-12-22 03:10:11 +00:00
parent a983b69920
commit cf1f6e2019
13 changed files with 2529 additions and 0 deletions

debian/changelog

@@ -535,6 +535,18 @@ linux (4.14.7-1) UNRELEASED; urgency=medium
(CVE-2017-17741)
* bluetooth: Prevent stack info leak from the EFS element.
(CVE-2017-1000410)
* bpf/verifier: Fix multiple security issues (Closes: #883558):
- encapsulate verifier log state into a structure
- move global verifier log into verifier environment
- fix branch pruning logic
- fix bounds calculation on BPF_RSH
- fix incorrect sign extension in check_alu_op() (CVE-2017-16995)
- fix incorrect tracking of register size truncation (CVE-2017-16996)
- fix 32-bit ALU op verification
- fix missing error return in check_stack_boundary()
- force strict alignment checks for stack pointers
- don't prune branches when a scalar is replaced with a pointer
- fix integer overflows
-- Salvatore Bonaccorso <carnil@debian.org> Sun, 03 Dec 2017 10:18:39 +0100

debian/patches/bugfix/all/bpf-don-t-prune-branches-when-a-scalar-is-replaced-w.patch

@@ -0,0 +1,44 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:59 -0800
Subject: [7/9] bpf: don't prune branches when a scalar is replaced with a
pointer
Origin: https://git.kernel.org/linus/179d1c5602997fef5a940c6ddcf31212cbfebd14
This could be made safe by passing through a reference to env and checking
for env->allow_ptr_leaks, but it would only work one way and is probably
not worth the hassle - not doing it will not directly lead to program
rejection.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3366,15 +3366,14 @@ static bool regsafe(struct bpf_reg_state
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
- /* if we knew anything about the old value, we're not
- * equal, because we can't know anything about the
- * scalar value of the pointer in the new value.
+ /* We're trying to use a pointer in place of a scalar.
+ * Even if the scalar was unbounded, this could lead to
+ * pointer leaks because scalars are allowed to leak
+ * while pointers are not. We could make this safe in
+ * special cases if root is calling us, but it's
+ * probably not worth the hassle.
*/
- return rold->umin_value == 0 &&
- rold->umax_value == U64_MAX &&
- rold->smin_value == S64_MIN &&
- rold->smax_value == S64_MAX &&
- tnum_is_unknown(rold->var_off);
+ return false;
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and

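For context, a hypothetical sketch (not from the patch) of why pruning a pointer state against an old unbounded scalar is unsafe:

/* State pruning compares the register state on a new path against an
 * already-verified one at the same instruction:
 *
 *   path A:  r6 = <unknown scalar>      // e.g. loaded from a map
 *   path B:  r6 = <map value pointer>
 *
 *   join:    *(u64 *)(r10 - 8) = r6;
 *            r0 = *(u64 *)(r10 - 8);
 *            exit;                      // r0 is visible to user space
 *
 * Before this fix, path B could be pruned as "safe" whenever path A's
 * scalar was fully unbounded; exposing r6 is harmless for a scalar but
 * leaks a kernel address for a pointer, hence regsafe() now returns
 * false unconditionally for this case.
 */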
debian/patches/bugfix/all/bpf-encapsulate-verifier-log-state-into-a-structure.patch

@@ -0,0 +1,201 @@
From: Jakub Kicinski <jakub.kicinski@netronome.com>
Date: Mon, 9 Oct 2017 10:30:10 -0700
Subject: bpf: encapsulate verifier log state into a structure
Origin: https://git.kernel.org/linus/e7bf8249e8f1bac64885eeccb55bcf6111901a81
Put the loose log_* variables into a structure. This will make
it simpler to remove the global verifier state in following patches.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/linux/bpf_verifier.h | 13 ++++++++++
kernel/bpf/verifier.c | 57 +++++++++++++++++++++++---------------------
2 files changed, 43 insertions(+), 27 deletions(-)
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -115,6 +115,19 @@ struct bpf_insn_aux_data {
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+struct bpf_verifer_log {
+ u32 level;
+ char *kbuf;
+ char __user *ubuf;
+ u32 len_used;
+ u32 len_total;
+};
+
+static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
+{
+ return log->len_used >= log->len_total - 1;
+}
+
struct bpf_verifier_env;
struct bpf_ext_analyzer_ops {
int (*insn_hook)(struct bpf_verifier_env *env,
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -156,8 +156,7 @@ struct bpf_call_arg_meta {
/* verbose verifier prints what it's seeing
* bpf_check() is called under lock, so no race to access these global vars
*/
-static u32 log_level, log_size, log_len;
-static char *log_buf;
+static struct bpf_verifer_log verifier_log;
static DEFINE_MUTEX(bpf_verifier_lock);
@@ -167,13 +166,15 @@ static DEFINE_MUTEX(bpf_verifier_lock);
*/
static __printf(1, 2) void verbose(const char *fmt, ...)
{
+ struct bpf_verifer_log *log = &verifier_log;
va_list args;
- if (log_level == 0 || log_len >= log_size - 1)
+ if (!log->level || bpf_verifier_log_full(log))
return;
va_start(args, fmt);
- log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
+ log->len_used += vscnprintf(log->kbuf + log->len_used,
+ log->len_total - log->len_used, fmt, args);
va_end(args);
}
@@ -834,7 +835,7 @@ static int check_map_access(struct bpf_v
* need to try adding each of min_value and max_value to off
* to make sure our theoretical access will be safe.
*/
- if (log_level)
+ if (verifier_log.level)
print_verifier_state(state);
/* The minimum value is only important with signed
* comparisons where we can't assume the floor of a
@@ -2915,7 +2916,7 @@ static int check_cond_jmp_op(struct bpf_
verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
return -EACCES;
}
- if (log_level)
+ if (verifier_log.level)
print_verifier_state(this_branch);
return 0;
}
@@ -3633,7 +3634,7 @@ static int do_check(struct bpf_verifier_
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
- if (log_level) {
+ if (verifier_log.level) {
if (do_print_state)
verbose("\nfrom %d to %d: safe\n",
prev_insn_idx, insn_idx);
@@ -3646,8 +3647,9 @@ static int do_check(struct bpf_verifier_
if (need_resched())
cond_resched();
- if (log_level > 1 || (log_level && do_print_state)) {
- if (log_level > 1)
+ if (verifier_log.level > 1 ||
+ (verifier_log.level && do_print_state)) {
+ if (verifier_log.level > 1)
verbose("%d:", insn_idx);
else
verbose("\nfrom %d to %d:",
@@ -3656,7 +3658,7 @@ static int do_check(struct bpf_verifier_
do_print_state = false;
}
- if (log_level) {
+ if (verifier_log.level) {
verbose("%d: ", insn_idx);
print_bpf_insn(env, insn);
}
@@ -4307,7 +4309,7 @@ static void free_states(struct bpf_verif
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
- char __user *log_ubuf = NULL;
+ struct bpf_verifer_log *log = &verifier_log;
struct bpf_verifier_env *env;
int ret = -EINVAL;
@@ -4332,23 +4334,23 @@ int bpf_check(struct bpf_prog **prog, un
/* user requested verbose verifier output
* and supplied buffer to store the verification trace
*/
- log_level = attr->log_level;
- log_ubuf = (char __user *) (unsigned long) attr->log_buf;
- log_size = attr->log_size;
- log_len = 0;
+ log->level = attr->log_level;
+ log->ubuf = (char __user *) (unsigned long) attr->log_buf;
+ log->len_total = attr->log_size;
+ log->len_used = 0;
ret = -EINVAL;
- /* log_* values have to be sane */
- if (log_size < 128 || log_size > UINT_MAX >> 8 ||
- log_level == 0 || log_ubuf == NULL)
+ /* log attributes have to be sane */
+ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
+ !log->level || !log->ubuf)
goto err_unlock;
ret = -ENOMEM;
- log_buf = vmalloc(log_size);
- if (!log_buf)
+ log->kbuf = vmalloc(log->len_total);
+ if (!log->kbuf)
goto err_unlock;
} else {
- log_level = 0;
+ log->level = 0;
}
env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
@@ -4385,15 +4387,16 @@ skip_full_check:
if (ret == 0)
ret = fixup_bpf_calls(env);
- if (log_level && log_len >= log_size - 1) {
- BUG_ON(log_len >= log_size);
+ if (log->level && bpf_verifier_log_full(log)) {
+ BUG_ON(log->len_used >= log->len_total);
/* verifier log exceeded user supplied buffer */
ret = -ENOSPC;
/* fall through to return what was recorded */
}
/* copy verifier log back to user space including trailing zero */
- if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
+ if (log->level && copy_to_user(log->ubuf, log->kbuf,
+ log->len_used + 1) != 0) {
ret = -EFAULT;
goto free_log_buf;
}
@@ -4420,8 +4423,8 @@ skip_full_check:
}
free_log_buf:
- if (log_level)
- vfree(log_buf);
+ if (log->level)
+ vfree(log->kbuf);
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_bpf_prog_info() will release them.
@@ -4458,7 +4461,7 @@ int bpf_analyzer(struct bpf_prog *prog,
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
- log_level = 0;
+ verifier_log.level = 0;
env->strict_alignment = false;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))

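As background, the log attributes validated above come straight from the BPF_PROG_LOAD interface. A minimal user-space sketch (hypothetical helper name, error handling omitted): log_level must be non-zero, log_buf non-NULL and log_size within [128, UINT_MAX >> 8], otherwise the load fails with -EINVAL; a trace that outgrows the buffer fails with -ENOSPC.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static char log_buf[65536];

static int prog_load_verbose(const struct bpf_insn *insns, unsigned int insn_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (unsigned long)insns;
	attr.insn_cnt  = insn_cnt;
	attr.license   = (unsigned long)"GPL";
	attr.log_level = 1;                  /* request the verifier trace */
	attr.log_buf   = (unsigned long)log_buf;
	attr.log_size  = sizeof(log_buf);    /* must be >= 128 */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}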
debian/patches/bugfix/all/bpf-fix-32-bit-alu-op-verification.patch

@@ -0,0 +1,82 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:56 -0800
Subject: [4/9] bpf: fix 32-bit ALU op verification
Origin: https://git.kernel.org/linus/468f6eafa6c44cb2c5d8aad35e12f06c240a812a
32-bit ALU ops operate on 32-bit values and have 32-bit outputs.
Adjust the verifier accordingly.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 28 +++++++++++++++++-----------
1 file changed, 17 insertions(+), 11 deletions(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2010,6 +2010,10 @@ static int adjust_ptr_min_max_vals(struc
return 0;
}
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
@@ -2020,12 +2024,8 @@ static int adjust_scalar_min_max_vals(st
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
+ u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
- if (BPF_CLASS(insn->code) != BPF_ALU64) {
- /* 32-bit ALU ops are (32,32)->64 */
- coerce_reg_to_size(dst_reg, 4);
- coerce_reg_to_size(&src_reg, 4);
- }
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
@@ -2161,9 +2161,9 @@ static int adjust_scalar_min_max_vals(st
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
- if (umax_val > 63) {
- /* Shifts greater than 63 are undefined. This includes
- * shifts by a negative number.
+ if (umax_val >= insn_bitness) {
+ /* Shifts greater than 31 or 63 are undefined.
+ * This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
@@ -2189,9 +2189,9 @@ static int adjust_scalar_min_max_vals(st
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
- if (umax_val > 63) {
- /* Shifts greater than 63 are undefined. This includes
- * shifts by a negative number.
+ if (umax_val >= insn_bitness) {
+ /* Shifts greater than 31 or 63 are undefined.
+ * This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
@@ -2227,6 +2227,12 @@ static int adjust_scalar_min_max_vals(st
break;
}
+ if (BPF_CLASS(insn->code) != BPF_ALU64) {
+ /* 32-bit ALU ops are (32,32)->32 */
+ coerce_reg_to_size(dst_reg, 4);
+ coerce_reg_to_size(&src_reg, 4);
+ }
+
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;

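A hypothetical sketch of the two rules this enforces (not from the patch; insn macros as in include/linux/filter.h):

/* Shift counts are now checked against the instruction's own bitness:
 *
 *   BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32)  // 64-bit shift by 32: defined
 *   BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 32)  // shift count >= insn_bitness (32),
 *                                          // so r0 is marked unknown
 *
 * The (32,32)->32 rule matters for wrapping, too: if r0 is known to be
 * 0x80000000, the 32-bit add
 *
 *   BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_0)
 *
 * yields 0 with the upper 32 bits known zero, which the final
 * coerce_reg_to_size(dst_reg, 4) now captures in the tracked bounds.
 */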
debian/patches/bugfix/all/bpf-fix-branch-pruning-logic.patch

@@ -0,0 +1,112 @@
From: Alexei Starovoitov <ast@fb.com>
Date: Wed, 22 Nov 2017 16:42:05 -0800
Subject: bpf: fix branch pruning logic
Origin: https://git.kernel.org/linus/c131187db2d3fa2f8bf32fdf4e9a4ef805168467
when the verifier detects that register contains a runtime constant
and it's compared with another constant it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
include/linux/bpf_verifier.h | 2 +-
kernel/bpf/verifier.c | 27 +++++++++++++++++++++++++++
2 files changed, 28 insertions(+), 1 deletion(-)
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -110,7 +110,7 @@ struct bpf_insn_aux_data {
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int converted_op_size; /* the valid value width after perceived conversion */
+ bool seen; /* this insn was processed by the verifier */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3695,6 +3695,7 @@ static int do_check(struct bpf_verifier_
if (err)
return err;
+ env->insn_aux_data[insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
@@ -3885,6 +3886,7 @@ process_bpf_exit:
return err;
insn_idx++;
+ env->insn_aux_data[insn_idx].seen = true;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
@@ -4067,6 +4069,7 @@ static int adjust_insn_aux_data(struct b
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+ int i;
if (cnt == 1)
return 0;
@@ -4076,6 +4079,8 @@ static int adjust_insn_aux_data(struct b
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+ for (i = off; i < off + cnt - 1; i++)
+ new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
@@ -4094,6 +4099,25 @@ static struct bpf_prog *bpf_patch_insn_d
return new_prog;
}
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ const int insn_cnt = env->prog->len;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++) {
+ if (aux_data[i].seen)
+ continue;
+ memcpy(insn + i, &nop, sizeof(nop));
+ }
+}
+
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
@@ -4410,6 +4434,9 @@ skip_full_check:
free_states(env);
if (ret == 0)
+ sanitize_dead_code(env);
+
+ if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);

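A hypothetical example of the pattern this defends against (not from the patch):

/*
 *   BPF_MOV64_IMM(BPF_REG_0, 1),
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 1),  // always true: next insn skipped
 *   <dead instruction>,                     // never verified, but still JITed
 *   BPF_EXIT_INSN(),
 *
 * The verifier only walks the taken branch, so the dead instruction was
 * never checked yet still reached the JIT. sanitize_dead_code() now
 * rewrites every instruction whose aux_data.seen is false to the nop
 * "mov64 r0, r0" before the program is JITed.
 */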
debian/patches/bugfix/all/bpf-fix-incorrect-sign-extension-in-check_alu_op.patch

@@ -0,0 +1,44 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:54 -0800
Subject: [2/9] bpf: fix incorrect sign extension in check_alu_op()
Origin: https://git.kernel.org/linus/95a762e2c8c942780948091f8f2a4f32fce1ac6f
Distinguish between
BPF_ALU64|BPF_MOV|BPF_K (load 32-bit immediate, sign-extended to 64-bit)
and BPF_ALU|BPF_MOV|BPF_K (load 32-bit immediate, zero-padded to 64-bit);
only perform sign extension in the first case.
Starting with v4.14, this is exploitable by unprivileged users as long as
the unprivileged_bpf_disabled sysctl isn't set.
Debian assigned CVE-2017-16995 for this issue.
v3:
- add CVE number (Ben Hutchings)
Fixes: 484611357c19 ("bpf: allow access into map value arrays")
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2401,7 +2401,13 @@ static int check_alu_op(struct bpf_verif
* remember the value we stored into this reg
*/
regs[insn->dst_reg].type = SCALAR_VALUE;
- __mark_reg_known(regs + insn->dst_reg, insn->imm);
+ if (BPF_CLASS(insn->code) == BPF_ALU64) {
+ __mark_reg_known(regs + insn->dst_reg,
+ insn->imm);
+ } else {
+ __mark_reg_known(regs + insn->dst_reg,
+ (u32)insn->imm);
+ }
}
} else if (opcode > BPF_END) {

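A worked example of the bug (hypothetical, not from the patch):

/* BPF_MOV32_IMM(BPF_REG_0, -1) is BPF_ALU|BPF_MOV|BPF_K. At run time
 * it zero-pads: r0 = 0x00000000ffffffff. But __mark_reg_known() takes
 * a u64, so passing the s32 immediate -1 sign-extends it, and the
 * verifier tracked r0 as the "known" value 0xffffffffffffffff. Bounds
 * derived from the wrong value let out-of-range accesses through,
 * which is what made CVE-2017-16995 exploitable. The fix casts the
 * immediate to u32 for BPF_ALU, so the tracked value matches the
 * run-time one.
 */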
debian/patches/bugfix/all/bpf-fix-incorrect-tracking-of-register-size-truncati.patch

@@ -0,0 +1,119 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:55 -0800
Subject: [3/9] bpf: fix incorrect tracking of register size truncation
Origin: https://git.kernel.org/linus/0c17d1d2c61936401f4702e1846e2c19b200f958
Properly handle register truncation to a smaller size.
The old code first mirrors the clearing of the high 32 bits in the bitwise
tristate representation, which is correct. But then, it computes the new
arithmetic bounds as the intersection between the old arithmetic bounds and
the bounds resulting from the bitwise tristate representation. Therefore,
when coerce_reg_to_32() is called on a number with bounds
[0xffff'fff8, 0x1'0000'0007], the verifier computes
[0xffff'fff8, 0xffff'ffff] as bounds of the truncated number.
This is incorrect: The truncated number could also be in the range [0, 7],
and no meaningful arithmetic bounds can be computed in that case apart from
the obvious [0, 0xffff'ffff].
Starting with v4.14, this is exploitable by unprivileged users as long as
the unprivileged_bpf_disabled sysctl isn't set.
Debian assigned CVE-2017-16996 for this issue.
v2:
- flip the mask during arithmetic bounds calculation (Ben Hutchings)
v3:
- add CVE number (Ben Hutchings)
Fixes: b03c9f9fdc37 ("bpf/verifier: track signed and unsigned min/max values")
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
[bwh: Backported to 4.14]
---
kernel/bpf/verifier.c | 44 +++++++++++++++++++++++++++-----------------
1 file changed, 27 insertions(+), 17 deletions(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1079,6 +1079,29 @@ static int check_ptr_alignment(struct bp
strict);
}
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+ u64 mask;
+
+ /* clear high bits in bit representation */
+ reg->var_off = tnum_cast(reg->var_off, size);
+
+ /* fix arithmetic bounds */
+ mask = ((u64)1 << (size * 8)) - 1;
+ if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+ reg->umin_value &= mask;
+ reg->umax_value &= mask;
+ } else {
+ reg->umin_value = 0;
+ reg->umax_value = mask;
+ }
+ reg->smin_value = reg->umin_value;
+ reg->smax_value = reg->umax_value;
+}
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -1217,9 +1240,7 @@ static int check_mem_access(struct bpf_v
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
state->regs[value_regno].type == SCALAR_VALUE) {
/* b/h/w load zero-extends, mark upper bits as known 0 */
- state->regs[value_regno].var_off = tnum_cast(
- state->regs[value_regno].var_off, size);
- __update_reg_bounds(&state->regs[value_regno]);
+ coerce_reg_to_size(&state->regs[value_regno], size);
}
return err;
}
@@ -1765,14 +1786,6 @@ static int check_call(struct bpf_verifie
return 0;
}
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
- /* clear high 32 bits */
- reg->var_off = tnum_cast(reg->var_off, 4);
- /* Update bounds */
- __update_reg_bounds(reg);
-}
-
static bool signed_add_overflows(s64 a, s64 b)
{
/* Do the add in u64, where overflow is well-defined */
@@ -2010,8 +2023,8 @@ static int adjust_scalar_min_max_vals(st
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->64 */
- coerce_reg_to_32(dst_reg);
- coerce_reg_to_32(&src_reg);
+ coerce_reg_to_size(dst_reg, 4);
+ coerce_reg_to_size(&src_reg, 4);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
@@ -2391,10 +2404,7 @@ static int check_alu_op(struct bpf_verif
return -EACCES;
}
mark_reg_unknown(env, regs, insn->dst_reg);
- /* high 32 bits are known zero. */
- regs[insn->dst_reg].var_off = tnum_cast(
- regs[insn->dst_reg].var_off, 4);
- __update_reg_bounds(&regs[insn->dst_reg]);
+ coerce_reg_to_size(&regs[insn->dst_reg], 4);
}
} else {
/* case: R = imm

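The arithmetic can be checked in isolation; a minimal user-space sketch of the fixed bounds logic, using the example bounds from the commit message:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* bounds from the commit message: [0xffff'fff8, 0x1'0000'0007] */
	uint64_t umin = 0xfffffff8ULL, umax = 0x100000007ULL;
	uint64_t mask = ((uint64_t)1 << 32) - 1;    /* size = 4 bytes */

	if ((umin & ~mask) == (umax & ~mask)) {
		/* truncation cannot wrap: just mask both bounds */
		umin &= mask;
		umax &= mask;
	} else {
		/* truncation wraps: 0xfffffff8 stays 0xfffffff8 but
		 * 0x100000007 becomes 7, so only [0, mask] is sound */
		umin = 0;
		umax = mask;
	}
	printf("[%#llx, %#llx]\n",    /* prints [0, 0xffffffff] */
	       (unsigned long long)umin, (unsigned long long)umax);
	return 0;
}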
debian/patches/bugfix/all/bpf-fix-integer-overflows.patch

@@ -0,0 +1,121 @@
From: Alexei Starovoitov <ast@kernel.org>
Date: Mon, 18 Dec 2017 20:12:00 -0800
Subject: [8/9] bpf: fix integer overflows
Origin: https://git.kernel.org/linus/bb7f0f989ca7de1153bd128a40a71709e339fa03
There were various issues related to the limited size of integers used in
the verifier:
- `off + size` overflow in __check_map_access()
- `off + reg->off` overflow in check_mem_access()
- `off + reg->var_off.value` overflow or 32-bit truncation of
`reg->var_off.value` in check_mem_access()
- 32-bit truncation in check_stack_boundary()
Make sure that any integer math cannot overflow by not allowing
pointer math with large values.
Also reduce the scope of "scalar op scalar" tracking.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
include/linux/bpf_verifier.h | 4 ++--
kernel/bpf/verifier.c | 48 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 50 insertions(+), 2 deletions(-)
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -15,11 +15,11 @@
* In practice this is far bigger than any realistic pointer offset; this limit
* ensures that umax_value + (int)off + (int)size cannot overflow a u64.
*/
-#define BPF_MAX_VAR_OFF (1ULL << 31)
+#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
* that converting umax_value to int cannot overflow.
*/
-#define BPF_MAX_VAR_SIZ INT_MAX
+#define BPF_MAX_VAR_SIZ (1 << 29)
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1812,6 +1812,41 @@ static bool signed_sub_overflows(s64 a,
return res > a;
}
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg,
+ enum bpf_reg_type type)
+{
+ bool known = tnum_is_const(reg->var_off);
+ s64 val = reg->var_off.value;
+ s64 smin = reg->smin_value;
+
+ if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+ verbose(env, "math between %s pointer and %lld is not allowed\n",
+ reg_type_str[type], val);
+ return false;
+ }
+
+ if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+ verbose(env, "%s pointer offset %d is not allowed\n",
+ reg_type_str[type], reg->off);
+ return false;
+ }
+
+ if (smin == S64_MIN) {
+ verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+ reg_type_str[type]);
+ return false;
+ }
+
+ if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+ verbose(env, "value %lld makes %s pointer be out of bounds\n",
+ smin, reg_type_str[type]);
+ return false;
+ }
+
+ return true;
+}
+
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
* Caller should also handle BPF_MOV case separately.
* If we return -EACCES, caller may want to try again treating pointer as a
@@ -1880,6 +1915,10 @@ static int adjust_ptr_min_max_vals(struc
dst_reg->type = ptr_reg->type;
dst_reg->id = ptr_reg->id;
+ if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+ !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+ return -EINVAL;
+
switch (opcode) {
case BPF_ADD:
/* We can take a fixed offset as long as it doesn't overflow
@@ -2010,6 +2049,9 @@ static int adjust_ptr_min_max_vals(struc
return -EACCES;
}
+ if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+ return -EINVAL;
+
__update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
@@ -2039,6 +2081,12 @@ static int adjust_scalar_min_max_vals(st
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
+ if (!src_known &&
+ opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+ __mark_reg_unknown(dst_reg);
+ return 0;
+ }
+
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||

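A hypothetical sketch of the new limit in action (not from the patch):

/* With BPF_MAX_VAR_OFF reduced to 1 << 29, check_reg_sane_offset()
 * rejects pointer arithmetic with large known constants before any of
 * the u64 offset math can overflow:
 *
 *   r2 = <map value pointer>;
 *   r2 += 0x20000000;   // 1 << 29: rejected with "math between
 *                       // map_value pointer and 536870912 is not allowed"
 *
 * Scalars with unknown values are likewise only tracked through
 * BPF_ADD, BPF_SUB and BPF_AND; all other ops now simply mark the
 * destination unknown, shrinking the bounds-tracking attack surface.
 */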
debian/patches/bugfix/all/bpf-fix-missing-error-return-in-check_stack_boundary.patch

@@ -0,0 +1,26 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:57 -0800
Subject: [5/9] bpf: fix missing error return in check_stack_boundary()
Origin: https://git.kernel.org/linus/ea25f914dc164c8d56b36147ecc86bc65f83c469
Prevent indirect stack accesses at non-constant addresses, which would
permit reading and corrupting spilled pointers.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 1 +
1 file changed, 1 insertion(+)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1320,6 +1320,7 @@ static int check_stack_boundary(struct b
tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
+ return -EACCES;
}
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||

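A hypothetical sketch of the access being rejected (not from the patch):

/* An indirect stack access whose offset is not a compile-time
 * constant, e.g. a stack buffer passed to a helper:
 *
 *   r1 = r10;       // frame pointer
 *   r1 += r7;       // r7 is an unknown scalar -> var_off not constant
 *   ...r1 passed as a helper's memory argument...
 *
 * check_stack_boundary() already printed "invalid variable stack read"
 * for this, but without the return it fell through and accepted the
 * access, allowing spilled pointers to be read or corrupted.
 */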
debian/patches/bugfix/all/bpf-force-strict-alignment-checks-for-stack-pointers.patch

@@ -0,0 +1,31 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:58 -0800
Subject: [6/9] bpf: force strict alignment checks for stack pointers
Origin: https://git.kernel.org/linus/a5ec6ae161d72f01411169a938fa5f8baea16e8f
Force strict alignment checks for stack pointers because the tracking of
stack spills relies on it; unaligned stack accesses can lead to corruption
of spilled registers, which is exploitable.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 5 +++++
1 file changed, 5 insertions(+)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1071,6 +1071,11 @@ static int check_ptr_alignment(struct bp
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
+ /* The stack spill tracking logic in check_stack_write()
+ * and check_stack_read() relies on stack accesses being
+ * aligned.
+ */
+ strict = true;
break;
default:
break;

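A hypothetical sketch of the corruption this prevents (not from the patch):

/* Spill tracking works on 8-byte slots, so an unaligned store can
 * clobber part of a spilled pointer while the verifier still treats
 * the slot as holding the original spill:
 *
 *   *(u64 *)(r10 - 8) = r1;    // spill a pointer into [fp-8, fp-1]
 *   *(u32 *)(r10 - 6) = 42;    // unaligned store into the same slot
 *   r1 = *(u64 *)(r10 - 8);    // fill: r1 is corrupted, tracking is not
 *
 * With strict = true for PTR_TO_STACK, the unaligned store is rejected
 * even when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS would otherwise
 * relax the check.
 */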
debian/patches/bugfix/all/bpf-move-global-verifier-log-into-verifier-environme.patch

File diff suppressed because it is too large.

debian/patches/bugfix/all/bpf-verifier-fix-bounds-calculation-on-bpf_rsh.patch

@@ -0,0 +1,61 @@
From: Edward Cree <ecree@solarflare.com>
Date: Mon, 18 Dec 2017 20:11:53 -0800
Subject: [1/9] bpf/verifier: fix bounds calculation on BPF_RSH
Origin: https://git.kernel.org/linus/4374f256ce8182019353c0c639bb8d0695b4c941
Incorrect signed bounds were being computed.
If the old upper signed bound was positive and the old lower signed bound was
negative, this could cause the new upper signed bound to be too low,
leading to security issues.
Fixes: b03c9f9fdc37 ("bpf/verifier: track signed and unsigned min/max values")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Edward Cree <ecree@solarflare.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
[jannh@google.com: changed description to reflect bug impact]
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2183,20 +2183,22 @@ static int adjust_scalar_min_max_vals(st
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
- /* BPF_RSH is an unsigned shift, so make the appropriate casts */
- if (dst_reg->smin_value < 0) {
- if (umin_val) {
- /* Sign bit will be cleared */
- dst_reg->smin_value = 0;
- } else {
- /* Lost sign bit information */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- }
- } else {
- dst_reg->smin_value =
- (u64)(dst_reg->smin_value) >> umax_val;
- }
+ /* BPF_RSH is an unsigned shift. If the value in dst_reg might
+ * be negative, then either:
+ * 1) src_reg might be zero, so the sign bit of the result is
+ * unknown, so we lose our signed bounds
+ * 2) it's known negative, thus the unsigned bounds capture the
+ * signed bounds
+ * 3) the signed bounds cross zero, so they tell us nothing
+ * about the result
+ * If the value in dst_reg is known nonnegative, then again the
+ * unsigned bounds capture the signed bounds.
+ * Thus, in all cases it suffices to blow away our signed bounds
+ * and rely on inferring new ones from the unsigned bounds and
+ * var_off of the result.
+ */
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
if (src_known)
dst_reg->var_off = tnum_rshift(dst_reg->var_off,
umin_val);

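The need to discard the signed bounds can be seen with a small user-space sketch (illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* suppose dst_reg was tracked as [-2, 5]; BPF_RSH executes as an
	 * unsigned 64-bit shift, so the negative end of the range does
	 * not stay below the old smax_value */
	int64_t smin = -2;
	uint64_t shifted = (uint64_t)smin >> 1;

	/* prints 0x7fffffffffffffff: far above any small signed upper
	 * bound the old code might have kept */
	printf("(u64)-2 >> 1 = %#llx\n", (unsigned long long)shifted);
	return 0;
}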
debian/patches/series

@@ -127,6 +127,17 @@ bugfix/all/media-dvb-usb-v2-lmedm04-move-ts2020-attach-to-dm04_.patch
bugfix/all/media-hdpvr-fix-an-error-handling-path-in-hdpvr_prob.patch
bugfix/all/kvm-fix-stack-out-of-bounds-read-in-write_mmio.patch
bugfix/all/bluetooth-prevent-stack-info-leak-from-the-efs-element.patch
bugfix/all/bpf-encapsulate-verifier-log-state-into-a-structure.patch
bugfix/all/bpf-move-global-verifier-log-into-verifier-environme.patch
bugfix/all/bpf-fix-branch-pruning-logic.patch
bugfix/all/bpf-verifier-fix-bounds-calculation-on-bpf_rsh.patch
bugfix/all/bpf-fix-incorrect-sign-extension-in-check_alu_op.patch
bugfix/all/bpf-fix-incorrect-tracking-of-register-size-truncati.patch
bugfix/all/bpf-fix-32-bit-alu-op-verification.patch
bugfix/all/bpf-fix-missing-error-return-in-check_stack_boundary.patch
bugfix/all/bpf-force-strict-alignment-checks-for-stack-pointers.patch
bugfix/all/bpf-don-t-prune-branches-when-a-scalar-is-replaced-w.patch
bugfix/all/bpf-fix-integer-overflows.patch
# Fix exported symbol versions
bugfix/all/module-disable-matching-missing-version-crc.patch