Update to 4.14.9

Salvatore Bonaccorso 2018-01-05 11:15:40 +01:00
parent f587f7242e
commit a7b364e44f
16 changed files with 159 additions and 744 deletions

debian/changelog

@@ -1,7 +1,8 @@
-linux (4.14.8-1) UNRELEASED; urgency=medium
+linux (4.14.9-1) UNRELEASED; urgency=medium

  * New upstream stable update:
    https://www.kernel.org/pub/linux/kernel/v4.x/ChangeLog-4.14.8
+    https://www.kernel.org/pub/linux/kernel/v4.x/ChangeLog-4.14.9

  [ Ben Hutchings ]
  * e1000e: Fix e1000_check_for_copper_link_ich8lan return value.

debian/patches/bugfix/all/bpf-don-t-prune-branches-when-a-scalar-is-replaced-w.patch

@@ -1,44 +0,0 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:59 -0800
Subject: [7/9] bpf: don't prune branches when a scalar is replaced with a
pointer
Origin: https://git.kernel.org/linus/179d1c5602997fef5a940c6ddcf31212cbfebd14
This could be made safe by passing through a reference to env and checking
for env->allow_ptr_leaks, but it would only work one way and is probably
not worth the hassle - not doing it will not directly lead to program
rejection.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3366,15 +3366,14 @@ static bool regsafe(struct bpf_reg_state
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
- /* if we knew anything about the old value, we're not
- * equal, because we can't know anything about the
- * scalar value of the pointer in the new value.
+ /* We're trying to use a pointer in place of a scalar.
+ * Even if the scalar was unbounded, this could lead to
+ * pointer leaks because scalars are allowed to leak
+ * while pointers are not. We could make this safe in
+ * special cases if root is calling us, but it's
+ * probably not worth the hassle.
*/
- return rold->umin_value == 0 &&
- rold->umax_value == U64_MAX &&
- rold->smin_value == S64_MIN &&
- rold->smax_value == S64_MAX &&
- tnum_is_unknown(rold->var_off);
+ return false;
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and
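
As an illustration of the rule enforced above by the plain "return false": a minimal userspace C sketch of state pruning, with simplified stand-in types rather than the kernel's struct bpf_reg_state:

#include <stdbool.h>
#include <stdio.h>

enum reg_type { SCALAR_VALUE, PTR_TO_STACK };

struct reg_state {
	enum reg_type type;
	unsigned long long umin, umax;
};

/* An old SCALAR_VALUE state may only subsume a new state that is also a
 * scalar; a pointer in the same slot must never count as equivalent, or
 * the pruned path could leak the pointer value. */
static bool regsafe(const struct reg_state *rold, const struct reg_state *rcur)
{
	if (rold->type != SCALAR_VALUE)
		return rold->type == rcur->type;	/* heavily simplified */
	if (rcur->type == SCALAR_VALUE)
		return rold->umin <= rcur->umin && rcur->umax <= rold->umax;
	return false;	/* scalar replaced with a pointer: the fix */
}

int main(void)
{
	struct reg_state old = { SCALAR_VALUE, 0, ~0ULL };	/* unbounded scalar */
	struct reg_state cur = { PTR_TO_STACK, 0, 0 };

	printf("prune? %s\n", regsafe(&old, &cur) ? "yes" : "no");	/* no */
	return 0;
}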

debian/patches/bugfix/all/bpf-fix-32-bit-alu-op-verification.patch

@@ -1,82 +0,0 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:56 -0800
Subject: [4/9] bpf: fix 32-bit ALU op verification
Origin: https://git.kernel.org/linus/468f6eafa6c44cb2c5d8aad35e12f06c240a812a
32-bit ALU ops operate on 32-bit values and have 32-bit outputs.
Adjust the verifier accordingly.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 28 +++++++++++++++++-----------
1 file changed, 17 insertions(+), 11 deletions(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2010,6 +2010,10 @@ static int adjust_ptr_min_max_vals(struc
return 0;
}
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
@@ -2020,12 +2024,8 @@ static int adjust_scalar_min_max_vals(st
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
+ u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
- if (BPF_CLASS(insn->code) != BPF_ALU64) {
- /* 32-bit ALU ops are (32,32)->64 */
- coerce_reg_to_size(dst_reg, 4);
- coerce_reg_to_size(&src_reg, 4);
- }
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
@@ -2161,9 +2161,9 @@ static int adjust_scalar_min_max_vals(st
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
- if (umax_val > 63) {
- /* Shifts greater than 63 are undefined. This includes
- * shifts by a negative number.
+ if (umax_val >= insn_bitness) {
+ /* Shifts greater than 31 or 63 are undefined.
+ * This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
@@ -2189,9 +2189,9 @@ static int adjust_scalar_min_max_vals(st
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
- if (umax_val > 63) {
- /* Shifts greater than 63 are undefined. This includes
- * shifts by a negative number.
+ if (umax_val >= insn_bitness) {
+ /* Shifts greater than 31 or 63 are undefined.
+ * This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
@@ -2227,6 +2227,12 @@ static int adjust_scalar_min_max_vals(st
break;
}
+ if (BPF_CLASS(insn->code) != BPF_ALU64) {
+ /* 32-bit ALU ops are (32,32)->32 */
+ coerce_reg_to_size(dst_reg, 4);
+ coerce_reg_to_size(&src_reg, 4);
+ }
+
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
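
The "(32,32)->32" rule the corrected comment states can be seen in plain C; a standalone sketch in which insn_bitness and the operand values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t insn_bitness = 32;	/* BPF_ALU; 64 for BPF_ALU64 */
	uint64_t a = 0xffffffffULL, b = 1;
	uint64_t umax_val = 32;		/* largest possible shift amount */

	/* 32-bit add wraps to 0: the result must be truncated *after* the
	 * operation, which is what moving coerce_reg_to_size() achieves */
	printf("as 64-bit: %#llx, as 32-bit: %#x\n",
	       (unsigned long long)(a + b), (uint32_t)(a + b));

	/* shifts >= bitness (32 here, not the previously hardcoded 63)
	 * are undefined, so the verifier gives up on the register */
	if (umax_val >= insn_bitness)
		puts("mark_reg_unknown(dst_reg)");
	return 0;
}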

debian/patches/bugfix/all/bpf-fix-branch-pruning-logic.patch

@@ -1,112 +0,0 @@
From: Alexei Starovoitov <ast@fb.com>
Date: Wed, 22 Nov 2017 16:42:05 -0800
Subject: bpf: fix branch pruning logic
Origin: https://git.kernel.org/linus/c131187db2d3fa2f8bf32fdf4e9a4ef805168467
when the verifier detects that register contains a runtime constant
and it's compared with another constant it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
include/linux/bpf_verifier.h | 2 +-
kernel/bpf/verifier.c | 27 +++++++++++++++++++++++++++
2 files changed, 28 insertions(+), 1 deletion(-)
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -110,7 +110,7 @@ struct bpf_insn_aux_data {
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int converted_op_size; /* the valid value width after perceived conversion */
+ bool seen; /* this insn was processed by the verifier */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3695,6 +3695,7 @@ static int do_check(struct bpf_verifier_
if (err)
return err;
+ env->insn_aux_data[insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
@@ -3885,6 +3886,7 @@ process_bpf_exit:
return err;
insn_idx++;
+ env->insn_aux_data[insn_idx].seen = true;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
@@ -4067,6 +4069,7 @@ static int adjust_insn_aux_data(struct b
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+ int i;
if (cnt == 1)
return 0;
@@ -4076,6 +4079,8 @@ static int adjust_insn_aux_data(struct b
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+ for (i = off; i < off + cnt - 1; i++)
+ new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
@@ -4094,6 +4099,25 @@ static struct bpf_prog *bpf_patch_insn_d
return new_prog;
}
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ const int insn_cnt = env->prog->len;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++) {
+ if (aux_data[i].seen)
+ continue;
+ memcpy(insn + i, &nop, sizeof(nop));
+ }
+}
+
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
@@ -4410,6 +4434,9 @@ skip_full_check:
free_states(env);
if (ret == 0)
+ sanitize_dead_code(env);
+
+ if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
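
A userspace sketch of what sanitize_dead_code() does: every instruction the verifier walk never marked as seen is overwritten with the no-op "r0 = r0"; the program text below is invented for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct insn { char text[32]; };

int main(void)
{
	struct insn prog[] = {
		{ "r1 = 7" },
		{ "if r1 != 7 goto +1" },	/* never taken at run time */
		{ "r0 = *(u32 *)(r9 + 0)" },	/* dead, but would upset a JIT */
		{ "exit" },
	};
	bool seen[] = { true, true, false, true };	/* from the verifier walk */
	const struct insn nop = { "r0 = r0" };	/* BPF_MOV64_REG(R0, R0) */

	for (size_t i = 0; i < sizeof(prog) / sizeof(prog[0]); i++) {
		if (!seen[i])
			memcpy(&prog[i], &nop, sizeof(nop));
		puts(prog[i].text);
	}
	return 0;
}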

debian/patches/bugfix/all/bpf-fix-incorrect-sign-extension-in-check_alu_op.patch

@@ -1,44 +0,0 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:54 -0800
Subject: [2/9] bpf: fix incorrect sign extension in check_alu_op()
Origin: https://git.kernel.org/linus/95a762e2c8c942780948091f8f2a4f32fce1ac6f
Distinguish between
BPF_ALU64|BPF_MOV|BPF_K (load 32-bit immediate, sign-extended to 64-bit)
and BPF_ALU|BPF_MOV|BPF_K (load 32-bit immediate, zero-padded to 64-bit);
only perform sign extension in the first case.
Starting with v4.14, this is exploitable by unprivileged users as long as
the unprivileged_bpf_disabled sysctl isn't set.
Debian assigned CVE-2017-16995 for this issue.
v3:
- add CVE number (Ben Hutchings)
Fixes: 484611357c19 ("bpf: allow access into map value arrays")
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2401,7 +2401,13 @@ static int check_alu_op(struct bpf_verif
* remember the value we stored into this reg
*/
regs[insn->dst_reg].type = SCALAR_VALUE;
- __mark_reg_known(regs + insn->dst_reg, insn->imm);
+ if (BPF_CLASS(insn->code) == BPF_ALU64) {
+ __mark_reg_known(regs + insn->dst_reg,
+ insn->imm);
+ } else {
+ __mark_reg_known(regs + insn->dst_reg,
+ (u32)insn->imm);
+ }
}
} else if (opcode > BPF_END) {
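
The distinction the fix draws, as a standalone C sketch; the two casts mirror the BPF_ALU64 and BPF_ALU branches added above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t imm = -1;	/* a 32-bit immediate, as in insn->imm */

	uint64_t alu64 = (uint64_t)(int64_t)imm;	/* BPF_ALU64|BPF_MOV|BPF_K */
	uint64_t alu32 = (uint64_t)(uint32_t)imm;	/* BPF_ALU|BPF_MOV|BPF_K */

	printf("sign-extended: %#llx\n", (unsigned long long)alu64);	/* 0xffffffffffffffff */
	printf("zero-padded:   %#llx\n", (unsigned long long)alu32);	/* 0xffffffff */
	return 0;
}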

debian/patches/bugfix/all/bpf-fix-incorrect-tracking-of-register-size-truncati.patch

@@ -1,119 +0,0 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:55 -0800
Subject: [3/9] bpf: fix incorrect tracking of register size truncation
Origin: https://git.kernel.org/linus/0c17d1d2c61936401f4702e1846e2c19b200f958
Properly handle register truncation to a smaller size.
The old code first mirrors the clearing of the high 32 bits in the bitwise
tristate representation, which is correct. But then, it computes the new
arithmetic bounds as the intersection between the old arithmetic bounds and
the bounds resulting from the bitwise tristate representation. Therefore,
when coerce_reg_to_32() is called on a number with bounds
[0xffff'fff8, 0x1'0000'0007], the verifier computes
[0xffff'fff8, 0xffff'ffff] as bounds of the truncated number.
This is incorrect: The truncated number could also be in the range [0, 7],
and no meaningful arithmetic bounds can be computed in that case apart from
the obvious [0, 0xffff'ffff].
Starting with v4.14, this is exploitable by unprivileged users as long as
the unprivileged_bpf_disabled sysctl isn't set.
Debian assigned CVE-2017-16996 for this issue.
v2:
- flip the mask during arithmetic bounds calculation (Ben Hutchings)
v3:
- add CVE number (Ben Hutchings)
Fixes: b03c9f9fdc37 ("bpf/verifier: track signed and unsigned min/max values")
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
[bwh: Backported to 4.14]
---
kernel/bpf/verifier.c | 44 +++++++++++++++++++++++++++-----------------
1 file changed, 27 insertions(+), 17 deletions(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1079,6 +1079,29 @@ static int check_ptr_alignment(struct bp
strict);
}
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+ u64 mask;
+
+ /* clear high bits in bit representation */
+ reg->var_off = tnum_cast(reg->var_off, size);
+
+ /* fix arithmetic bounds */
+ mask = ((u64)1 << (size * 8)) - 1;
+ if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+ reg->umin_value &= mask;
+ reg->umax_value &= mask;
+ } else {
+ reg->umin_value = 0;
+ reg->umax_value = mask;
+ }
+ reg->smin_value = reg->umin_value;
+ reg->smax_value = reg->umax_value;
+}
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -1217,9 +1240,7 @@ static int check_mem_access(struct bpf_v
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
state->regs[value_regno].type == SCALAR_VALUE) {
/* b/h/w load zero-extends, mark upper bits as known 0 */
- state->regs[value_regno].var_off = tnum_cast(
- state->regs[value_regno].var_off, size);
- __update_reg_bounds(&state->regs[value_regno]);
+ coerce_reg_to_size(&state->regs[value_regno], size);
}
return err;
}
@@ -1765,14 +1786,6 @@ static int check_call(struct bpf_verifie
return 0;
}
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
- /* clear high 32 bits */
- reg->var_off = tnum_cast(reg->var_off, 4);
- /* Update bounds */
- __update_reg_bounds(reg);
-}
-
static bool signed_add_overflows(s64 a, s64 b)
{
/* Do the add in u64, where overflow is well-defined */
@@ -2010,8 +2023,8 @@ static int adjust_scalar_min_max_vals(st
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->64 */
- coerce_reg_to_32(dst_reg);
- coerce_reg_to_32(&src_reg);
+ coerce_reg_to_size(dst_reg, 4);
+ coerce_reg_to_size(&src_reg, 4);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
@@ -2391,10 +2404,7 @@ static int check_alu_op(struct bpf_verif
return -EACCES;
}
mark_reg_unknown(env, regs, insn->dst_reg);
- /* high 32 bits are known zero. */
- regs[insn->dst_reg].var_off = tnum_cast(
- regs[insn->dst_reg].var_off, 4);
- __update_reg_bounds(&regs[insn->dst_reg]);
+ coerce_reg_to_size(&regs[insn->dst_reg], 4);
}
} else {
/* case: R = imm
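
The bounds arithmetic of the new coerce_reg_to_size(), applied to the example from the description in a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* bounds before truncating to size = 4 bytes, from the example above */
	uint64_t umin = 0xfffffff8ULL, umax = 0x100000007ULL;
	uint64_t mask = ((uint64_t)1 << (4 * 8)) - 1;

	if ((umin & ~mask) == (umax & ~mask)) {
		/* both bounds share the discarded high bits: just mask them */
		umin &= mask;
		umax &= mask;
	} else {
		/* the range crosses a 2^32 boundary (this case), so only
		 * the obvious [0, 0xffffffff] is sound */
		umin = 0;
		umax = mask;
	}
	printf("[%#llx, %#llx]\n",
	       (unsigned long long)umin, (unsigned long long)umax);
	return 0;
}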

debian/patches/bugfix/all/bpf-fix-integer-overflows.patch

@@ -20,74 +20,50 @@ Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
[carnil:
- adjust context, we previously changed the verbose() signature
- drop changes to include/linux/bpf_verifier.h already set
]
---
include/linux/bpf_verifier.h | 4 ++--
kernel/bpf/verifier.c | 48 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 50 insertions(+), 2 deletions(-)
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -15,11 +15,11 @@
* In practice this is far bigger than any realistic pointer offset; this limit
* ensures that umax_value + (int)off + (int)size cannot overflow a u64.
*/
-#define BPF_MAX_VAR_OFF (1ULL << 31)
+#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
* that converting umax_value to int cannot overflow.
*/
-#define BPF_MAX_VAR_SIZ INT_MAX
+#define BPF_MAX_VAR_SIZ (1 << 29)
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1812,6 +1812,41 @@ static bool signed_sub_overflows(s64 a,
return res > a;
}
@@ -1821,25 +1821,25 @@ static bool check_reg_sane_offset(struct
s64 smin = reg->smin_value;
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg,
+ enum bpf_reg_type type)
+{
+ bool known = tnum_is_const(reg->var_off);
+ s64 val = reg->var_off.value;
+ s64 smin = reg->smin_value;
+
+ if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
- verbose("math between %s pointer and %lld is not allowed\n",
+ verbose(env, "math between %s pointer and %lld is not allowed\n",
+ reg_type_str[type], val);
+ return false;
+ }
+
+ if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
reg_type_str[type], val);
return false;
}
if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
- verbose("%s pointer offset %d is not allowed\n",
+ verbose(env, "%s pointer offset %d is not allowed\n",
+ reg_type_str[type], reg->off);
+ return false;
+ }
+
+ if (smin == S64_MIN) {
reg_type_str[type], reg->off);
return false;
}
if (smin == S64_MIN) {
- verbose("math between %s pointer and register with unbounded min value is not allowed\n",
+ verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+ reg_type_str[type]);
+ return false;
+ }
+
+ if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
reg_type_str[type]);
return false;
}
if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
- verbose("value %lld makes %s pointer be out of bounds\n",
+ verbose(env, "value %lld makes %s pointer be out of bounds\n",
+ smin, reg_type_str[type]);
+ return false;
+ }
+
+ return true;
+}
+
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
* Caller should also handle BPF_MOV case separately.
* If we return -EACCES, caller may want to try again treating pointer as a
@@ -1880,6 +1915,10 @@ static int adjust_ptr_min_max_vals(struc
dst_reg->type = ptr_reg->type;
dst_reg->id = ptr_reg->id;
smin, reg_type_str[type]);
return false;
}
@@ -1919,6 +1919,10 @@ static int adjust_ptr_min_max_vals(struc
!check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
return -EINVAL;
+ if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+ !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
@@ -96,9 +72,9 @@ Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
switch (opcode) {
case BPF_ADD:
/* We can take a fixed offset as long as it doesn't overflow
@@ -2010,6 +2049,9 @@ static int adjust_ptr_min_max_vals(struc
return -EACCES;
}
@@ -2052,6 +2056,9 @@ static int adjust_ptr_min_max_vals(struc
if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
return -EINVAL;
+ if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+ return -EINVAL;
@@ -106,16 +82,16 @@ Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
__update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
@@ -2039,6 +2081,12 @@ static int adjust_scalar_min_max_vals(st
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
@@ -2083,6 +2090,12 @@ static int adjust_scalar_min_max_vals(st
+ if (!src_known &&
+ opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
if (!src_known &&
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+ __mark_reg_unknown(dst_reg);
+ return 0;
+ }
+
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
+ if (!src_known &&
+ opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
__mark_reg_unknown(dst_reg);
return 0;
}
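
A back-of-the-envelope check of why BPF_MAX_VAR_OFF shrinks to 1 << 29: variable offset, fixed offset and access size each stay below it, so their sum cannot overflow 32-bit arithmetic (a sketch of the reasoning, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define BPF_MAX_VAR_OFF (1 << 29)

int main(void)
{
	int64_t worst = (int64_t)BPF_MAX_VAR_OFF	/* variable offset */
		      + BPF_MAX_VAR_OFF			/* fixed reg->off */
		      + BPF_MAX_VAR_OFF;		/* access size bound */

	/* 3 * 2^29 = 0x60000000, comfortably below 2^31 - 1 */
	printf("worst case %lld < INT32_MAX %d\n", (long long)worst, INT32_MAX);
	return 0;
}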

debian/patches/bugfix/all/bpf-fix-missing-error-return-in-check_stack_boundary.patch

@@ -1,26 +0,0 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:57 -0800
Subject: [5/9] bpf: fix missing error return in check_stack_boundary()
Origin: https://git.kernel.org/linus/ea25f914dc164c8d56b36147ecc86bc65f83c469
Prevent indirect stack accesses at non-constant addresses, which would
permit reading and corrupting spilled pointers.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 1 +
1 file changed, 1 insertion(+)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1320,6 +1320,7 @@ static int check_stack_boundary(struct b
tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
+ return -EACCES;
}
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
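
The bug pattern in miniature: the error was reported but control fell through, so a variable stack offset was still accepted; a schematic standalone sketch, not the kernel function:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int check_stack_boundary(bool var_off_is_const)
{
	if (!var_off_is_const) {
		fprintf(stderr, "invalid variable stack read\n");
		return -EACCES;	/* the added line; before the fix,
				 * execution simply fell through */
	}
	return 0;	/* constant offset: checked further below */
}

int main(void)
{
	printf("%d\n", check_stack_boundary(false));	/* -13 on Linux */
	return 0;
}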

debian/patches/bugfix/all/bpf-force-strict-alignment-checks-for-stack-pointers.patch

@@ -1,31 +0,0 @@
From: Jann Horn <jannh@google.com>
Date: Mon, 18 Dec 2017 20:11:58 -0800
Subject: [6/9] bpf: force strict alignment checks for stack pointers
Origin: https://git.kernel.org/linus/a5ec6ae161d72f01411169a938fa5f8baea16e8f
Force strict alignment checks for stack pointers because the tracking of
stack spills relies on it; unaligned stack accesses can lead to corruption
of spilled registers, which is exploitable.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 5 +++++
1 file changed, 5 insertions(+)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1071,6 +1071,11 @@ static int check_ptr_alignment(struct bp
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
+ /* The stack spill tracking logic in check_stack_write()
+ * and check_stack_read() relies on stack accesses being
+ * aligned.
+ */
+ strict = true;
break;
default:
break;
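
Why unaligned stack pointers break spill tracking, as slot arithmetic in a standalone sketch (the off value is hypothetical):

#include <stdio.h>

#define BPF_REG_SIZE 8

static int slot_of(int byte_off)	/* byte_off is negative, below fp */
{
	return (-byte_off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	int off = -12;	/* a hypothetical unaligned 8-byte spill */

	printf("store at %d spans slots %d and %d\n",
	       off, slot_of(off), slot_of(off + BPF_REG_SIZE - 1));
	/* two different slots: slot-granular spill tracking cannot
	 * describe it soundly, hence strict = true for PTR_TO_STACK */
	return 0;
}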

debian/patches/bugfix/all/bpf-move-global-verifier-log-into-verifier-environme.patch

@@ -14,6 +14,7 @@ Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
[bwh: Backported to 4.14]
[carnil: refresh after 4.14.9 import]
---
include/linux/bpf_verifier.h | 2 +
kernel/bpf/verifier.c | 491 +++++++++++++++++++++++--------------------
@@ -576,7 +577,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
case PTR_TO_MAP_VALUE:
pointer_desc = "value ";
break;
@@ -1066,7 +1075,8 @@ static int check_ptr_alignment(struct bp
@@ -1071,7 +1080,8 @@ static int check_ptr_alignment(struct bp
default:
break;
}
@@ -585,8 +586,8 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
+ strict);
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
@@ -1098,27 +1108,27 @@ static int check_mem_access(struct bpf_v
/* truncate register to smaller size (in bytes)
@@ -1126,27 +1136,27 @@ static int check_mem_access(struct bpf_v
if (reg->type == PTR_TO_MAP_VALUE) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
@@ -618,7 +619,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
regno, reg->off, off - reg->off);
return -EACCES;
}
@@ -1126,7 +1136,8 @@ static int check_mem_access(struct bpf_v
@@ -1154,7 +1164,8 @@ static int check_mem_access(struct bpf_v
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -628,7 +629,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
tn_buf, off, size);
return -EACCES;
}
@@ -1137,9 +1148,10 @@ static int check_mem_access(struct bpf_v
@@ -1165,9 +1176,10 @@ static int check_mem_access(struct bpf_v
* the offset is zero.
*/
if (reg_type == SCALAR_VALUE)
@@ -641,7 +642,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
state->regs[value_regno].id = 0;
state->regs[value_regno].off = 0;
state->regs[value_regno].range = 0;
@@ -1155,13 +1167,14 @@ static int check_mem_access(struct bpf_v
@@ -1183,13 +1195,14 @@ static int check_mem_access(struct bpf_v
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -658,7 +659,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EACCES;
}
@@ -1172,28 +1185,31 @@ static int check_mem_access(struct bpf_v
@@ -1200,28 +1213,31 @@ static int check_mem_access(struct bpf_v
if (!env->allow_ptr_leaks &&
state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
size != BPF_REG_SIZE) {
@@ -697,7 +698,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
regno, reg_type_str[reg->type]);
return -EACCES;
}
@@ -1214,7 +1230,7 @@ static int check_xadd(struct bpf_verifie
@@ -1240,7 +1256,7 @@ static int check_xadd(struct bpf_verifie
if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
insn->imm != 0) {
@@ -706,7 +707,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -1229,7 +1245,7 @@ static int check_xadd(struct bpf_verifie
@@ -1255,7 +1271,7 @@ static int check_xadd(struct bpf_verifie
return err;
if (is_pointer_value(env, insn->src_reg)) {
@@ -715,7 +716,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EACCES;
}
@@ -1270,7 +1286,7 @@ static int check_stack_boundary(struct b
@@ -1296,7 +1312,7 @@ static int check_stack_boundary(struct b
register_is_null(regs[regno]))
return 0;
@@ -724,13 +725,14 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
reg_type_str[regs[regno].type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
@@ -1281,13 +1297,13 @@ static int check_stack_boundary(struct b
@@ -1307,14 +1323,14 @@ static int check_stack_boundary(struct b
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
- verbose("invalid variable stack read R%d var_off=%s\n",
+ verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
return -EACCES;
}
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -740,7 +742,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
regno, off, access_size);
return -EACCES;
}
@@ -1303,7 +1319,7 @@ static int check_stack_boundary(struct b
@@ -1330,7 +1346,7 @@ static int check_stack_boundary(struct b
for (i = 0; i < access_size; i++) {
if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
@@ -749,7 +751,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
off, i, access_size);
return -EACCES;
}
@@ -1345,7 +1361,8 @@ static int check_func_arg(struct bpf_ver
@@ -1372,7 +1388,8 @@ static int check_func_arg(struct bpf_ver
if (arg_type == ARG_ANYTHING) {
if (is_pointer_value(env, regno)) {
@@ -759,7 +761,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EACCES;
}
return 0;
@@ -1353,7 +1370,7 @@ static int check_func_arg(struct bpf_ver
@@ -1380,7 +1397,7 @@ static int check_func_arg(struct bpf_ver
if (type == PTR_TO_PACKET &&
!may_access_direct_pkt_data(env, meta, BPF_READ)) {
@@ -768,7 +770,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EACCES;
}
@@ -1389,7 +1406,7 @@ static int check_func_arg(struct bpf_ver
@@ -1416,7 +1433,7 @@ static int check_func_arg(struct bpf_ver
goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
} else {
@@ -777,7 +779,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EFAULT;
}
@@ -1407,7 +1424,7 @@ static int check_func_arg(struct bpf_ver
@@ -1434,7 +1451,7 @@ static int check_func_arg(struct bpf_ver
* we have to check map_key here. Otherwise it means
* that kernel subsystem misconfigured verifier
*/
@@ -786,7 +788,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EACCES;
}
if (type == PTR_TO_PACKET)
@@ -1423,7 +1440,7 @@ static int check_func_arg(struct bpf_ver
@@ -1450,7 +1467,7 @@ static int check_func_arg(struct bpf_ver
*/
if (!meta->map_ptr) {
/* kernel subsystem misconfigured verifier */
@@ -795,7 +797,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EACCES;
}
if (type == PTR_TO_PACKET)
@@ -1443,7 +1460,8 @@ static int check_func_arg(struct bpf_ver
@@ -1470,7 +1487,8 @@ static int check_func_arg(struct bpf_ver
*/
if (regno == 0) {
/* kernel subsystem misconfigured verifier */
@@ -805,7 +807,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EACCES;
}
@@ -1460,7 +1478,7 @@ static int check_func_arg(struct bpf_ver
@@ -1487,7 +1505,7 @@ static int check_func_arg(struct bpf_ver
meta = NULL;
if (reg->smin_value < 0) {
@@ -814,7 +816,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
regno);
return -EACCES;
}
@@ -1474,7 +1492,7 @@ static int check_func_arg(struct bpf_ver
@@ -1501,7 +1519,7 @@ static int check_func_arg(struct bpf_ver
}
if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
@@ -823,7 +825,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
regno);
return -EACCES;
}
@@ -1485,12 +1503,13 @@ static int check_func_arg(struct bpf_ver
@@ -1512,12 +1530,13 @@ static int check_func_arg(struct bpf_ver
return err;
err_type:
@@ -839,7 +841,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
{
if (!map)
return 0;
@@ -1576,7 +1595,7 @@ static int check_map_func_compatibility(
@@ -1603,7 +1622,7 @@ static int check_map_func_compatibility(
return 0;
error:
@@ -848,7 +850,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
map->map_type, func_id_name(func_id), func_id);
return -EINVAL;
}
@@ -1611,7 +1630,7 @@ static void clear_all_pkt_pointers(struc
@@ -1638,7 +1657,7 @@ static void clear_all_pkt_pointers(struc
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET ||
regs[i].type == PTR_TO_PACKET_END)
@@ -857,7 +859,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
@@ -1635,7 +1654,8 @@ static int check_call(struct bpf_verifie
@@ -1662,7 +1681,8 @@ static int check_call(struct bpf_verifie
/* find function prototype */
if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
@@ -867,7 +869,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -1643,13 +1663,14 @@ static int check_call(struct bpf_verifie
@@ -1670,13 +1690,14 @@ static int check_call(struct bpf_verifie
fn = env->prog->aux->ops->get_func_proto(func_id);
if (!fn) {
@@ -884,7 +886,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -1663,7 +1684,7 @@ static int check_call(struct bpf_verifie
@@ -1690,7 +1711,7 @@ static int check_call(struct bpf_verifie
*/
err = check_raw_mode(fn);
if (err) {
@@ -893,7 +895,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
func_id_name(func_id), func_id);
return err;
}
@@ -1696,14 +1717,14 @@ static int check_call(struct bpf_verifie
@@ -1723,14 +1744,14 @@ static int check_call(struct bpf_verifie
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
@@ -910,7 +912,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
@@ -1711,14 +1732,15 @@ static int check_call(struct bpf_verifie
@@ -1738,14 +1759,15 @@ static int check_call(struct bpf_verifie
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
/* There is no offset yet applied, variable or fixed */
@@ -928,7 +930,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
regs[BPF_REG_0].map_ptr = meta.map_ptr;
@@ -1729,12 +1751,12 @@ static int check_call(struct bpf_verifie
@@ -1756,12 +1778,12 @@ static int check_call(struct bpf_verifie
else if (insn_aux->map_ptr != meta.map_ptr)
insn_aux->map_ptr = BPF_MAP_PTR_POISON;
} else {
@@ -943,7 +945,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (err)
return err;
@@ -1793,39 +1815,42 @@ static int adjust_ptr_min_max_vals(struc
@@ -1847,39 +1869,42 @@ static int adjust_ptr_min_max_vals(struc
dst_reg = &regs[dst];
if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
@@ -994,7 +996,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
dst);
return -EACCES;
}
@@ -1890,7 +1915,7 @@ static int adjust_ptr_min_max_vals(struc
@@ -1948,7 +1973,7 @@ static int adjust_ptr_min_max_vals(struc
if (dst_reg == off_reg) {
/* scalar -= pointer. Creates an unknown scalar */
if (!env->allow_ptr_leaks)
@@ -1003,7 +1005,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
dst);
return -EACCES;
}
@@ -1900,7 +1925,7 @@ static int adjust_ptr_min_max_vals(struc
@@ -1958,7 +1983,7 @@ static int adjust_ptr_min_max_vals(struc
*/
if (ptr_reg->type == PTR_TO_STACK) {
if (!env->allow_ptr_leaks)
@@ -1012,7 +1014,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
dst);
return -EACCES;
}
@@ -1955,13 +1980,13 @@ static int adjust_ptr_min_max_vals(struc
@@ -2013,13 +2038,13 @@ static int adjust_ptr_min_max_vals(struc
* ptr &= ~3 which would reduce min_value by 3.)
*/
if (!env->allow_ptr_leaks)
@@ -1028,25 +1030,25 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
}
@@ -2127,7 +2152,7 @@ static int adjust_scalar_min_max_vals(st
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
@@ -2194,7 +2219,7 @@ static int adjust_scalar_min_max_vals(st
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
- mark_reg_unknown(regs, insn->dst_reg);
+ mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* We lose all sign bit information (except what we can pick
@@ -2155,7 +2180,7 @@ static int adjust_scalar_min_max_vals(st
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
@@ -2222,7 +2247,7 @@ static int adjust_scalar_min_max_vals(st
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
- mark_reg_unknown(regs, insn->dst_reg);
+ mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift, so make the appropriate casts */
@@ -2183,7 +2208,7 @@ static int adjust_scalar_min_max_vals(st
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
@@ -2252,7 +2277,7 @@ static int adjust_scalar_min_max_vals(st
__update_reg_bounds(dst_reg);
break;
default:
@@ -1055,7 +1057,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
break;
}
@@ -2215,12 +2240,12 @@ static int adjust_reg_min_max_vals(struc
@@ -2290,12 +2315,12 @@ static int adjust_reg_min_max_vals(struc
* an arbitrary scalar.
*/
if (!env->allow_ptr_leaks) {
@@ -1070,7 +1072,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return 0;
} else {
/* scalar += pointer
@@ -2272,13 +2297,13 @@ static int adjust_reg_min_max_vals(struc
@@ -2347,13 +2372,13 @@ static int adjust_reg_min_max_vals(struc
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
@@ -1088,7 +1090,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
@@ -2296,14 +2321,14 @@ static int check_alu_op(struct bpf_verif
@@ -2371,14 +2396,14 @@ static int check_alu_op(struct bpf_verif
if (BPF_SRC(insn->code) != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->off != 0 || insn->imm != 0) {
@@ -1105,7 +1107,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
}
@@ -2314,7 +2339,7 @@ static int check_alu_op(struct bpf_verif
@@ -2389,7 +2414,7 @@ static int check_alu_op(struct bpf_verif
return err;
if (is_pointer_value(env, insn->dst_reg)) {
@@ -1114,7 +1116,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
insn->dst_reg);
return -EACCES;
}
@@ -2328,7 +2353,7 @@ static int check_alu_op(struct bpf_verif
@@ -2403,7 +2428,7 @@ static int check_alu_op(struct bpf_verif
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
@@ -1123,7 +1125,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -2338,7 +2363,7 @@ static int check_alu_op(struct bpf_verif
@@ -2413,7 +2438,7 @@ static int check_alu_op(struct bpf_verif
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
@@ -1132,7 +1134,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
}
@@ -2358,11 +2383,12 @@ static int check_alu_op(struct bpf_verif
@@ -2433,11 +2458,12 @@ static int check_alu_op(struct bpf_verif
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
@@ -1144,10 +1146,10 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
- mark_reg_unknown(regs, insn->dst_reg);
+ mark_reg_unknown(env, regs, insn->dst_reg);
/* high 32 bits are known zero. */
regs[insn->dst_reg].var_off = tnum_cast(
regs[insn->dst_reg].var_off, 4);
@@ -2377,14 +2403,14 @@ static int check_alu_op(struct bpf_verif
coerce_reg_to_size(&regs[insn->dst_reg], 4);
}
} else {
@@ -2455,14 +2481,14 @@ static int check_alu_op(struct bpf_verif
}
} else if (opcode > BPF_END) {
@@ -1164,7 +1166,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
/* check src1 operand */
@@ -2393,7 +2419,7 @@ static int check_alu_op(struct bpf_verif
@@ -2471,7 +2497,7 @@ static int check_alu_op(struct bpf_verif
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
@@ -1173,7 +1175,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
}
@@ -2405,7 +2431,7 @@ static int check_alu_op(struct bpf_verif
@@ -2483,7 +2509,7 @@ static int check_alu_op(struct bpf_verif
if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
@@ -1182,7 +1184,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -2414,7 +2440,7 @@ static int check_alu_op(struct bpf_verif
@@ -2492,7 +2518,7 @@ static int check_alu_op(struct bpf_verif
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
if (insn->imm < 0 || insn->imm >= size) {
@@ -1191,7 +1193,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
}
@@ -2775,13 +2801,13 @@ static int check_cond_jmp_op(struct bpf_
@@ -2853,13 +2879,13 @@ static int check_cond_jmp_op(struct bpf_
int err;
if (opcode > BPF_JSLE) {
@@ -1207,7 +1209,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -2791,13 +2817,13 @@ static int check_cond_jmp_op(struct bpf_
@@ -2869,13 +2895,13 @@ static int check_cond_jmp_op(struct bpf_
return err;
if (is_pointer_value(env, insn->src_reg)) {
@@ -1223,7 +1225,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
}
@@ -2913,11 +2939,12 @@ static int check_cond_jmp_op(struct bpf_
@@ -2991,11 +3017,12 @@ static int check_cond_jmp_op(struct bpf_
/* pkt_end <= pkt_data' */
find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);
} else if (is_pointer_value(env, insn->dst_reg)) {
@@ -1239,7 +1241,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return 0;
}
@@ -2936,11 +2963,11 @@ static int check_ld_imm(struct bpf_verif
@@ -3014,11 +3041,11 @@ static int check_ld_imm(struct bpf_verif
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
@@ -1253,7 +1255,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -2998,14 +3025,14 @@ static int check_ld_abs(struct bpf_verif
@@ -3076,14 +3103,14 @@ static int check_ld_abs(struct bpf_verif
int i, err;
if (!may_access_skb(env->prog->type)) {
@@ -1270,7 +1272,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3015,7 +3042,8 @@ static int check_ld_abs(struct bpf_verif
@@ -3093,7 +3120,8 @@ static int check_ld_abs(struct bpf_verif
return err;
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
@@ -1280,7 +1282,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3028,7 +3056,7 @@ static int check_ld_abs(struct bpf_verif
@@ -3106,7 +3134,7 @@ static int check_ld_abs(struct bpf_verif
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
@@ -1289,7 +1291,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
@@ -3036,7 +3064,7 @@ static int check_ld_abs(struct bpf_verif
@@ -3114,7 +3142,7 @@ static int check_ld_abs(struct bpf_verif
* the value fetched from the packet.
* Already marked as written above.
*/
@@ -1298,7 +1300,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return 0;
}
@@ -3100,7 +3128,7 @@ static int push_insn(int t, int w, int e
@@ -3178,7 +3206,7 @@ static int push_insn(int t, int w, int e
return 0;
if (w < 0 || w >= env->prog->len) {
@@ -1307,7 +1309,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3117,13 +3145,13 @@ static int push_insn(int t, int w, int e
@@ -3195,13 +3223,13 @@ static int push_insn(int t, int w, int e
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
@@ -1323,7 +1325,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EFAULT;
}
return 0;
@@ -3217,7 +3245,7 @@ peek_stack:
@@ -3295,7 +3323,7 @@ peek_stack:
mark_explored:
insn_state[t] = EXPLORED;
if (cur_stack-- <= 0) {
@@ -1332,7 +1334,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
ret = -EFAULT;
goto err_free;
}
@@ -3226,7 +3254,7 @@ mark_explored:
@@ -3304,7 +3332,7 @@ mark_explored:
check_state:
for (i = 0; i < insn_cnt; i++) {
if (insn_state[i] != EXPLORED) {
@@ -1341,7 +1343,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
ret = -EINVAL;
goto err_free;
}
@@ -3606,7 +3634,7 @@ static int do_check(struct bpf_verifier_
@@ -3683,7 +3711,7 @@ static int do_check(struct bpf_verifier_
int insn_processed = 0;
bool do_print_state = false;
@@ -1350,7 +1352,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
state->parent = NULL;
insn_idx = 0;
for (;;) {
@@ -3615,7 +3643,7 @@ static int do_check(struct bpf_verifier_
@@ -3692,7 +3720,7 @@ static int do_check(struct bpf_verifier_
int err;
if (insn_idx >= insn_cnt) {
@@ -1359,7 +1361,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
insn_idx, insn_cnt);
return -EFAULT;
}
@@ -3624,7 +3652,8 @@ static int do_check(struct bpf_verifier_
@@ -3701,7 +3729,8 @@ static int do_check(struct bpf_verifier_
class = BPF_CLASS(insn->code);
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
@@ -1369,7 +1371,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
insn_processed);
return -E2BIG;
}
@@ -3634,12 +3663,12 @@ static int do_check(struct bpf_verifier_
@@ -3711,12 +3740,12 @@ static int do_check(struct bpf_verifier_
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
@@ -1385,7 +1387,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
goto process_bpf_exit;
}
@@ -3647,19 +3676,18 @@ static int do_check(struct bpf_verifier_
@@ -3724,19 +3753,18 @@ static int do_check(struct bpf_verifier_
if (need_resched())
cond_resched();
@@ -1412,7 +1414,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
print_bpf_insn(env, insn);
}
@@ -3716,7 +3744,7 @@ static int do_check(struct bpf_verifier_
@@ -3794,7 +3822,7 @@ static int do_check(struct bpf_verifier_
* src_reg == stack|map in some other branch.
* Reject it.
*/
@@ -1421,7 +1423,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3756,14 +3784,14 @@ static int do_check(struct bpf_verifier_
@@ -3834,14 +3862,14 @@ static int do_check(struct bpf_verifier_
} else if (dst_reg_type != *prev_dst_type &&
(dst_reg_type == PTR_TO_CTX ||
*prev_dst_type == PTR_TO_CTX)) {
@@ -1438,7 +1440,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
/* check src operand */
@@ -3786,7 +3814,7 @@ static int do_check(struct bpf_verifier_
@@ -3864,7 +3892,7 @@ static int do_check(struct bpf_verifier_
insn->off != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
@@ -1447,7 +1449,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3799,7 +3827,7 @@ static int do_check(struct bpf_verifier_
@@ -3877,7 +3905,7 @@ static int do_check(struct bpf_verifier_
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
@@ -1456,7 +1458,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3811,7 +3839,7 @@ static int do_check(struct bpf_verifier_
@@ -3889,7 +3917,7 @@ static int do_check(struct bpf_verifier_
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
@@ -1465,7 +1467,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3826,7 +3854,7 @@ static int do_check(struct bpf_verifier_
@@ -3904,7 +3932,7 @@ static int do_check(struct bpf_verifier_
return err;
if (is_pointer_value(env, BPF_REG_0)) {
@@ -1474,9 +1476,9 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EACCES;
}
@@ -3858,19 +3886,19 @@ process_bpf_exit:
@@ -3937,19 +3965,19 @@ process_bpf_exit:
insn_idx++;
env->insn_aux_data[insn_idx].seen = true;
} else {
- verbose("invalid BPF_LD mode\n");
+ verbose(env, "invalid BPF_LD mode\n");
@@ -1498,7 +1500,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return 0;
}
@@ -3882,7 +3910,8 @@ static int check_map_prealloc(struct bpf
@@ -3961,7 +3989,8 @@ static int check_map_prealloc(struct bpf
!(map->map_flags & BPF_F_NO_PREALLOC);
}
@@ -1508,7 +1510,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
struct bpf_prog *prog)
{
@@ -3893,12 +3922,12 @@ static int check_map_prog_compatibility(
@@ -3972,12 +4001,12 @@ static int check_map_prog_compatibility(
*/
if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
if (!check_map_prealloc(map)) {
@@ -1523,7 +1525,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
}
@@ -3921,14 +3950,14 @@ static int replace_map_fd_with_map_ptr(s
@@ -4000,14 +4029,14 @@ static int replace_map_fd_with_map_ptr(s
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
@@ -1540,7 +1542,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3939,7 +3968,7 @@ static int replace_map_fd_with_map_ptr(s
@@ -4018,7 +4047,7 @@ static int replace_map_fd_with_map_ptr(s
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
insn[1].off != 0) {
@@ -1549,7 +1551,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -3948,19 +3977,20 @@ static int replace_map_fd_with_map_ptr(s
@@ -4027,19 +4056,20 @@ static int replace_map_fd_with_map_ptr(s
goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
@@ -1573,7 +1575,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (err) {
fdput(f);
return err;
@@ -4082,7 +4112,7 @@ static int convert_ctx_accesses(struct b
@@ -4183,7 +4213,7 @@ static int convert_ctx_accesses(struct b
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
if (cnt >= ARRAY_SIZE(insn_buf)) {
@@ -1582,7 +1584,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
} else if (cnt) {
new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
@@ -4130,7 +4160,7 @@ static int convert_ctx_accesses(struct b
@@ -4231,7 +4261,7 @@ static int convert_ctx_accesses(struct b
u8 size_code;
if (type == BPF_WRITE) {
@@ -1591,7 +1593,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -4149,7 +4179,7 @@ static int convert_ctx_accesses(struct b
@@ -4250,7 +4280,7 @@ static int convert_ctx_accesses(struct b
&target_size);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
(ctx_field_size && !target_size)) {
@@ -1600,7 +1602,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -4231,7 +4261,7 @@ static int fixup_bpf_calls(struct bpf_ve
@@ -4332,7 +4362,7 @@ static int fixup_bpf_calls(struct bpf_ve
cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
@@ -1609,7 +1611,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -EINVAL;
}
@@ -4275,7 +4305,8 @@ patch_call_imm:
@@ -4376,7 +4406,8 @@ patch_call_imm:
* programs to call them, must be real in-kernel functions
*/
if (!fn->func) {
@@ -1619,7 +1621,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
func_id_name(insn->imm), insn->imm);
return -EFAULT;
}
@@ -4309,8 +4340,8 @@ static void free_states(struct bpf_verif
@@ -4410,8 +4441,8 @@ static void free_states(struct bpf_verif
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
@@ -1629,7 +1631,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
int ret = -EINVAL;
/* 'struct bpf_verifier_env' can be global, but since it's not small,
@@ -4319,6 +4350,7 @@ int bpf_check(struct bpf_prog **prog, un
@@ -4420,6 +4451,7 @@ int bpf_check(struct bpf_prog **prog, un
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
@@ -1637,7 +1639,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
(*prog)->len);
@@ -4337,7 +4369,6 @@ int bpf_check(struct bpf_prog **prog, un
@@ -4438,7 +4470,6 @@ int bpf_check(struct bpf_prog **prog, un
log->level = attr->log_level;
log->ubuf = (char __user *) (unsigned long) attr->log_buf;
log->len_total = attr->log_size;
@@ -1645,7 +1647,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
ret = -EINVAL;
/* log attributes have to be sane */
@@ -4349,8 +4380,6 @@ int bpf_check(struct bpf_prog **prog, un
@@ -4450,8 +4481,6 @@ int bpf_check(struct bpf_prog **prog, un
log->kbuf = vmalloc(log->len_total);
if (!log->kbuf)
goto err_unlock;
@@ -1654,7 +1656,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
@@ -4461,8 +4490,6 @@ int bpf_analyzer(struct bpf_prog *prog,
@@ -4565,8 +4594,6 @@ int bpf_analyzer(struct bpf_prog *prog,
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
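
The shape of the refactoring this refresh tracks, as a userspace sketch: verbose() takes the verifier environment instead of using global log state, which is why every call site above gains an env argument (types simplified, not the kernel's):

#include <stdarg.h>
#include <stdio.h>

struct bpf_verifier_log { int level; };
struct bpf_verifier_env { struct bpf_verifier_log log; };

static void verbose(struct bpf_verifier_env *env, const char *fmt, ...)
{
	va_list args;

	if (!env->log.level)	/* the log now hangs off env, not a global */
		return;
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

int main(void)
{
	struct bpf_verifier_env env = { .log = { .level = 1 } };

	verbose(&env, "invalid BPF_LD mode\n");
	return 0;
}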

debian/patches/bugfix/all/bpf-verifier-fix-bounds-calculation-on-bpf_rsh.patch

@@ -1,61 +0,0 @@
From: Edward Cree <ecree@solarflare.com>
Date: Mon, 18 Dec 2017 20:11:53 -0800
Subject: [1/9] bpf/verifier: fix bounds calculation on BPF_RSH
Origin: https://git.kernel.org/linus/4374f256ce8182019353c0c639bb8d0695b4c941
Incorrect signed bounds were being computed.
If the old upper signed bound was positive and the old lower signed bound was
negative, this could cause the new upper signed bound to be too low,
leading to security issues.
Fixes: b03c9f9fdc37 ("bpf/verifier: track signed and unsigned min/max values")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Edward Cree <ecree@solarflare.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
[jannh@google.com: changed description to reflect bug impact]
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
kernel/bpf/verifier.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2183,20 +2183,22 @@ static int adjust_scalar_min_max_vals(st
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
- /* BPF_RSH is an unsigned shift, so make the appropriate casts */
- if (dst_reg->smin_value < 0) {
- if (umin_val) {
- /* Sign bit will be cleared */
- dst_reg->smin_value = 0;
- } else {
- /* Lost sign bit information */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- }
- } else {
- dst_reg->smin_value =
- (u64)(dst_reg->smin_value) >> umax_val;
- }
+ /* BPF_RSH is an unsigned shift. If the value in dst_reg might
+ * be negative, then either:
+ * 1) src_reg might be zero, so the sign bit of the result is
+ * unknown, so we lose our signed bounds
+ * 2) it's known negative, thus the unsigned bounds capture the
+ * signed bounds
+ * 3) the signed bounds cross zero, so they tell us nothing
+ * about the result
+ * If the value in dst_reg is known nonnegative, then again the
+ * unsigned bounts capture the signed bounds.
+ * Thus, in all cases it suffices to blow away our signed bounds
+ * and rely on inferring new ones from the unsigned bounds and
+ * var_off of the result.
+ */
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
if (src_known)
dst_reg->var_off = tnum_rshift(dst_reg->var_off,
umin_val);
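
A concrete instance of the unsoundness described above, as a standalone sketch: with signed bounds [-2, 6], the old code could keep smax_value = 6 across a right shift by 1, yet:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t smin = -2;	/* smax = 6: the bounds straddle zero */
	uint64_t umin_val = 1;	/* shift amount */

	printf("(u64)%lld >> %llu = %#llx\n", (long long)smin,
	       (unsigned long long)umin_val,
	       (unsigned long long)((uint64_t)smin >> umin_val));
	/* 0x7fffffffffffffff, vastly above 6; the only sound signed
	 * bounds after the shift are [S64_MIN, S64_MAX] */
	return 0;
}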

debian/patches/features/all/rt/0003-tracing-Exclude-generic-fields-from-histograms.patch

@@ -1,38 +0,0 @@
From: Tom Zanussi <tom.zanussi@linux.intel.com>
Date: Fri, 22 Sep 2017 14:58:17 -0500
Subject: [PATCH 03/42] tracing: Exclude 'generic fields' from histograms
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patches-4.14.8-rt9.tar.xz
There are a small number of 'generic fields' (comm/COMM/cpu/CPU) that
are found by trace_find_event_field() but are only meant for
filtering. Specifically, unlike normal fields, they have a size
of 0 and thus wreak havoc when used as a histogram key.
Exclude these (return -EINVAL) when used as histogram keys.
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/trace/trace_events_hist.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -450,7 +450,7 @@ static int create_val_field(struct hist_
}
field = trace_find_event_field(file->event_call, field_name);
- if (!field) {
+ if (!field || !field->size) {
ret = -EINVAL;
goto out;
}
@@ -548,7 +548,7 @@ static int create_key_field(struct hist_
}
field = trace_find_event_field(file->event_call, field_name);
- if (!field) {
+ if (!field || !field->size) {
ret = -EINVAL;
goto out;
}
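
The added guard in miniature: a field with size 0 (the generic comm/COMM/cpu/CPU fields) is rejected as a histogram field; a simplified standalone sketch, not the tracing API:

#include <errno.h>
#include <stdio.h>

struct ftrace_event_field { const char *name; int size; };

static int check_hist_field(const struct ftrace_event_field *field)
{
	if (!field || !field->size)	/* generic fields have size 0 */
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct ftrace_event_field comm = { "comm", 0 };	/* filter-only */
	struct ftrace_event_field pid = { "pid", 4 };

	printf("comm: %d, pid: %d\n",
	       check_hist_field(&comm), check_hist_field(&pid));	/* -22, 0 */
	return 0;
}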


@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
-select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER_UNWINDER && STACK_VALIDATION
+select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -132,7 +132,7 @@ static long syscall_trace_enter(struct p
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
call preempt_schedule_irq
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -623,7 +623,23 @@ GLOBAL(retint_user)
@@ -750,7 +750,23 @@ retint_kernel:
bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
@@ -213,9 +213,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
BLANK();
@@ -93,4 +94,5 @@ void common(void) {
@@ -93,6 +94,7 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
}
/* Layout info for cpu_entry_area */
OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);


@@ -30,7 +30,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3829,6 +3829,10 @@
@@ -4010,6 +4010,10 @@
switches= [HW,M68k]
@@ -43,7 +43,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
on older distributions. When this option is enabled
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2757,6 +2757,14 @@ config COMPAT_32
@@ -2832,6 +2832,14 @@ config COMPAT_32
select HAVE_UID16
select OLD_SIGSUSPEND3
@@ -60,7 +60,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
depends on IA32_EMULATION || X86_X32
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -9,6 +9,7 @@
@@ -10,6 +10,7 @@
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/auxvec.h>
@@ -68,7 +68,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
typedef unsigned long elf_greg_t;
@@ -162,7 +163,7 @@ do { \
@@ -163,7 +164,7 @@ do { \
#define compat_elf_check_arch(x) \
(elf_check_arch_ia32(x) || \
@@ -79,7 +79,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
# error "The following code assumes __USER32_DS == __USER_DS"
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -190,8 +190,12 @@ entry_SYSCALL_64_fastpath:
@@ -247,8 +247,12 @@ entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max, %rax
#else
@@ -94,8 +94,8 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
#endif
ja 1f /* return -ENOSYS (already in pt_regs->ax) */
movq %r10, %rcx
@@ -326,6 +330,16 @@ opportunistic_sysret_failed:
jmp restore_c_regs_and_iret
@@ -405,6 +409,16 @@ syscall_return_via_sysret:
USERGS_SYSRET64
END(entry_SYSCALL_64)
+#if __SYSCALL_MASK != ~0
@@ -113,7 +113,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
* Syscalls marked as needing ptregs land here.
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -3,8 +3,14 @@
@@ -4,8 +4,14 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
@@ -128,7 +128,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
#define __SYSCALL_64_QUAL_(sym) sym
#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
@@ -25,3 +31,36 @@ asmlinkage const sys_call_ptr_t sys_call
@@ -26,3 +32,36 @@ asmlinkage const sys_call_ptr_t sys_call
[0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_64.h>
};
@@ -167,7 +167,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+#endif
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -264,6 +264,7 @@ __visible void do_syscall_64(struct pt_r
@@ -271,6 +271,7 @@ __visible void do_syscall_64(struct pt_r
{
struct thread_info *ti = current_thread_info();
unsigned long nr = regs->orig_ax;
@@ -175,7 +175,7 @@ Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
enter_from_user_mode();
local_irq_enable();
@@ -276,8 +277,19 @@ __visible void do_syscall_64(struct pt_r
@@ -283,8 +284,19 @@ __visible void do_syscall_64(struct pt_r
* table. The only functional difference is the x32 bit in
* regs->orig_ax, which changes the behavior of some syscalls.
*/
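
The gating idea in miniature: syscall numbers carry __X32_SYSCALL_BIT (0x40000000), and the entry code only honours it when x32 support was enabled at boot; a userspace sketch in which x32_enabled stands in for the boot-time switch:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define __X32_SYSCALL_BIT 0x40000000UL

static bool x32_enabled;	/* stand-in for the new boot parameter */

static long do_syscall(unsigned long nr)
{
	if (nr & __X32_SYSCALL_BIT) {
		if (!x32_enabled)
			return -ENOSYS;	/* x32 entry points masked off */
		nr &= ~__X32_SYSCALL_BIT;
	}
	printf("dispatch syscall %lu\n", nr);
	return 0;
}

int main(void)
{
	printf("%ld\n", do_syscall(0x40000003UL));	/* -38: disabled */
	x32_enabled = true;
	printf("%ld\n", do_syscall(0x40000003UL));	/* dispatches nr 3 */
	return 0;
}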

debian/patches/series

@@ -129,14 +129,6 @@ bugfix/all/kvm-fix-stack-out-of-bounds-read-in-write_mmio.patch
bugfix/all/bluetooth-prevent-stack-info-leak-from-the-efs-element.patch
bugfix/all/bpf-encapsulate-verifier-log-state-into-a-structure.patch
bugfix/all/bpf-move-global-verifier-log-into-verifier-environme.patch
-bugfix/all/bpf-fix-branch-pruning-logic.patch
-bugfix/all/bpf-verifier-fix-bounds-calculation-on-bpf_rsh.patch
-bugfix/all/bpf-fix-incorrect-sign-extension-in-check_alu_op.patch
-bugfix/all/bpf-fix-incorrect-tracking-of-register-size-truncati.patch
-bugfix/all/bpf-fix-32-bit-alu-op-verification.patch
-bugfix/all/bpf-fix-missing-error-return-in-check_stack_boundary.patch
-bugfix/all/bpf-force-strict-alignment-checks-for-stack-pointers.patch
-bugfix/all/bpf-don-t-prune-branches-when-a-scalar-is-replaced-w.patch
bugfix/all/bpf-fix-integer-overflows.patch
# Fix exported symbol versions

debian/patches/series-rt

@@ -131,7 +131,6 @@ features/all/rt/add_migrate_disable.patch
# tracing: Bug fixes and minor cleanup | 2017-09-22
features/all/rt/0001-tracing-Steve-s-unofficial-trace_recursive_lock-patc.patch
features/all/rt/0002-tracing-Reverse-the-order-of-trace_types_lock-and-ev.patch
-features/all/rt/0003-tracing-Exclude-generic-fields-from-histograms.patch
features/all/rt/0004-tracing-Remove-lookups-from-tracing_map-hitcount.patch
features/all/rt/0005-tracing-Increase-tracing-map-KEYS_MAX-size.patch
features/all/rt/0006-tracing-Make-traceprobe-parsing-code-reusable.patch