From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov <ast@fb.com>
Date: Thu, 4 Jan 2018 08:01:24 -0600
Subject: [PATCH] bpf: fix branch pruning logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When the verifier detects that a register contains a runtime constant
and it is compared with another constant, it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but a malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such a path through the program will not be explored
by the verifier. It won't be taken at run time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize the instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code: llvm does not do as much data flow
analysis as the verifier does.
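
As an illustration (an editor's addition, not part of the upstream
commit), a program of the problematic shape could look like this,
built from the kernel's insn macros; the dead instructions here are
harmless movs standing in for the malformed encodings a malicious
program would use:

	BPF_MOV64_IMM(BPF_REG_0, 0),		/* r0 = 0, a known constant   */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),	/* always taken: skip 2 insns */
	BPF_MOV64_IMM(BPF_REG_0, 1),		/* dead: pruned, not verified */
	BPF_MOV64_IMM(BPF_REG_0, 2),		/* dead: ...yet still JITed   */
	BPF_EXIT_INSN(),

The verifier proves the JEQ is always taken, never walks insns 2 and 3,
and before this patch still handed them to the JIT verbatim.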
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
(cherry picked from commit c131187db2d3fa2f8bf32fdf4e9a4ef805168467)
CVE-2017-17862
Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 2df70878d072d06f5bad0db3f2ee1ed47179dff8)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
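Editor's note (not part of the patch, git am ignores this region): the
heart of the fix is the sanitize_dead_code() pass added below. The same
idea as a stand-alone userspace sketch, with mocked-up insn/aux types
standing in for the kernel's struct bpf_insn and struct
bpf_insn_aux_data:

	#include <stdio.h>
	#include <string.h>

	struct insn { unsigned char code; int imm; };
	struct aux  { int seen; };

	/* Replace every instruction the verifier never reached with a
	 * NOP, as sanitize_dead_code() does with mov64 r0,r0 (0xbf). */
	static void sanitize(struct insn *prog, const struct aux *aux, int cnt)
	{
		const struct insn nop = { .code = 0xbf, .imm = 0 };
		int i;

		for (i = 0; i < cnt; i++) {
			if (aux[i].seen)
				continue;
			memcpy(&prog[i], &nop, sizeof(nop));
		}
	}

	int main(void)
	{
		/* mov r0,0; jeq r0,0; mov r0,1 (dead); exit */
		struct insn prog[] = { {0xb7, 0}, {0x15, 0}, {0xb7, 1}, {0x95, 0} };
		struct aux  aux[]  = { {1}, {1}, {0}, {1} };

		sanitize(prog, aux, 4);
		printf("insn 2: code=0x%02x imm=%d\n", prog[2].code, prog[2].imm);
		return 0;	/* prints insn 2: code=0xbf imm=0 */
	}
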
 include/linux/bpf_verifier.h |  2 +-
 kernel/bpf/verifier.c        | 27 +++++++++++++++++++++++++++
 2 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 8e5d31f6faef..effeaa64257d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -75,7 +75,7 @@ struct bpf_insn_aux_data {
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-	int converted_op_size; /* the valid value width after perceived conversion */
+	bool seen; /* this insn was processed by the verifier */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4ecb2e10c5e0..dab5ba668b97 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3152,6 +3152,7 @@ static int do_check(struct bpf_verifier_env *env)
 		if (err)
 			return err;
 
+		env->insn_aux_data[insn_idx].seen = true;
 		if (class == BPF_ALU || class == BPF_ALU64) {
 			err = check_alu_op(env, insn);
 			if (err)
@@ -3342,6 +3343,7 @@ static int do_check(struct bpf_verifier_env *env)
 					return err;
 
 				insn_idx++;
+				env->insn_aux_data[insn_idx].seen = true;
 			} else {
 				verbose("invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -3523,6 +3525,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
 				u32 off, u32 cnt)
 {
 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+	int i;
 
 	if (cnt == 1)
 		return 0;
@@ -3532,6 +3535,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
 	memcpy(new_data + off + cnt - 1, old_data + off,
 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+	for (i = off; i < off + cnt - 1; i++)
+		new_data[i].seen = true;
 	env->insn_aux_data = new_data;
 	vfree(old_data);
 	return 0;
@@ -3550,6 +3555,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
 	return new_prog;
 }
 
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+	struct bpf_insn *insn = env->prog->insnsi;
+	const int insn_cnt = env->prog->len;
+	int i;
+
+	for (i = 0; i < insn_cnt; i++) {
+		if (aux_data[i].seen)
+			continue;
+		memcpy(insn + i, &nop, sizeof(nop));
+	}
+}
+
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -3841,6 +3865,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	while (pop_stack(env, NULL) >= 0);
 	free_states(env);
 
+	if (ret == 0)
+		sanitize_dead_code(env);
+
 	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
-- 
2.14.2
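
Editor's note (not part of the patch): the adjust_insn_aux_data() hunk
is what keeps the new pass safe in the presence of instruction
patching. Instructions spliced in later by bpf_patch_insn_data() (for
example during convert_ctx_accesses()) are never walked by do_check(),
so they are flagged seen up front; without that, sanitize_dead_code()
would NOP out live, freshly patched code. Continuing the userspace
sketch above, with off and cnt as hypothetical patch parameters:

	/* Mark the cnt - 1 instructions spliced in at off as seen so
	 * sanitize() above leaves them alone (mirrors the
	 * adjust_insn_aux_data() hunk). */
	static void mark_patched_seen(struct aux *aux, int off, int cnt)
	{
		int i;

		for (i = off; i < off + cnt - 1; i++)
			aux[i].seen = 1;
	}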