[PATCH 4.19 02/33] bpf: Do not use ax register in interpreter on div/mod

From: Greg Kroah-Hartman
Date: Wed Sep 01 2021 - 08:30:42 EST


From: Daniel Borkmann <daniel@xxxxxxxxxxxxx>

Partially undo old commit 144cd91c4c2b ("bpf: move tmp variable into ax
register in interpreter"). The reason we need this here is that the AX
register will be used to hold temporary state for the div/mod instruction
rewrite, which the interpreter would otherwise corrupt. This costs a small
+8 byte stack increase for the interpreter, but with the gain that AX can
be used by verifier rewrites as a scratch register.
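
As a rough illustration only (a user-space sketch with made-up helper and
variable names, not the kernel interpreter): once the div/mod handlers go
through a private stack temporary, whatever state a verifier rewrite has
parked in the auxiliary AX slot survives the handler.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for do_div(): *n becomes the quotient, the remainder is returned. */
static uint32_t mod32_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t dst = 17, src = 5;
	uint64_t ax = 0xdeadbeef;	/* state a rewrite might keep in AX */
	uint64_t tmp;			/* the local temporary added by this patch */

	/* ALU_MOD_X after the patch: DST = (u32) DST % (u32) SRC, via tmp */
	tmp = (uint32_t)dst;
	dst = mod32_sketch(&tmp, (uint32_t)src);

	printf("dst = %llu, ax = 0x%llx (untouched)\n",
	       (unsigned long long)dst, (unsigned long long)ax);
	return 0;
}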

Signed-off-by: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
Reviewed-by: John Fastabend <john.fastabend@xxxxxxxxx>
[cascardo: This partial revert is needed in order to support using AX for
the following two commits, as there is no JMP32 on 4.19.y]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
kernel/bpf/core.c | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)

--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -705,9 +705,6 @@ static int bpf_jit_blind_insn(const stru
* below.
*
* Constant blinding is only used by JITs, not in the interpreter.
- * The interpreter uses AX in some occasions as a local temporary
- * register e.g. in DIV or MOD instructions.
- *
* In restricted circumstances, the verifier can also use the AX
* register for rewrites as long as they do not interfere with
* the above cases!
@@ -1057,6 +1054,7 @@ static u64 ___bpf_prog_run(u64 *regs, co
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
u32 tail_call_cnt = 0;
+ u64 tmp;

#define CONT ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
@@ -1117,36 +1115,36 @@ select_insn:
(*(s64 *) &DST) >>= IMM;
CONT;
ALU64_MOD_X:
- div64_u64_rem(DST, SRC, &AX);
- DST = AX;
+ div64_u64_rem(DST, SRC, &tmp);
+ DST = tmp;
CONT;
ALU_MOD_X:
- AX = (u32) DST;
- DST = do_div(AX, (u32) SRC);
+ tmp = (u32) DST;
+ DST = do_div(tmp, (u32) SRC);
CONT;
ALU64_MOD_K:
- div64_u64_rem(DST, IMM, &AX);
- DST = AX;
+ div64_u64_rem(DST, IMM, &tmp);
+ DST = tmp;
CONT;
ALU_MOD_K:
- AX = (u32) DST;
- DST = do_div(AX, (u32) IMM);
+ tmp = (u32) DST;
+ DST = do_div(tmp, (u32) IMM);
CONT;
ALU64_DIV_X:
DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
- AX = (u32) DST;
- do_div(AX, (u32) SRC);
- DST = (u32) AX;
+ tmp = (u32) DST;
+ do_div(tmp, (u32) SRC);
+ DST = (u32) tmp;
CONT;
ALU64_DIV_K:
DST = div64_u64(DST, IMM);
CONT;
ALU_DIV_K:
- AX = (u32) DST;
- do_div(AX, (u32) IMM);
- DST = (u32) AX;
+ tmp = (u32) DST;
+ do_div(tmp, (u32) IMM);
+ DST = (u32) tmp;
CONT;
ALU_END_TO_BE:
switch (IMM) {
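
For reference, a user-space sketch of the two helper conventions the handlers
above rely on (the sketch_* names are made up, and the kernel's do_div() is a
macro that modifies its argument in place rather than a function taking a
pointer):

#include <stdint.h>
#include <stdio.h>

/* Like div64_u64_rem(): returns the quotient and writes the remainder
 * through the pointer, hence the extra "DST = tmp" in ALU64_MOD_*. */
static uint64_t sketch_div64_u64_rem(uint64_t dividend, uint64_t divisor,
				     uint64_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/* Like do_div(): the dividend becomes the quotient in place and the 32-bit
 * remainder is returned, hence ALU_MOD_* assigns the return value while
 * ALU_DIV_* reads the variable afterwards. */
static uint32_t sketch_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t dst = 17, tmp;

	sketch_div64_u64_rem(dst, 5, &tmp);	/* ALU64_MOD_K pattern */
	printf("64-bit mod: %llu\n", (unsigned long long)tmp);

	tmp = (uint32_t)dst;			/* ALU_DIV_K pattern */
	sketch_do_div(&tmp, 5);
	printf("32-bit div: %llu\n", (unsigned long long)tmp);
	return 0;
}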