[PATCH 4.19 73/90] powerpc/bpf: use unsigned division instruction for 64-bit operations
From: Greg Kroah-Hartman
Date: Mon Jun 24 2019 - 06:12:42 EST
From: Naveen N. Rao <naveen.n.rao@xxxxxxxxxxxxxxxxxx>
commit 758f2046ea040773ae8ea7f72dd3bbd8fa984501 upstream.
BPF_ALU64 div/mod operations currently use signed division, unlike the
BPF_ALU32 operations, which correctly use unsigned division. Switch the
64-bit operations to unsigned division as well. The DIV64 and MOD64
overflow tests pass with this fix.
Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
Cc: stable@xxxxxxxxxxxxxxx # v4.8+
Signed-off-by: Naveen N. Rao <naveen.n.rao@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
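[ Illustration only, not part of the patch: a minimal userspace sketch of
  the semantic difference the commit message above describes. eBPF defines
  BPF_ALU64 div/mod as unsigned, so emitting divd (signed divide) gives the
  wrong result whenever the dividend has its top bit set. Variable names
  below are purely illustrative. ]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dst = 0xffffffffffffffffULL;	/* -1 if misread as signed */
	uint64_t src = 2;

	/* What BPF_ALU64 | BPF_DIV requires (and what divdu computes): */
	uint64_t want = dst / src;				/* 0x7fffffffffffffff */

	/* What the old JIT computed via divd (signed divide): */
	uint64_t got = (uint64_t)((int64_t)dst / (int64_t)src);	/* 0 */

	printf("unsigned: %#llx\nsigned:   %#llx\n",
	       (unsigned long long)want, (unsigned long long)got);
	return 0;
}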
arch/powerpc/include/asm/ppc-opcode.h | 1 +
arch/powerpc/net/bpf_jit.h | 2 +-
arch/powerpc/net/bpf_jit_comp64.c | 8 ++++----
3 files changed, 6 insertions(+), 5 deletions(-)
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -336,6 +336,7 @@
#define PPC_INST_MULLI 0x1c000000
#define PPC_INST_DIVWU 0x7c000396
#define PPC_INST_DIVD 0x7c0003d2
+#define PPC_INST_DIVDU 0x7c000392
#define PPC_INST_RLWINM 0x54000000
#define PPC_INST_RLWIMI 0x50000000
#define PPC_INST_RLDICL 0x78000000
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -116,7 +116,7 @@
___PPC_RA(a) | IMM_L(i))
#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \
+#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(b))
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -372,12 +372,12 @@ static int bpf_jit_build_body(struct bpf
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
+ PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
PPC_MULD(b2p[TMP_REG_1], src_reg,
b2p[TMP_REG_1]);
PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
} else
- PPC_DIVD(dst_reg, dst_reg, src_reg);
+ PPC_DIVDU(dst_reg, dst_reg, src_reg);
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
@@ -405,7 +405,7 @@ static int bpf_jit_build_body(struct bpf
break;
case BPF_ALU64:
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVD(b2p[TMP_REG_2], dst_reg,
+ PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
b2p[TMP_REG_1]);
PPC_MULD(b2p[TMP_REG_1],
b2p[TMP_REG_1],
@@ -413,7 +413,7 @@ static int bpf_jit_build_body(struct bpf
PPC_SUB(dst_reg, dst_reg,
b2p[TMP_REG_1]);
} else
- PPC_DIVD(dst_reg, dst_reg,
+ PPC_DIVDU(dst_reg, dst_reg,
b2p[TMP_REG_1]);
break;
}