On Thu, Jun 01, 2023, Zeng Guang wrote:
From: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>

Define a 32-bit parameter and consolidate the two bools into it.

Please write changelogs so that they make sense without the context of the shortlog.
In isolation, the above provides zero context. And there's no need to provide a
play-by-play description of the change, e.g. this can be:
  Consolidate __linearize()'s @write and @fetch into a set of flags so that
  additional flags can be added without needing more/new boolean parameters,
  e.g. to precisely identify the access type for LASS.
__linearize() has two bool parameters, write and fetch. And new flags will be
needed to support new features, LAM/LASS (e.g. LAM needs a flag to skip
address untag under some conditions).

Looks like this was copy+pasted from LAM. AIUI, there is no untagging in LASS.
Please, please take the time to proofread what you're posting. To you it's a
minor typo; to others, incorrect statements like this can create a lot of
confusion.
No functional change intended.
Signed-off-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
Signed-off-by: Zeng Guang <guang.zeng@xxxxxxxxx>
---
arch/x86/kvm/emulate.c | 19 +++++++++++++------
arch/x86/kvm/kvm_emulate.h | 4 ++++
2 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 936a397a08cd..9508836e8a35 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
- bool write, bool fetch,
- enum x86emul_mode mode, ulong *linear)
+ u32 flags, enum x86emul_mode mode,
"unsigned int" captures that the number of bits doesn't truly matter, e.g. isn't
reflected in hardware or anything. This could just as easily be a u16, but there's
obviously no reason to squeeze this into a u16.
+ ulong *linear)I'm tempted to have this be:
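For illustration, the prototype with that change applied would read (a sketch
of the suggestion, not the final patch):

	static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
					       struct segmented_address addr,
					       unsigned *max_size, unsigned size,
					       unsigned int flags, enum x86emul_mode mode,
					       ulong *linear)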
{
struct desc_struct desc;
bool usable;
@@ -696,6 +696,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
u32 lim;
u16 sel;
u8 va_bits;
+ bool fetch = !!(flags & X86EMUL_F_FETCH);
+ bool write = !!(flags & X86EMUL_F_WRITE);
la = seg_base(ctxt, addr.seg) + addr.ea;
*max_size = 0;
@@ -757,7 +759,11 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
ulong *linear)
{
unsigned max_size;
- return __linearize(ctxt, addr, &max_size, size, write, false,
+ u32 flags = 0;
+
+ if (write)
+ flags |= X86EMUL_F_WRITE;
+ return __linearize(ctxt, addr, &max_size, size, flags,
ctxt->mode, linear);
I'm tempted to have this be:

	return __linearize(ctxt, addr, &max_size, size,
			   write ? X86EMUL_F_WRITE : 0, ctxt->mode, linear);
Mostly so that it's obvious "flags" is constant. The alternative would be

	const unsigned int flags = write ? X86EMUL_F_WRITE : 0;
But my preference is probably to omit "flags" entirely.
}
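Taken together, linearize() would then collapse to something like this (a
sketch assuming the upstream signature of linearize(), which this patch does
not otherwise change):

	static int linearize(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     unsigned size, bool write,
			     ulong *linear)
	{
		unsigned max_size;

		return __linearize(ctxt, addr, &max_size, size,
				   write ? X86EMUL_F_WRITE : 0, ctxt->mode, linear);
	}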
@@ -768,10 +774,11 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
unsigned max_size;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = dst };
+ u32 flags = X86EMUL_F_FETCH;

Meh, just pass X86EMUL_F_FETCH directly, i.e. drop the local "flags".
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
- rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+ rc = __linearize(ctxt, addr, &max_size, 1, flags, ctxt->mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
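With the local dropped as suggested, the assign_eip() call would simply be (a
sketch of the suggestion):

	rc = __linearize(ctxt, addr, &max_size, 1, X86EMUL_F_FETCH, ctxt->mode, &linear);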
@@ -896,6 +903,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
+ u32 flags = X86EMUL_F_FETCH;

And here.
/*
* We do not know exactly how many bytes will be needed, and
@@ -907,8 +915,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
* boundary check itself. Instead, we use max_size to check
* against op_size.
*/
- rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
- &linear);
+ rc = __linearize(ctxt, addr, &max_size, 0, flags, ctxt->mode, &linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
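Likewise, this fetch site would collapse to (sketch):

	rc = __linearize(ctxt, addr, &max_size, 0, X86EMUL_F_FETCH,
			 ctxt->mode, &linear);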
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index ab65f3a47dfd..5b9ec610b2cb 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -88,6 +88,10 @@ struct x86_instruction_info {
#define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */
#define X86EMUL_INTERCEPTED 6 /* Intercepted by nested VMCB/VMCS */
+/* x86-specific emulation flags */
+#define X86EMUL_F_FETCH BIT(0)
+#define X86EMUL_F_WRITE BIT(1)
+
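The flag word is what makes this extensible: a later feature claims the next
bit without widening __linearize()'s parameter list. For example (hypothetical
flag name, not part of this patch), the LAM "skip untag" case mentioned in the
changelog could become:

	#define X86EMUL_F_SKIP_UNTAG	BIT(2)	/* hypothetical: LAM untag bypass */

and __linearize() would test flags & X86EMUL_F_SKIP_UNTAG at the one spot that
cares, leaving existing callers untouched.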
struct x86_emulate_ops {
void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
/*
--
2.27.0