RE: [PATCH v2 2/3] KVM:SVM: Add extended intercept support
From: Babu Moger
Date: Wed Jun 17 2020 - 10:31:57 EST
> -----Original Message-----
> From: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
> Sent: Wednesday, June 17, 2020 7:02 AM
> To: Moger, Babu <Babu.Moger@xxxxxxx>
> Cc: linux-kernel@xxxxxxxxxxxxxxx; kvm@xxxxxxxxxxxxxxx;
> wanpengli@xxxxxxxxxxx; joro@xxxxxxxxxx; x86@xxxxxxxxxx;
> sean.j.christopherson@xxxxxxxxx; mingo@xxxxxxxxxx; bp@xxxxxxxxx;
> hpa@xxxxxxxxx; pbonzini@xxxxxxxxxx; tglx@xxxxxxxxxxxxx;
> jmattson@xxxxxxxxxx
> Subject: Re: [PATCH v2 2/3] KVM:SVM: Add extended intercept support
>
> Babu Moger <babu.moger@xxxxxxx> writes:
>
> > New intercept bits have been added to the VMCB control area
> > to support interception of the INVPCID instruction.
> >
> > The following bit is added to the VMCB layout control area
> > to control intercept of INVPCID:
> >
> > Byte Offset Bit(s) Function
> > 14h 2 intercept INVPCID
> >
> > Add the interfaces to support this extended interception.
> > Also update the tracing for extended intercepts.
> >
> > AMD documentation for the INVPCID feature is available in the "AMD64
> > Architecture Programmer's Manual Volume 2: System Programming,
> > Pub. 24593 Rev. 3.34 (or later)".
> >
> > The documentation can be obtained at the links below:
> > Link: https://www.amd.com/system/files/TechDocs/24593.pdf
> > Link: https://bugzilla.kernel.org/show_bug.cgi?id=206537
> >
> > Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
> > ---
> > arch/x86/include/asm/svm.h | 3 ++-
> > arch/x86/kvm/svm/nested.c | 6 +++++-
> > arch/x86/kvm/svm/svm.c | 1 +
> > arch/x86/kvm/svm/svm.h | 18 ++++++++++++++++++
> > arch/x86/kvm/trace.h | 12 ++++++++----
> > 5 files changed, 34 insertions(+), 6 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> > index 8a1f5382a4ea..62649fba8908 100644
> > --- a/arch/x86/include/asm/svm.h
> > +++ b/arch/x86/include/asm/svm.h
> > @@ -61,7 +61,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
> > u32 intercept_dr;
> > u32 intercept_exceptions;
> > u64 intercept;
> > - u8 reserved_1[40];
> > + u32 intercept_extended;
> > + u8 reserved_1[36];
> > u16 pause_filter_thresh;
> > u16 pause_filter_count;
> > u64 iopm_base_pa;
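
As a quick sanity check on the layout above (illustrative only, not part of
the patch): the fields ahead of the new word are 4 + 4 + 4 + 8 = 0x14 bytes,
so intercept_extended lands at byte offset 14h exactly as the table in the
commit message says, and shrinking reserved_1 from 40 to 36 bytes keeps the
overall structure size unchanged. A hypothetical compile-time assert along
these lines, dropped into any svm init path, would confirm it:

	/* Hypothetical check, not part of the patch: new word sits at byte 14h. */
	BUILD_BUG_ON(offsetof(struct vmcb_control_area, intercept_extended) != 0x14);
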
> > diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> > index 8a6db11dcb43..7f6d0f2533e2 100644
> > --- a/arch/x86/kvm/svm/nested.c
> > +++ b/arch/x86/kvm/svm/nested.c
> > @@ -121,6 +121,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
> > c->intercept_dr = h->intercept_dr;
> > c->intercept_exceptions = h->intercept_exceptions;
> > c->intercept = h->intercept;
> > + c->intercept_extended = h->intercept_extended;
> >
> > if (g->int_ctl & V_INTR_MASKING_MASK) {
> > /* We only want the cr8 intercept bits of L1 */
> > @@ -142,6 +143,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
> > c->intercept_dr |= g->intercept_dr;
> > c->intercept_exceptions |= g->intercept_exceptions;
> > c->intercept |= g->intercept;
> > + c->intercept_extended |= g->intercept_extended;
> > }
> >
> > static void copy_vmcb_control_area(struct vmcb_control_area *dst,
> > @@ -151,6 +153,7 @@ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
> > dst->intercept_dr = from->intercept_dr;
> > dst->intercept_exceptions = from->intercept_exceptions;
> > dst->intercept = from->intercept;
> > + dst->intercept_extended = from->intercept_extended;
> > dst->iopm_base_pa = from->iopm_base_pa;
> > dst->msrpm_base_pa = from->msrpm_base_pa;
> > dst->tsc_offset = from->tsc_offset;
> > @@ -433,7 +436,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
> > trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
> > nested_vmcb->control.intercept_cr >> 16,
> > nested_vmcb->control.intercept_exceptions,
> > - nested_vmcb->control.intercept);
> > + nested_vmcb->control.intercept,
> > + nested_vmcb->control.intercept_extended);
> >
> > /* Clear internal status */
> > kvm_clear_exception_queue(&svm->vcpu);
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index 9e333b91ff78..285e5e1ff518 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -2801,6 +2801,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
> > pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
> > pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
> > pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
> > + pr_err("%-20s%08x\n", "intercepts (extended):", control->intercept_extended);
> > pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
> > pr_err("%-20s%d\n", "pause filter threshold:",
> > control->pause_filter_thresh);
> > diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> > index 6ac4c00a5d82..935d08fac03d 100644
> > --- a/arch/x86/kvm/svm/svm.h
> > +++ b/arch/x86/kvm/svm/svm.h
> > @@ -311,6 +311,24 @@ static inline void clr_intercept(struct vcpu_svm *svm, int bit)
> > recalc_intercepts(svm);
> > }
> >
> > +static inline void set_extended_intercept(struct vcpu_svm *svm, int bit)
> > +{
> > + struct vmcb *vmcb = get_host_vmcb(svm);
> > +
> > + vmcb->control.intercept_extended |= (1U << bit);
> > +
> > + recalc_intercepts(svm);
> > +}
> > +
> > +static inline void clr_extended_intercept(struct vcpu_svm *svm, int bit)
> > +{
> > + struct vmcb *vmcb = get_host_vmcb(svm);
> > +
> > + vmcb->control.intercept_extended &= ~(1U << bit);
> > +
> > + recalc_intercepts(svm);
> > +}
> > +
> > static inline bool is_intercept(struct vcpu_svm *svm, int bit)
> > {
> > return (svm->vmcb->control.intercept & (1ULL << bit)) != 0;
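
For context, a minimal sketch of how these helpers would be consumed (the
define and function names below are assumptions for illustration; the actual
INVPCID wiring is expected in a later patch of the series). The caller passes
the bit position within the 32-bit extended word, bit 2 being INVPCID per the
table in the commit message:

	/* Hypothetical define and usage, not part of this patch. */
	#define INTERCEPT_INVPCID	2

	static void svm_intercept_invpcid(struct vcpu_svm *svm)
	{
		if (boot_cpu_has(X86_FEATURE_INVPCID))
			set_extended_intercept(svm, INTERCEPT_INVPCID);
	}
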
> > diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
> > index b66432b015d2..5c841c42b33d 100644
> > --- a/arch/x86/kvm/trace.h
> > +++ b/arch/x86/kvm/trace.h
> > @@ -544,14 +544,16 @@ TRACE_EVENT(kvm_nested_vmrun,
> > );
> >
> > TRACE_EVENT(kvm_nested_intercepts,
> > - TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
> > - TP_ARGS(cr_read, cr_write, exceptions, intercept),
> > + TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept,
> > + __u32 extended),
> > + TP_ARGS(cr_read, cr_write, exceptions, intercept, extended),
> >
> > TP_STRUCT__entry(
> > __field( __u16, cr_read )
> > __field( __u16, cr_write )
> > __field( __u32, exceptions )
> > __field( __u64, intercept )
> > + __field( __u32, extended )
> > ),
> >
> > TP_fast_assign(
> > @@ -559,11 +561,13 @@ TRACE_EVENT(kvm_nested_intercepts,
> > __entry->cr_write = cr_write;
> > __entry->exceptions = exceptions;
> > __entry->intercept = intercept;
> > + __entry->extended = extended;
> > ),
> >
> > - TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
> > + TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx"
> > + " intercept (extended): %08x",
> > __entry->cr_read, __entry->cr_write, __entry->exceptions,
> > - __entry->intercept)
> > + __entry->intercept, __entry->extended)
>
> Nit: I would've renamed 'extended' to something like 'intercept_ext' as
> it is not clear what it is about otherwise. Also, if you decide to do
> so, you may as well shorten 'intercept_extended' to 'intercept_ext'
> everywhere else to be consistent. Or just use 'intercept_extended';
> with the 80-character-per-line limitation gone, we no longer need to be
> concise.
With the new suggestion from Jim, we probably don't need this change. Thanks.
>
> > );
> > /*
> > * Tracepoint for #VMEXIT while nested
> >
>
> --
> Vitaly