Re: [PATCH 3/3] KVM: selftests: Test Hyper-V invariant TSC control

From: Vitaly Kuznetsov
Date: Thu Jul 14 2022 - 10:57:16 EST


Maxim Levitsky <mlevitsk@xxxxxxxxxx> writes:

> On Wed, 2022-07-13 at 17:05 +0200, Vitaly Kuznetsov wrote:
>> Add a test for the newly introduced Hyper-V invariant TSC control feature:
>> - HV_X64_MSR_TSC_INVARIANT_CONTROL is not available without
>>  HV_ACCESS_TSC_INVARIANT CPUID bit set and available with it.
>> - BIT(0) of HV_X64_MSR_TSC_INVARIANT_CONTROL controls the filtering of
>> architectural invariant TSC (CPUID.80000007H:EDX[8]) bit.
>>
>> Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
>> ---
>>  .../selftests/kvm/x86_64/hyperv_features.c    | 73 ++++++++++++++++++-
>>  1 file changed, 69 insertions(+), 4 deletions(-)
>>
>> diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
>> index c05acd78548f..9599eecdedff 100644
>> --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
>> +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
>> @@ -15,6 +15,9 @@
>>  
>>  #define LINUX_OS_ID ((u64)0x8100 << 48)
>>  
>> +/* CPUID.80000007H:EDX */
>> +#define X86_FEATURE_INVTSC (1 << 8)
>> +
>>  static inline uint8_t hypercall(u64 control, vm_vaddr_t input_address,
>>                                 vm_vaddr_t output_address, uint64_t *hv_status)
>>  {
>> @@ -60,6 +63,24 @@ static void guest_msr(struct msr_data *msr)
>>                 GUEST_ASSERT_2(!vector, msr->idx, vector);
>>         else
>>                 GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector);
>> +
>> +       /* Invariant TSC bit appears when TSC invariant control MSR is written to */
>> +       if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
>> +               u32 eax = 0x80000007, ebx = 0, ecx = 0, edx = 0;
>> +
>> +               cpuid(&eax, &ebx, &ecx, &edx);
>> +
>> +               /*
>> +                * TSC invariant bit is present without the feature (legacy) or
>> +                * when the feature is present and enabled.
>> +                */
>> +               if ((!msr->available && !msr->write) || (msr->write && msr->write_val == 1))
>> +                       GUEST_ASSERT(edx & X86_FEATURE_INVTSC);
>> +               else
>> +                       GUEST_ASSERT(!(edx & X86_FEATURE_INVTSC));
>> +       }
>> +
>> +
>>         GUEST_DONE();
>>  }
>>  
>> @@ -105,6 +126,15 @@ static void hv_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
>>         vcpu_set_cpuid(vcpu, cpuid);
>>  }
>>  
>> +static bool guest_has_invtsc(void)
>> +{
>> +       struct kvm_cpuid_entry2 *cpuid;
>> +
>> +       cpuid = kvm_get_supported_cpuid_entry(0x80000007);
>> +
>> +       return cpuid->edx & X86_FEATURE_INVTSC;
>> +}
>> +
>>  static void guest_test_msrs_access(void)
>>  {
>>         struct kvm_vcpu *vcpu;
>> @@ -124,6 +154,7 @@ static void guest_test_msrs_access(void)
>>         struct kvm_cpuid2 *best;
>>         vm_vaddr_t msr_gva;
>>         struct msr_data *msr;
>> +       bool has_invtsc = guest_has_invtsc();
>>  
>>         while (true) {
>>                 vm = vm_create_with_one_vcpu(&vcpu, guest_msr);
>> @@ -136,8 +167,7 @@ static void guest_test_msrs_access(void)
>>                 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
>>  
>>                 vcpu_set_hv_cpuid(vcpu);
>> -
>> -               best = kvm_get_supported_hv_cpuid();
>> +               best = vcpu_get_cpuid(vcpu);
>>  
>>                 vm_init_descriptor_tables(vm);
>>                 vcpu_init_descriptor_tables(vcpu);
>> @@ -431,6 +461,42 @@ static void guest_test_msrs_access(void)
>>                         break;
>>  
>>                 case 44:
>> +                       /* MSR is not available when CPUID feature bit is unset */
>> +                       if (!has_invtsc)
>> +                               continue;
>> +                       msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
>> +                       msr->write = 0;
>> +                       msr->available = 0;
>> +                       break;
>> +               case 45:
>> +                       /* MSR is available when CPUID feature bit is set */
>> +                       if (!has_invtsc)
>> +                               continue;
>> +                       feat.eax |= HV_ACCESS_TSC_INVARIANT;
>> +                       msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
>> +                       msr->write = 0;
>> +                       msr->available = 1;
>> +                       break;
>> +               case 46:
>> +                       /* Writing bits other than 0 is forbidden */
>> +                       if (!has_invtsc)
>> +                               continue;
>> +                       msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
>> +                       msr->write = 1;
>> +                       msr->write_val = 0xdeadbeef;
>> +                       msr->available = 0;
>> +                       break;
>> +               case 47:
>> +                       /* Setting bit 0 enables the feature */
>> +                       if (!has_invtsc)
>> +                               continue;
>> +                       msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
>> +                       msr->write = 1;
>> +                       msr->write_val = 1;
>> +                       msr->available = 1;
>> +                       break;
>> +
>> +               default:
>>                         kvm_vm_free(vm);
>>                         return;
>>                 }
>> @@ -502,8 +568,7 @@ static void guest_test_hcalls_access(void)
>>                 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
>>  
>>                 vcpu_set_hv_cpuid(vcpu);
>> -
>> -               best = kvm_get_supported_hv_cpuid();
>> +               best = vcpu_get_cpuid(vcpu);
>>  
>>                 run = vcpu->run;
>>  
>
> Tiny unrelated nitpick: 'msr->available' is misleading, it is more like
> 'msr->should_not_gp' or something - might be worth it to refactor in the future.
>

Indeed, sounds much better. I'll add a renaming patch when doing v2.

>
> Reviewed-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>

Thanks!

>
> Best regards,
> Maxim Levitsky
>

--
Vitaly