[RFC PATCH v3 14/21] KVM: x86: Kill cur_tsc_{nsec,offset,write} fields

From: David Woodhouse
Date: Tue May 21 2024 - 20:22:22 EST


From: David Woodhouse <dwmw@xxxxxxxxxxxx>

These pointlessly duplicate the last_tsc_{nsec,offset,write} values.

The only place they were used was when the TSC is stable and a new
vCPU is being synchronized to the previous setting, in which case the
'last_' values are definitely identical.
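
To see why the 'cur_' copies can never differ from the 'last_' copies
on the path that reads them, here is a minimal userspace sketch (not
kernel code; struct tsc_state and sync_tsc() are hypothetical stand-ins
for the kvm->arch fields and __kvm_synchronize_tsc()) modelling the two
update paths this patch touches:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the relevant kvm->arch fields. */
struct tsc_state {
	uint64_t last_tsc_nsec, last_tsc_write, last_tsc_offset;
	uint64_t cur_tsc_nsec, cur_tsc_write, cur_tsc_offset;
	uint64_t cur_tsc_generation;
};

/* Models the update logic of __kvm_synchronize_tsc() before this patch. */
static void sync_tsc(struct tsc_state *s, uint64_t offset, uint64_t tsc,
		     uint64_t ns, bool matched)
{
	/* The 'last_' fields are refreshed on every call... */
	s->last_tsc_nsec = ns;
	s->last_tsc_write = tsc;
	s->last_tsc_offset = offset;

	if (!matched) {
		/* ...and the 'cur_' fields only ever receive the same data. */
		s->cur_tsc_generation++;
		s->cur_tsc_nsec = ns;
		s->cur_tsc_write = tsc;
		s->cur_tsc_offset = offset;
	}
}

int main(void)
{
	struct tsc_state s = { 0 };

	/* First write starts a new generation: cur_ == last_ trivially. */
	sync_tsc(&s, 100, 200, 300, false);

	/*
	 * A new vCPU synchronizing to the previous setting (stable TSC)
	 * reads offset/ns back from the tracked state, so the values it
	 * writes into 'last_' are exactly the 'cur_' values again.
	 */
	sync_tsc(&s, s.cur_tsc_offset, s.last_tsc_write, s.cur_tsc_nsec, true);

	assert(s.cur_tsc_nsec == s.last_tsc_nsec);
	assert(s.cur_tsc_write == s.last_tsc_write);
	assert(s.cur_tsc_offset == s.last_tsc_offset);
	return 0;
}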

Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 3 ---
arch/x86/kvm/x86.c | 19 ++++++++-----------
2 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b01c1d000fff..7d06f389a607 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1354,9 +1354,6 @@ struct kvm_arch {
u32 last_tsc_khz;
u64 last_tsc_offset;
u64 last_tsc_scaling_ratio;
- u64 cur_tsc_nsec;
- u64 cur_tsc_write;
- u64 cur_tsc_offset;
u64 cur_tsc_generation;
int nr_vcpus_matched_tsc;

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6ec43f39bdb0..ab5d55071253 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2713,11 +2713,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
lockdep_assert_held(&kvm->arch.tsc_write_lock);

/*
- * We also track th most recent recorded KHZ, write and time to
- * allow the matching interval to be extended at each write.
+ * Track the last recorded kHz (and the associated scaling ratio for
+ * calculating the guest TSC), and the last TSC offset.
*/
- kvm->arch.last_tsc_nsec = ns;
- kvm->arch.last_tsc_write = tsc;
kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
kvm->arch.last_tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
kvm->arch.last_tsc_offset = offset;
@@ -2736,10 +2734,9 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
*
* These values are tracked in kvm->arch.cur_xxx variables.
*/
+ kvm->arch.last_tsc_nsec = ns;
+ kvm->arch.last_tsc_write = tsc;
kvm->arch.cur_tsc_generation++;
- kvm->arch.cur_tsc_nsec = ns;
- kvm->arch.cur_tsc_write = tsc;
- kvm->arch.cur_tsc_offset = offset;
kvm->arch.nr_vcpus_matched_tsc = 0;
} else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
kvm->arch.nr_vcpus_matched_tsc++;
@@ -2747,8 +2744,8 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,

/* Keep track of which generation this VCPU has synchronized to */
vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
- vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
- vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+ vcpu->arch.this_tsc_nsec = kvm->arch.last_tsc_nsec;
+ vcpu->arch.this_tsc_write = kvm->arch.last_tsc_write;

kvm_track_tsc_matching(vcpu);
}
@@ -2825,8 +2822,8 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
data = kvm->arch.last_tsc_write;

if (!kvm_check_tsc_unstable()) {
- offset = kvm->arch.cur_tsc_offset;
- ns = kvm->arch.cur_tsc_nsec;
+ offset = kvm->arch.last_tsc_offset;
+ ns = kvm->arch.last_tsc_nsec;
} else {
/*
* ... unless the TSC is unstable and has to be
--
2.44.0