Re: [PATCH v2 2/4] KVM: x86: move sev_lock/unlock_vcpus_for_migration to kvm_main.c

From: Oliver Upton
Date: Wed Apr 09 2025 - 16:46:59 EST


On Tue, Apr 08, 2025 at 09:41:34PM -0400, Maxim Levitsky wrote:
> Move sev_lock/unlock_vcpus_for_migration to kvm_main.c and rename
> the new functions to kvm_lock_all_vcpus/kvm_unlock_all_vcpus
> and kvm_lock_all_vcpus_nested.
>
> This code allows locking all vCPUs without triggering a lockdep
> warning about reaching MAX_LOCK_DEPTH, by coercing lockdep into
> thinking that we release all of the locks other than vCPU 0's lock
> immediately after we take them.
>
> No functional change intended.
>
> Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
> ---
>  arch/x86/kvm/svm/sev.c   | 65 +++---------------------------------
>  include/linux/kvm_host.h |  6 ++++
>  virt/kvm/kvm_main.c      | 71 ++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 81 insertions(+), 61 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 0bc708ee2788..7adc54b1f741 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -1889,63 +1889,6 @@ enum sev_migration_role {
>  	SEV_NR_MIGRATION_ROLES,
>  };
>
> -static int sev_lock_vcpus_for_migration(struct kvm *kvm,
> -					enum sev_migration_role role)
> -{
> -	struct kvm_vcpu *vcpu;
> -	unsigned long i, j;
> -
> -	kvm_for_each_vcpu(i, vcpu, kvm) {
> -		if (mutex_lock_killable_nested(&vcpu->mutex, role))
> -			goto out_unlock;
> -
> -#ifdef CONFIG_PROVE_LOCKING
> -		if (!i)
> -			/*
> -			 * Reset the role to one that avoids colliding with
> -			 * the role used for the first vcpu mutex.
> -			 */
> -			role = SEV_NR_MIGRATION_ROLES;
> -		else
> -			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
> -#endif
> -	}
> -
> -	return 0;
> -
> -out_unlock:
> -
> -	kvm_for_each_vcpu(j, vcpu, kvm) {
> -		if (i == j)
> -			break;
> -
> -#ifdef CONFIG_PROVE_LOCKING
> -		if (j)
> -			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
> -#endif
> -
> -		mutex_unlock(&vcpu->mutex);
> -	}
> -	return -EINTR;
> -}
> -
> -static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
> -{
> -	struct kvm_vcpu *vcpu;
> -	unsigned long i;
> -	bool first = true;
> -
> -	kvm_for_each_vcpu(i, vcpu, kvm) {
> -		if (first)
> -			first = false;
> -		else
> -			mutex_acquire(&vcpu->mutex.dep_map,
> -				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
> -
> -		mutex_unlock(&vcpu->mutex);
> -	}
> -}
> -
>  static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
>  {
>  	struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
> @@ -2083,10 +2026,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
>  		charged = true;
>  	}
>
> -	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
> +	ret = kvm_lock_all_vcpus_nested(kvm, false, SEV_MIGRATION_SOURCE);
>  	if (ret)
>  		goto out_dst_cgroup;
> -	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
> +	ret = kvm_lock_all_vcpus_nested(source_kvm, false, SEV_MIGRATION_TARGET);
>  	if (ret)
>  		goto out_dst_vcpu;
>
> @@ -2100,9 +2043,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
>  	ret = 0;
>
>  out_source_vcpu:
> -	sev_unlock_vcpus_for_migration(source_kvm);
> +	kvm_unlock_all_vcpus(source_kvm);
>  out_dst_vcpu:
> -	sev_unlock_vcpus_for_migration(kvm);
> +	kvm_unlock_all_vcpus(kvm);
>  out_dst_cgroup:
>  	/* Operates on the source on success, on the destination on failure. */
>  	if (charged)
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 1dedc421b3e3..30cf28bf5c80 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -1015,6 +1015,12 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
>
>  void kvm_destroy_vcpus(struct kvm *kvm);
>
> +int kvm_lock_all_vcpus_nested(struct kvm *kvm, bool trylock, unsigned int role);
> +void kvm_unlock_all_vcpus(struct kvm *kvm);
> +
> +#define kvm_lock_all_vcpus(kvm, trylock) \
> +	kvm_lock_all_vcpus_nested(kvm, trylock, 0)
> +
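
The virt/kvm/kvm_main.c hunk is trimmed here, but since the changelog
says no functional change is intended, I'd expect the moved helper to
be the sev.c code above made generic. Very roughly (an untested
paraphrase, not the actual hunk; the trylock handling and the role
reset constant are my guesses):

int kvm_lock_all_vcpus_nested(struct kvm *kvm, bool trylock, unsigned int role)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (trylock) {
			if (!mutex_trylock(&vcpu->mutex))
				goto out_unlock;
		} else if (mutex_lock_killable_nested(&vcpu->mutex, role)) {
			goto out_unlock;
		}

#ifdef CONFIG_PROVE_LOCKING
		if (!i)
			/*
			 * Reset the role so it can't collide with the one
			 * used for vCPU 0's mutex (exact constant is a
			 * guess on my part).
			 */
			role = MAX_LOCKDEP_SUBCLASSES - 1;
		else
			/* Tell lockdep we dropped this lock (we didn't). */
			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
#endif
	}
	return 0;

out_unlock:
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;

#ifdef CONFIG_PROVE_LOCKING
		/* Re-acquire in lockdep's eyes so the unlock balances. */
		if (j)
			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
#endif

		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
}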

Can you instead add lock / trylock variants of this?

kvm_trylock_all_vcpus(kvm) seems a bit more obvious in the calling code.
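
i.e., something like (shape only, untested):

#define kvm_lock_all_vcpus(kvm) \
	kvm_lock_all_vcpus_nested(kvm, false, 0)

#define kvm_trylock_all_vcpus(kvm) \
	kvm_lock_all_vcpus_nested(kvm, true, 0)

That way the caller never passes a bare true/false that the reader
has to decode.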

Thanks,
Oliver