Re: [PATCH v2 6/6] KVM: SEV: use scoped mutex guard in sev_asid_new()
From: Sean Christopherson
Date: Thu Feb 26 2026 - 18:56:48 EST
On Tue, Jan 20, 2026, Carlos López wrote:
> Simplify the lock management in sev_asid_new() by using a mutex guard,
> so that the mutex is automatically released when the goto is taken.
>
> Signed-off-by: Carlos López <clopez@xxxxxxx>
> ---
> arch/x86/kvm/svm/sev.c | 24 ++++++++++--------------
> 1 file changed, 10 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index d3fa0963465d..d8d5c3a703f9 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -231,24 +231,20 @@ static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type)
> return ret;
> }
>
> - mutex_lock(&sev_bitmap_lock);
> -
> + scoped_guard(mutex, &sev_bitmap_lock) {
> again:
> - asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
> - if (asid > max_asid) {
> - if (retry && __sev_recycle_asids(min_asid, max_asid)) {
> - retry = false;
> - goto again;
> + asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
> + if (asid > max_asid) {
> + if (retry && __sev_recycle_asids(min_asid, max_asid)) {
> + retry = false;
> + goto again;
> + }
> + ret = -EBUSY;
> + goto e_uncharge;
> }
> - mutex_unlock(&sev_bitmap_lock);
> - ret = -EBUSY;
> - goto e_uncharge;
> + __set_bit(asid, sev_asid_bitmap);
> }
I think I'd prefer to throw this into a helper to avoid the goto within the
scoped guard. FWIW, I also tried (quite hard) to replace the goto with a loop,
and couldn't come up with anything better.
No need for you to send a v3, I'll incorporate these patches into a larger series
(there are some locking goofs that need to be fixed, and at least one of these
patches will generate an annoying-but-easy-to-resolve conflict).
Thanks!
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index af84357dc954..249384e30320 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -198,6 +198,28 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
misc_cg_uncharge(type, sev->misc_cg, 1);
}
+static unsigned int sev_alloc_asid(unsigned int min_asid, unsigned int max_asid)
+{
+ unsigned int asid;
+ bool retry = true;
+
+ guard(mutex)(&sev_bitmap_lock);
+
+again:
+ asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+ if (asid > max_asid) {
+ if (retry && __sev_recycle_asids(min_asid, max_asid)) {
+ retry = false;
+ goto again;
+ }
+
+ return asid;
+ }
+
+ __set_bit(asid, sev_asid_bitmap);
+ return asid;
+}
+
static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type)
{
/*
@@ -205,7 +227,6 @@ static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type)
* SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
*/
unsigned int min_asid, max_asid, asid;
- bool retry = true;
int ret;
if (vm_type == KVM_X86_SNP_VM) {
@@ -238,24 +259,12 @@ static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type)
return ret;
}
- mutex_lock(&sev_bitmap_lock);
-
-again:
- asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+ asid = sev_alloc_asid(min_asid, max_asid);
if (asid > max_asid) {
- if (retry && __sev_recycle_asids(min_asid, max_asid)) {
- retry = false;
- goto again;
- }
- mutex_unlock(&sev_bitmap_lock);
ret = -EBUSY;
goto e_uncharge;
}
- __set_bit(asid, sev_asid_bitmap);
-
- mutex_unlock(&sev_bitmap_lock);
-
sev->asid = asid;
return 0;
e_uncharge: