Re: [PATCH v6 2/2] s390/vfio-ap: r/w lock for PQAP interception handler function pointer

From: Tony Krowiak
Date: Wed Jun 30 2021 - 11:18:37 EST


I assumed that this patch would get queued along with the other one in this series,
but it looks like that was an erroneous assumption. Should this also be queued?

On 6/21/21 11:57 AM, Tony Krowiak wrote:
The function pointer to the interception handler for the PQAP instruction
can get changed during the interception process. Let's add a read/write
semaphore to struct kvm_s390_crypto to control access to the function
pointer contained therein.

The semaphore must be locked for write access by the vfio_ap device driver
when notified that the KVM pointer has been set or cleared. It must be
locked for read access by the interception framework when the PQAP
instruction is intercepted.
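
For reference, the locking pattern the patch introduces boils down to
roughly the following (a condensed sketch of the read and write sides,
not the literal code; see the hunks below for the real thing):

    /* Read side: PQAP interception handler (arch/s390/kvm/priv.c) */
    down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
    if (vcpu->kvm->arch.crypto.pqap_hook) {
            crypto_hook hook = *vcpu->kvm->arch.crypto.pqap_hook;

            ret = hook(vcpu);
    }
    up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);

    /* Write side: vfio_ap driver, when the KVM pointer is set or cleared */
    down_write(&kvm->arch.crypto.pqap_hook_rwsem);
    kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; /* or NULL */
    up_write(&kvm->arch.crypto.pqap_hook_rwsem);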

Signed-off-by: Tony Krowiak <akrowiak@xxxxxxxxxxxxx>
Reviewed-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
Cc: stable@xxxxxxxxxxxxxxx
---
 arch/s390/include/asm/kvm_host.h      |  8 +++-----
 arch/s390/kvm/kvm-s390.c              |  1 +
 arch/s390/kvm/priv.c                  | 10 ++++++----
 drivers/s390/crypto/vfio_ap_ops.c     | 23 +++++++++++++++++------
 drivers/s390/crypto/vfio_ap_private.h |  2 +-
 5 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8925f3969478..36441669ffba 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -803,14 +803,12 @@ struct kvm_s390_cpu_model {
unsigned short ibc;
};

-struct kvm_s390_module_hook {
- int (*hook)(struct kvm_vcpu *vcpu);
- struct module *owner;
-};
+typedef int (*crypto_hook)(struct kvm_vcpu *vcpu);

struct kvm_s390_crypto {
struct kvm_s390_crypto_cb *crycb;
- struct kvm_s390_module_hook *pqap_hook;
+ struct rw_semaphore pqap_hook_rwsem;
+ crypto_hook *pqap_hook;
__u32 crycbd;
__u8 aes_kw;
__u8 dea_kw;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1296fc10f80c..418d910df569 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2606,6 +2606,7 @@ static void kvm_s390_crypto_init(struct kvm *kvm)
{
kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
kvm_s390_set_crycb_format(kvm);
+ init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);

if (!test_kvm_facility(kvm, 76))
return;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 9928f785c677..6bed9406c1f3 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -610,6 +610,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
static int handle_pqap(struct kvm_vcpu *vcpu)
{
struct ap_queue_status status = {};
+ crypto_hook pqap_hook;
unsigned long reg0;
int ret;
uint8_t fc;
@@ -657,15 +658,16 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
* Verify that the hook callback is registered, lock the owner
* and call the hook.
*/
+ down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
if (vcpu->kvm->arch.crypto.pqap_hook) {
- if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner))
- return -EOPNOTSUPP;
- ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu);
- module_put(vcpu->kvm->arch.crypto.pqap_hook->owner);
+ pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
+ ret = pqap_hook(vcpu);
if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
kvm_s390_set_psw_cc(vcpu, 3);
+ up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
return ret;
}
+ up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
/*
* A vfio_driver must register a hook.
* No hook means no driver to enable the SIE CRYCB and no queues.
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 122c85c22469..742277bc8d1c 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -352,8 +352,7 @@ static int vfio_ap_mdev_create(struct mdev_device *mdev)
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
init_waitqueue_head(&matrix_mdev->wait_for_kvm);
mdev_set_drvdata(mdev, matrix_mdev);
- matrix_mdev->pqap_hook.hook = handle_pqap;
- matrix_mdev->pqap_hook.owner = THIS_MODULE;
+ matrix_mdev->pqap_hook = handle_pqap;
mutex_lock(&matrix_dev->lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->lock);
@@ -1115,15 +1114,20 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
}

kvm_get_kvm(kvm);
+ matrix_mdev->kvm = kvm;
matrix_mdev->kvm_busy = true;
mutex_unlock(&matrix_dev->lock);
+
+ down_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem);
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
+ up_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem);
+
kvm_arch_crypto_set_masks(kvm,
matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm,
matrix_mdev->matrix.adm);
+
mutex_lock(&matrix_dev->lock);
- kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
- matrix_mdev->kvm = kvm;
matrix_mdev->kvm_busy = false;
wake_up_all(&matrix_mdev->wait_for_kvm);
}
@@ -1189,10 +1193,17 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
if (matrix_mdev->kvm) {
matrix_mdev->kvm_busy = true;
mutex_unlock(&matrix_dev->lock);
- kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+
+ if (matrix_mdev->kvm->arch.crypto.crycbd) {
+ down_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ up_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem);
+
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ }
+
mutex_lock(&matrix_dev->lock);
vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
- matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
kvm_put_kvm(matrix_mdev->kvm);
matrix_mdev->kvm = NULL;
matrix_mdev->kvm_busy = false;
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index f82a6396acae..e12218e5a629 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -86,7 +86,7 @@ struct ap_matrix_mdev {
bool kvm_busy;
wait_queue_head_t wait_for_kvm;
struct kvm *kvm;
- struct kvm_s390_module_hook pqap_hook;
+ crypto_hook pqap_hook;
struct mdev_device *mdev;
};