[v3 1/2] powerpc/pseries/iommu: Share the per-cpu TCE page with the hypervisor.
From: Ram Pai
Date: Fri Nov 15 2019 - 15:49:26 EST
The H_PUT_TCE_INDIRECT hcall takes a page filled with TCE entries as one
of its parameters. One page per CPU is dedicated to this purpose for the
lifetime of the kernel. On secure VMs the hypervisor reads encrypted
contents when it accesses this page, but it needs the unencrypted TCE
entries in order to update the TCE table. There is nothing secret or
sensitive about these entries, so share the page with the hypervisor.
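
For reference, the per-cpu page is consumed roughly as follows. This is a
simplified sketch of the existing loop in tce_buildmulti_pSeriesLP(), not a
verbatim copy; the real code has additional limit and error handling. The
hypervisor reads the TCE entries directly out of this page, which is why a
secure VM must share it:

	do {
		/* Fill the per-cpu page with up to one page worth of TCEs. */
		limit = min_t(long, npages, 4096 / TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce |
					(rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		/* Hand the (now shared) page to the hypervisor in one hcall. */
		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep), limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);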
Signed-off-by: Ram Pai <linuxram@xxxxxxxxxx>
---
arch/powerpc/platforms/pseries/iommu.c | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 6ba081d..0720831 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -37,6 +37,7 @@
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
#include <asm/svm.h>
+#include <asm/ultravisor.h>
#include "pseries.h"
@@ -179,6 +180,23 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
static DEFINE_PER_CPU(__be64 *, tce_page);
+/*
+ * Allocate a TCE page. If secure VM, share the page with the hypervisor.
+ *
+ * NOTE: the TCE page is shared with the hypervisor explicitly and remains
+ * shared for the lifetime of the kernel. It is implicitly unshared at kernel
+ * shutdown through a UV_UNSHARE_ALL_PAGES ucall.
+ */
+static __be64 *alloc_tce_page(void)
+{
+ __be64 *tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
+
+ if (tcep && is_secure_guest())
+ uv_share_page(PHYS_PFN(__pa(tcep)), 1);
+
+ return tcep;
+}
+
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
@@ -206,8 +224,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
* from iommu_alloc{,_sg}()
*/
if (!tcep) {
- tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
- /* If allocation fails, fall back to the loop implementation */
+ tcep = alloc_tce_page();
if (!tcep) {
local_irq_restore(flags);
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
@@ -405,7 +422,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
tcep = __this_cpu_read(tce_page);
if (!tcep) {
- tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
+ tcep = alloc_tce_page();
if (!tcep) {
local_irq_enable();
return -ENOMEM;
--
1.8.3.1