[PATCH 12/13] swiotlb: remove swiotlb_set_mem_attributes

From: Christoph Hellwig
Date: Mon Mar 05 2018 - 12:48:17 EST


Now that set_memory_decrypted is always available, we can just call it
directly.
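
As a rough sketch of the per-call-site conversion (illustrative only, the
actual hunks are below):

	/* old: x86-only helper, with a __weak no-op stub elsewhere */
	swiotlb_set_mem_attributes(vaddr, bytes);

	/* new: call the generic interface directly; it takes a page count */
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

Note the unit change: the old helper took a size in bytes, while
set_memory_decrypted() takes a number of pages, hence the explicit
">> PAGE_SHIFT" at each call site.  Dropping the helper also removes the
__weak stub in lib/swiotlb.c, since the generic set_memory_decrypted() is
presumably a no-op where memory encryption is not implemented.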

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/x86/include/asm/mem_encrypt.h |  2 --
 arch/x86/mm/mem_encrypt.c          |  9 ---------
 lib/swiotlb.c                      | 12 ++++++------
 3 files changed, 6 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 22c5f3e6f820..9da0b63c8fc7 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -48,8 +48,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);

-void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
-
bool sme_active(void);
bool sev_active(void);

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 8bfc735bbdd7..65f45e0ef496 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -379,15 +379,6 @@ void __init mem_encrypt_init(void)
: "Secure Memory Encryption (SME)");
}

-void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
-{
-	WARN(PAGE_ALIGN(size) != size,
-	     "size is not page-aligned (%#lx)\n", size);
-
-	/* Make the SWIOTLB buffer area decrypted */
-	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
-}
-
struct sme_populate_pgd_data {
void *pgtable_area;
pgd_t *pgd;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ca8eeaead925..8b06b4485e65 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
+#include <linux/set_memory.h>

#include <asm/io.h>
#include <asm/dma.h>
@@ -156,8 +157,6 @@ unsigned long swiotlb_size_or_default(void)
	return size ? size : (IO_TLB_DEFAULT_SIZE);
}

-void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
-
/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
@@ -202,12 +201,12 @@ void __init swiotlb_update_mem_attributes(void)

	vaddr = phys_to_virt(io_tlb_start);
	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
-	swiotlb_set_mem_attributes(vaddr, bytes);
+	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);

	vaddr = phys_to_virt(io_tlb_overflow_buffer);
	bytes = PAGE_ALIGN(io_tlb_overflow);
-	swiotlb_set_mem_attributes(vaddr, bytes);
+	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
}

@@ -348,7 +347,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
	io_tlb_start = virt_to_phys(tlb);
	io_tlb_end = io_tlb_start + bytes;

-	swiotlb_set_mem_attributes(tlb, bytes);
+	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
	memset(tlb, 0, bytes);

	/*
@@ -359,7 +358,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
	if (!v_overflow_buffer)
		goto cleanup2;

-	swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
+	set_memory_decrypted((unsigned long)v_overflow_buffer,
+			     io_tlb_overflow >> PAGE_SHIFT);
	memset(v_overflow_buffer, 0, io_tlb_overflow);
	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

--
2.14.2