[PATCH 6/8] x86/mm/cpa: Move CLFLUSH test into cpa_flush_array()
From: Peter Zijlstra
Date: Wed Sep 19 2018 - 05:00:33 EST
Rather than guarding cpa_flush_array() callers with a CLFLUSH test,
move the test into cpa_flush_array() itself and have it fall back to
cpa_flush_all() when CLFLUSH is not available.
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
arch/x86/mm/pageattr.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)
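
For context, the change amounts to moving a CPU-feature check out of the
callers and into the flush helper itself. A minimal userspace sketch of that
pattern follows (hypothetical names, not kernel code; the real code tests
X86_FEATURE_CLFLUSH and falls back to cpa_flush_all()):

/*
 * Illustrative sketch only, not the kernel implementation: the same
 * refactoring with hypothetical names. Before, every caller had to test
 * for the feature; afterwards the helper performs the fallback itself.
 */
#include <stdbool.h>
#include <stdio.h>

static bool have_clflush;		/* stands in for X86_FEATURE_CLFLUSH */

static void flush_all(bool cache)	/* stands in for cpa_flush_all() */
{
	printf("flush everything (cache=%d)\n", cache);
}

static void flush_array(unsigned long addr, int numpages, bool cache)
{
	/* The capability test now lives inside the helper ... */
	if (!have_clflush) {
		flush_all(cache);	/* ... with a whole-cache fallback */
		return;
	}
	printf("clflush %d pages at %#lx (cache=%d)\n", numpages, addr, cache);
}

int main(void)
{
	have_clflush = false;

	/* Callers no longer need their own feature guard. */
	flush_array(0x1000UL, 4, true);
	return 0;
}

The diff below applies the same movement to cpa_flush_array() and its caller
in change_page_attr_set_clr().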
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -328,6 +328,11 @@ static void cpa_flush_array(unsigned lon
 	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
+	if (!static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		cpa_flush_all(cache);
+		return;
+	}
+
 	flush_tlb_kernel_range(baddr, baddr + PAGE_SIZE * numpages);
 	if (!cache)
@@ -1753,19 +1758,19 @@ static int change_page_attr_set_clr(unsi
 	cache = !!pgprot2cachemode(mask_set);
 	/*
-	 * On success we use CLFLUSH, when the CPU supports it to
-	 * avoid the WBINVD. If the CPU does not support it and in the
-	 * error case we fall back to cpa_flush_all (which uses
-	 * WBINVD):
-	 */
-	if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
-		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
-			cpa_flush_array(baddr, addr, numpages, cache,
-					cpa.flags, pages);
-		} else
-			cpa_flush_range(baddr, numpages, cache);
-	} else
+	 * On error, flush everything to be sure.
+	 */
+	if (ret) {
 		cpa_flush_all(cache);
+		goto out;
+	}
+
+	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+		cpa_flush_array(baddr, addr, numpages, cache,
+				cpa.flags, pages);
+	} else {
+		cpa_flush_range(baddr, numpages, cache);
+	}
 out:
 	return ret;