Re: [PATCH] powerpc: perf: consolidate perf_callchain_user_64 and perf_callchain_user_32

From: Christophe Leroy
Date: Tue Apr 07 2020 - 01:21:15 EST




On 06/04/2020 at 23:00, Michal Suchanek wrote:
perf_callchain_user_64 and perf_callchain_user_32 are nearly identical.
Consolidate into one function with thin wrappers.

Suggested-by: Nicholas Piggin <npiggin@xxxxxxxxx>
Signed-off-by: Michal Suchanek <msuchanek@xxxxxxx>
---
arch/powerpc/perf/callchain.h | 24 +++++++++++++++++++++++-
arch/powerpc/perf/callchain_32.c | 21 ++-------------------
arch/powerpc/perf/callchain_64.c | 14 ++++----------
3 files changed, 29 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/perf/callchain.h b/arch/powerpc/perf/callchain.h
index 7a2cb9e1181a..7540bb71cb60 100644
--- a/arch/powerpc/perf/callchain.h
+++ b/arch/powerpc/perf/callchain.h
@@ -2,7 +2,7 @@
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H
-int read_user_stack_slow(void __user *ptr, void *buf, int nb);
+int read_user_stack_slow(const void __user *ptr, void *buf, int nb);

Does the constification of ptr have to be in this patch?
Wouldn't it be better to have it as a separate patch?
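
If it were split out, a preparatory patch would only need to constify the
prototype and the definition, i.e. something like:

	int read_user_stack_slow(const void __user *ptr, void *buf, int nb);

and this consolidation patch would then apply on top of it unchanged.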

void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
@@ -16,4 +16,26 @@ static inline bool invalid_user_sp(unsigned long sp)
return (!sp || (sp & mask) || (sp > top));
}
+/*
+ * On 32-bit we just access the address and let hash_page create a
+ * HPTE if necessary, so there is no need to fall back to reading
+ * the page tables. Since this is called at interrupt level,
+ * do_page_fault() won't treat a DSI as a page fault.
+ */
+static inline int __read_user_stack(const void __user *ptr, void *ret,
+ size_t size)
+{
+ int rc;
+
+ if ((unsigned long)ptr > TASK_SIZE - size ||
+ ((unsigned long)ptr & (size - 1)))
+ return -EFAULT;
+ rc = probe_user_read(ret, ptr, size);
+
+ if (rc && IS_ENABLED(CONFIG_PPC64))

gcc is probably smart enough to deal with it efficiently, but it would
be more correct to test rc after checking CONFIG_PPC64.
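
Something like this would keep the same behaviour while letting the
compile-time condition come first (untested sketch):

	if (IS_ENABLED(CONFIG_PPC64) && rc)
		return read_user_stack_slow(ptr, ret, size);

which is also the ordering the old read_user_stack_32() used.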

+ return read_user_stack_slow(ptr, ret, size);
+
+ return rc;
+}
+
#endif /* _POWERPC_PERF_CALLCHAIN_H */
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index 8aa951003141..1b4621f177e8 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -31,26 +31,9 @@
#endif /* CONFIG_PPC64 */
-/*
- * On 32-bit we just access the address and let hash_page create a
- * HPTE if necessary, so there is no need to fall back to reading
- * the page tables. Since this is called at interrupt level,
- * do_page_fault() won't treat a DSI as a page fault.
- */
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+static int read_user_stack_32(const unsigned int __user *ptr, unsigned int *ret)
{
- int rc;
-
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
- ((unsigned long)ptr & 3))
- return -EFAULT;
-
- rc = probe_user_read(ret, ptr, sizeof(*ret));
-
- if (IS_ENABLED(CONFIG_PPC64) && rc)
- return read_user_stack_slow(ptr, ret, 4);
-
- return rc;
+ return __read_user_stack(ptr, ret, sizeof(*ret));
}
/*
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index df1ffd8b20f2..55bbc25a54ed 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -24,7 +24,7 @@
* interrupt context, so if the access faults, we read the page tables
* to find which page (if any) is mapped and access it directly.
*/
-int read_user_stack_slow(void __user *ptr, void *buf, int nb)
+int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
{
int ret = -EFAULT;
pgd_t *pgdir;
@@ -65,16 +65,10 @@ int read_user_stack_slow(void __user *ptr, void *buf, int nb)
return ret;
}
-static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
+static int read_user_stack_64(const unsigned long __user *ptr,
+ unsigned long *ret)
{
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
- ((unsigned long)ptr & 7))
- return -EFAULT;
-
- if (!probe_user_read(ret, ptr, sizeof(*ret)))
- return 0;
-
- return read_user_stack_slow(ptr, ret, 8);
+ return __read_user_stack(ptr, ret, sizeof(*ret));
}
/*


Christophe