Re: [PATCH v4 5/6] add listmount(2) syscall

From: Arnd Bergmann
Date: Thu Jan 11 2024 - 18:02:15 EST


On Thu, Jan 11, 2024, at 21:14, Linus Torvalds wrote:

> The SH put_user64() needs to be looked at too, but in the meantime,
> maybe something like this fixes the problems with listmount?

I tried changing it to use the generic memcpy()-based uaccess
implementation that m68k-nommu and riscv-nommu use, which also avoids
the build failure. I still run into other, unrelated build issues on
arch/sh, so I'm not sure whether this is a sufficient fix.
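
For reference, the nommu path this switches to lives in
include/asm-generic/uaccess.h under CONFIG_UACCESS_MEMCPY. Stripped
down (a rough paraphrase, not the verbatim header), the copy helpers
look roughly like this:

/*
 * Without an MMU there is no separate user address space, so user
 * copies are plain memcpy()s that cannot fault, and none of the
 * asm fixup machinery is needed.
 */
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}

get_user()/put_user() likewise end up in the generic
__get_user_fn()/__put_user_fn() wrappers, which is why the !MMU
variants of __get_user_asm/__put_user_asm in uaccess_32.h can go away.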

Arnd

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 7500521b2b98..2cc3a541e231 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -73,6 +73,7 @@ config SUPERH
select PERF_USE_VMALLOC
select RTC_LIB
select SPARSE_IRQ
+ select UACCESS_MEMCPY if !MMU
select TRACE_IRQFLAGS_SUPPORT
help
The SuperH is a RISC processor targeted for use in embedded systems
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index a79609eb14be..b42764d55901 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -2,6 +2,7 @@
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

+#ifdef CONFIG_MMU
#include <asm/extable.h>
#include <asm-generic/access_ok.h>

@@ -130,4 +131,8 @@ struct mem_access {
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma, int, unsigned long address);

+#else
+#include <asm-generic/uaccess.h>
+#endif
+
#endif /* __ASM_SH_UACCESS_H */
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h
index 5d7ddc092afd..e053f2fd245c 100644
--- a/arch/sh/include/asm/uaccess_32.h
+++ b/arch/sh/include/asm/uaccess_32.h
@@ -35,7 +35,6 @@ do { \
} \
} while (0)

-#ifdef CONFIG_MMU
#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
@@ -56,16 +55,6 @@ __asm__ __volatile__( \
".previous" \
:"=&r" (err), "=&r" (x) \
:"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
-#else
-#define __get_user_asm(x, addr, err, insn) \
-do { \
- __asm__ __volatile__ ( \
- "mov." insn " %1, %0\n\t" \
- : "=&r" (x) \
- : "m" (__m(addr)) \
- ); \
-} while (0)
-#endif /* CONFIG_MMU */

extern void __get_user_unknown(void);

@@ -140,7 +129,6 @@ do { \
} \
} while (0)

-#ifdef CONFIG_MMU
#define __put_user_asm(x, addr, err, insn) \
do { \
__asm__ __volatile__ ( \
@@ -164,17 +152,6 @@ do { \
: "memory" \
); \
} while (0)
-#else
-#define __put_user_asm(x, addr, err, insn) \
-do { \
- __asm__ __volatile__ ( \
- "mov." insn " %0, %1\n\t" \
- : /* no outputs */ \
- : "r" (x), "m" (__m(addr)) \
- : "memory" \
- ); \
-} while (0)
-#endif /* CONFIG_MMU */

#if defined(CONFIG_CPU_LITTLE_ENDIAN)
#define __put_user_u64(val,addr,retval) \