From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada <yamada.masahiro@socionext.com>
Date: Mon, 30 Sep 2019 14:59:25 +0900
Subject: [PATCH] ARM: fix __get_user_check() in case uaccess_* calls are not
 inlined

KernelCI reports that bcm2835_defconfig is no longer booting since
commit ac7c3e4ff401 ("compiler: enable CONFIG_OPTIMIZE_INLINING
forcibly"):

  https://lkml.org/lkml/2019/9/26/825

I also received a regression report from Nicolas Saenz Julienne:

  https://lkml.org/lkml/2019/9/27/263

This problem has cropped up on arch/arm/configs/bcm2835_defconfig
because it enables CONFIG_CC_OPTIMIZE_FOR_SIZE. The compiler tends
to prefer not inlining functions with -Os. I was able to reproduce
it with other boards and defconfig files by manually enabling
CONFIG_CC_OPTIMIZE_FOR_SIZE.

__get_user_check() specifically uses the r0, r1 and r2 registers.
So, uaccess_save_and_enable() and uaccess_restore() must be inlined
to avoid those registers being overwritten in the callees.
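
For illustration, the hazard looks roughly like this (a reduced
sketch, not the kernel code; the helper name and the surrounding
function are made up, and the key assumption is that r0-r3 are
caller-clobbered under the AAPCS):

	unsigned int save_and_enable(void);	/* assume this is NOT inlined */

	unsigned long example(void *ptr, unsigned long limit)
	{
		register void *p asm("r0") = ptr;		/* meant to live in r0 */
		register unsigned long l asm("r1") = limit;	/* meant to live in r1 */
		unsigned int flags = save_and_enable();		/* the call may clobber r0-r3 */

		/* p and l are no longer guaranteed to sit in r0/r1 here */
		asm volatile("" : "+r" (p), "+r" (l));		/* stand-in for the __get_user asm */
		(void)flags;
		return l;
	}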

Prior to commit 9012d011660e ("compiler: allow all arches to enable
CONFIG_OPTIMIZE_INLINING"), the 'inline' marker was always enough for
inlining functions, except on x86.

Since that commit, all architectures can enable CONFIG_OPTIMIZE_INLINING.
So, __always_inline is now the only guaranteed way to force inlining.

I want to give the compiler as much freedom as possible over the
inlining decision. So, I changed the function call order instead of
adding __always_inline.
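
For comparison, the heavier-handed alternative would have been to
annotate the helpers in arch/arm/include/asm/uaccess.h along these
lines (sketch only), forcing them to be inlined even with -Os at the
cost of that freedom:

	static __always_inline unsigned int uaccess_save_and_enable(void);
	static __always_inline void uaccess_restore(unsigned int flags);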

Call uaccess_save_and_enable() before assigning __p ("r0"), and
uaccess_restore() only after the return value has been moved out of
__e ("r0").
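
In the resulting macro, the critical ordering is roughly as follows
(condensed from the hunks below; the comments are editorial, not part
of the patch):

	unsigned int __ua_flags = uaccess_save_and_enable();	/* before r0-r2 are set up */
	register typeof(*(p)) __user *__p asm("r0") = (p);
	register __inttype(x) __r2 asm("r2");
	register unsigned long __l asm("r1") = __limit;
	register int __e asm("r0");
	...
	__err = __e;			/* move the result out of "r0" first */
	x = (typeof(*(p))) __r2;	/* consume "r2" */
	uaccess_restore(__ua_flags);	/* this call may now clobber r0-r3 safely */
	__err;				/* value of the statement expression */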

Fixes: 9012d011660e ("compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING")
Reported-by: "kernelci.org bot" <bot@kernelci.org>
Reported-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Tested-by: Fabrizio Castro <fabrizio.castro@bp.renesas.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
---
 arch/arm/include/asm/uaccess.h | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 98c6b91be4a8..60055827dddc 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -191,11 +191,12 @@ extern int __get_user_64t_4(void *);
 #define __get_user_check(x, p)						\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		register typeof(*(p)) __user *__p asm("r0") = (p);	\
 		register __inttype(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
-		unsigned int __ua_flags = uaccess_save_and_enable();	\
+		unsigned int __err;					\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			if (sizeof((x)) >= 8)				\
@@ -223,9 +224,10 @@ extern int __get_user_64t_4(void *);
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
-		uaccess_restore(__ua_flags);				\
+		__err = __e;						\
 		x = (typeof(*(p))) __r2;				\
-		__e;							\
+		uaccess_restore(__ua_flags);				\
+		__err;							\
 	})
 
 #define get_user(x, p)							\
-- 
2.26.2