From 3511b268365e8d3ccfb44fbdee2d90d8f8c0d083 Mon Sep 17 00:00:00 2001
From: Josh Boyer
Date: Jan 12 2015 13:17:26 +0000
Subject: Add patch to fix loop in VDSO (rhbz 1178975)

---

diff --git a/kernel.spec b/kernel.spec
index ca8a724..d3e2bb8 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -765,6 +765,9 @@ Patch26122: batman-adv-Calculate-extra-tail-size-based-on-queued.patch
 #CVE-2014-9529 rhbz 1179813 1179853
 Patch26124: KEYS-close-race-between-key-lookup-and-freeing.patch
 
+#rhbz 1178975
+Patch26125: x86-vdso-Use-asm-volatile-in-__getcpu.patch
+
 # git clone ssh://git.fedorahosted.org/git/kernel-arm64.git, git diff master...devel
 Patch30000: kernel-arm64.patch
 
@@ -1500,6 +1503,9 @@ ApplyPatch batman-adv-Calculate-extra-tail-size-based-on-queued.patch
 #CVE-2014-9529 rhbz 1179813 1179853
 ApplyPatch KEYS-close-race-between-key-lookup-and-freeing.patch
 
+#rhbz 1178975
+ApplyPatch x86-vdso-Use-asm-volatile-in-__getcpu.patch
+
 %if 0%{?aarch64patches}
 ApplyPatch kernel-arm64.patch
 %ifnarch aarch64 # this is stupid, but i want to notice before secondary koji does.
@@ -2318,6 +2324,9 @@ fi
 #                 ||----w |
 #                 ||     ||
 %changelog
+* Mon Jan 12 2015 Josh Boyer
+- Add patch to fix loop in VDSO (rhbz 1178975)
+
 * Thu Jan 08 2015 Justin M. Forbes - 3.17.8-300
 - Linux v3.17.8
 
diff --git a/x86-vdso-Use-asm-volatile-in-__getcpu.patch b/x86-vdso-Use-asm-volatile-in-__getcpu.patch
new file mode 100644
index 0000000..bcf5a15
--- /dev/null
+++ b/x86-vdso-Use-asm-volatile-in-__getcpu.patch
@@ -0,0 +1,48 @@
+From 1ddf0b1b11aa8a90cef6706e935fc31c75c406ba Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski
+Date: Sun, 21 Dec 2014 08:57:46 -0800
+Subject: x86, vdso: Use asm volatile in __getcpu
+
+In Linux 3.18 and below, GCC hoists the lsl instructions in the
+pvclock code all the way to the beginning of __vdso_clock_gettime,
+slowing the non-paravirt case significantly.  For unknown reasons,
+presumably related to the removal of a branch, the performance issue
+is gone as of
+
+e76b027e6408 x86,vdso: Use LSL unconditionally for vgetcpu
+
+but I don't trust GCC enough to expect the problem to stay fixed.
+
+There should be no correctness issue, because the __getcpu calls in
+__vdso_clock_gettime were never necessary in the first place.
+
+Note to stable maintainers: In 3.18 and below, depending on
+configuration, gcc 4.9.2 generates code like this:
+
+     9c3:       44 0f 03 e8             lsl    %ax,%r13d
+     9c7:       45 89 eb                mov    %r13d,%r11d
+     9ca:       0f 03 d8                lsl    %ax,%ebx
+
+This patch won't apply as is to any released kernel, but I'll send a
+trivial backported version if needed.
+
+Fixes: 51c19b4f5927 x86: vdso: pvclock gettime support
+Cc: stable@vger.kernel.org # 3.8+
+Cc: Marcelo Tosatti
+Acked-by: Paolo Bonzini
+Signed-off-by: Andy Lutomirski
+(Backported to 3.17.8 by Josh Boyer)
+
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index 2a46ca720afc..3db1fa5fdceb 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
+ 		native_read_tscp(&p);
+ 	} else {
+ 		/* Load per CPU data from GDT */
+-		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
++		asm volatile("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ 	}
+ 
+ 	return p;
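
Why "volatile" is the fix, in brief: GCC documents that an asm statement
with output operands and no volatile qualifier is assumed to be a pure
function of its inputs, so the optimizer may merge identical copies and
hoist them past branches -- which is how the lsl reads escaped the
pvclock-only path here. Adding "asm volatile" pins each instance where it
is written. Below is a minimal userspace sketch of the same effect; it
substitutes rdtsc for lsl so it runs on any x86-64 box, and the function
names are illustrative, not from the kernel.

    /* build: gcc -O2 -o asmdemo asmdemo.c  (x86-64 only) */
    #include <stdio.h>
    #include <stdint.h>

    static inline uint64_t tsc_pure(void)
    {
    	uint32_t lo, hi;
    	/* Plain asm with outputs and no clobbers: GCC may treat two
    	 * identical copies as one result and merge or hoist them. */
    	asm("rdtsc" : "=a" (lo), "=d" (hi));
    	return ((uint64_t)hi << 32) | lo;
    }

    static inline uint64_t tsc_volatile(void)
    {
    	uint32_t lo, hi;
    	/* asm volatile: each copy must execute where written. */
    	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
    	return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
    	uint64_t a = tsc_pure(),     b = tsc_pure();
    	uint64_t c = tsc_volatile(), d = tsc_volatile();
    	/* At -O2 the first delta is typically 0, because both reads
    	 * were folded into a single rdtsc; the second delta is a
    	 * real elapsed-cycle count. */
    	printf("plain asm delta:    %llu\n", (unsigned long long)(b - a));
    	printf("volatile asm delta: %llu\n", (unsigned long long)(d - c));
    	return 0;
    }

At -O0 the two variants behave identically since no CSE runs; the hoisting
this patch guards against only appears once the optimizer is allowed to
treat the plain asm as a pure function.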