From 1ddf0b1b11aa8a90cef6706e935fc31c75c406ba Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@amacapital.net>
Date: Sun, 21 Dec 2014 08:57:46 -0800
Subject: x86, vdso: Use asm volatile in __getcpu

In Linux 3.18 and below, GCC hoists the lsl instructions in the
pvclock code all the way to the beginning of __vdso_clock_gettime,
slowing the non-paravirt case significantly.  For unknown reasons,
presumably related to the removal of a branch, the performance issue
is gone as of

e76b027e6408 x86,vdso: Use LSL unconditionally for vgetcpu

but I don't trust GCC enough to expect the problem to stay fixed.

There should be no correctness issue, because the __getcpu calls in
__vdso_clock_gettime were never necessary in the first place.

Note to stable maintainers: In 3.18 and below, depending on
configuration, gcc 4.9.2 generates code like this:

     9c3:       44 0f 03 e8             lsl    %ax,%r13d
     9c7:       45 89 eb                mov    %r13d,%r11d
     9ca:       0f 03 d8                lsl    %ax,%ebx

This patch won't apply as is to any released kernel, but I'll send a
trivial backported version if needed.

Fixes: 51c19b4f5927 x86: vdso: pvclock gettime support
Cc: stable@vger.kernel.org # 3.8+
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
(Backported to 3.17.8 by Josh Boyer <jwboyer@fedoraproject.org>)

diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 2a46ca720afc..3db1fa5fdceb 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
 		native_read_tscp(&p);
 	} else {
 		/* Load per CPU data from GDT */
-		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+		asm volatile("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 	}
 
 	return p;