commit 3a7f0df
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 094b5d9..64a4b03 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1194,20 +1194,37 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	if (vcpu->arch.virtual_tsc_khz) {
+		int faulted = 0;
+
 		/* n.b - signed multiplication and division required */
 		usdiff = data - kvm->arch.last_tsc_write;
 #ifdef CONFIG_X86_64
 		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
 #else
 		/* do_div() only does unsigned */
-		asm("idivl %2; xor %%edx, %%edx"
-		: "=A"(usdiff)
-		: "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+		asm("1: idivl %[divisor]\n"
+		    "2: xor %%edx, %%edx\n"
+		    "   movl $0, %[faulted]\n"
+		    "3:\n"
+		    ".section .fixup,\"ax\"\n"
+		    "4: movl $1, %[faulted]\n"
+		    "   jmp  3b\n"
+		    ".previous\n"
+
+		    _ASM_EXTABLE(1b, 4b)
+
+		: "=A"(usdiff), [faulted] "=r" (faulted)
+		: "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
+
 #endif
 		do_div(elapsed, 1000);
 		usdiff -= elapsed;
 		if (usdiff < 0)
 			usdiff = -usdiff;
+
+		/* idivl overflow => difference is larger than USEC_PER_SEC */
+		if (faulted)
+			usdiff = USEC_PER_SEC;
 	} else
 		usdiff = USEC_PER_SEC; /* disable TSC match window below */
 
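
Note (not part of the patch): a minimal 32-bit user-space sketch of the
failure mode the fixup above guards against. With the pre-patch constraints,
idivl divides the 64-bit dividend in EDX:EAX by a 32-bit divisor; if the
quotient does not fit in 32 bits, the instruction raises a divide-error
exception (#DE). Assumptions: a 32-bit build (gcc -m32); the file name
idiv_overflow.c and the sample values are illustrative only.

	/* idiv_overflow.c - hypothetical demo; build: gcc -m32 idiv_overflow.c */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* quotient would be 5e9, which exceeds INT32_MAX -> #DE */
		int64_t dividend = 5000000000LL;
		int32_t divisor = 1;
		int64_t result;

		/* same constraints as the pre-patch kvm_write_tsc() code */
		asm("idivl %2; xor %%edx, %%edx"
		    : "=A"(result)
		    : "A"(dividend), "rm"(divisor));

		/* never reached: the process dies with SIGFPE */
		printf("quotient = %lld\n", (long long)result);
		return 0;
	}

In the kernel the same #DE would be fatal, so the patch registers an
exception-table entry via _ASM_EXTABLE(1b, 4b): if the idivl at label 1
faults, execution resumes at label 4 in the .fixup section, which sets
faulted and jumps back to label 3, letting the C code clamp usdiff to
USEC_PER_SEC instead of oopsing.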