diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 15763af..f6978b0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -386,17 +386,21 @@ void default_idle(void)
 		 */
 		smp_mb();
 
+		rcu_idle_enter();
 		if (!need_resched())
 			safe_halt();	/* enables interrupts racelessly */
 		else
 			local_irq_enable();
+		rcu_idle_exit();
 		current_thread_info()->status |= TS_POLLING;
 		trace_power_end(smp_processor_id());
 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
+		rcu_idle_enter();
 		cpu_relax();
+		rcu_idle_exit();
 	}
 }
 #ifdef CONFIG_APM_MODULE
@@ -457,14 +461,19 @@ static void mwait_idle(void)
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
+		rcu_idle_enter();
 		if (!need_resched())
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		rcu_idle_exit();
 		trace_power_end(smp_processor_id());
 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
-	} else
+	} else {
 		local_irq_enable();
+		rcu_idle_enter();
+		rcu_idle_exit();
+	}
 }
 
 /*
@@ -477,8 +486,10 @@ static void poll_idle(void)
 	trace_power_start(POWER_CSTATE, 0, smp_processor_id());
 	trace_cpu_idle(0, smp_processor_id());
 	local_irq_enable();
+	rcu_idle_enter();
 	while (!need_resched())
 		cpu_relax();
+	rcu_idle_exit();
 	trace_power_end(smp_processor_id());
 	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 }
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f..6d9d4d5 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -100,7 +100,6 @@ void cpu_idle(void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		tick_nohz_idle_enter();
-		rcu_idle_enter();
 		while (!need_resched()) {
 
 			check_pgt_cache();
@@ -117,7 +116,6 @@ void cpu_idle(void)
 				pm_idle();
 			start_critical_timings();
 		}
-		rcu_idle_exit();
 		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a..55a1a35 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -140,13 +140,9 @@ void cpu_idle(void)
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
 
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
-
 			if (cpuidle_idle_call())
 				pm_idle();
 
-			rcu_idle_exit();
 			start_critical_timings();
 
 			/* In many cases the interrupt that ended idle
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 20bce51..a9ddab8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -261,6 +261,7 @@ static int intel_idle(struct cpuidle_device *dev,
 	kt_before = ktime_get_real();
 
 	stop_critical_timings();
+	rcu_idle_enter();
 	if (!need_resched()) {
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -268,6 +269,7 @@ static int intel_idle(struct cpuidle_device *dev,
 		if (!need_resched())
 			__mwait(eax, ecx);
 	}
+	rcu_idle_exit();
 
 	start_critical_timings();
 