commit 3494df0
Author: Jesse Keating
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 4b493f6..ada1fcd 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -187,7 +187,9 @@ static int watchdog(void *__bind_cpu)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
+	rcu_read_lock();
 	sched_setscheduler(current, SCHED_FIFO, &param);
+	rcu_read_unlock();
 
 	/* initialize timestamp */
 	__touch_softlockup_watchdog();
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5a5ea2c..47ecc56 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1272,6 +1272,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+
+	rcu_read_lock();
+
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1298,6 +1301,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
 		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
 
+	rcu_read_unlock();
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
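
Both hunks apply the same pattern: an RCU read-side critical section
(rcu_read_lock()/rcu_read_unlock()) around code that reaches
RCU-protected data, i.e. the sched_setscheduler() call in the
softlockup watchdog and the task_group()/effective_load() computation
in wake_affine(). A minimal sketch of the pattern in kernel C, with a
hypothetical function name that is not taken from the patch:

	#include <linux/rcupdate.h>

	/* Hypothetical illustration of the pattern added by both hunks. */
	static void example_rcu_read_side(void)
	{
		rcu_read_lock();
		/*
		 * RCU-protected pointers (e.g. the task-group pointers
		 * behind task_group()) may be dereferenced here; the
		 * objects they reference will not be freed until after
		 * the matching rcu_read_unlock().
		 */
		rcu_read_unlock();
	}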