author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 15:09:40 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 15:09:40 -0700
commit     98ec21a01896751b673b6c731ca8881daa8b2c6d (patch)
tree       9d6d780675436efc894878475284c70f766126dd /kernel/sched/fair.c
parent     a262948335bc5359b82f0ed5ef35f6e82ca44d16 (diff)
parent     cbce1a686700595de65ee363b9b3283ae85d8fc5 (diff)
Merge branch 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Thomas Gleixner:
 "This series of scheduler updates depends on sched/core and timers/core
  branches, which are already in your tree:

   - Scheduler balancing overhaul to plug a hard to trigger race which
     causes an oops in the balancer (Peter Zijlstra)

   - Lockdep updates which are related to the balancing updates (Peter
     Zijlstra)"

* 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched,lockdep: Employ lock pinning
  lockdep: Implement lock pinning
  lockdep: Simplify lock_release()
  sched: Streamline the task migration locking a little
  sched: Move code around
  sched,dl: Fix sched class hopping CBS hole
  sched, dl: Convert switched_{from, to}_dl() / prio_changed_dl() to balance callbacks
  sched,dl: Remove return value from pull_dl_task()
  sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
  sched,rt: Remove return value from pull_rt_task()
  sched: Allow balance callbacks for check_class_changed()
  sched: Use replace normalize_task() with __sched_setscheduler()
  sched: Replace post_schedule with a balance callback list
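The "balance callback list" named in the last patch title replaces the old post_schedule hook: balancing work that cannot run inside the rq->lock critical section is queued on a per-runqueue list while the lock is still held and drained once the critical section is over. The following is a minimal userspace C sketch of that idea, not the kernel's implementation; struct runqueue, queue_balance_callback(), run_balance_callbacks() and the stub callback are illustrative names only.

/*
 * Simplified model of a balance callback list: queue work under the
 * (notional) runqueue lock, run it after the critical section ends.
 * Illustrative sketch only -- not the kernel's data structures.
 */
#include <stddef.h>
#include <stdio.h>

struct balance_callback {
	struct balance_callback *next;
	void (*func)(void);
};

struct runqueue {
	struct balance_callback *balance_callback;	/* pending work */
};

/* Queue work while the runqueue lock is (conceptually) held. */
static void queue_balance_callback(struct runqueue *rq,
				   struct balance_callback *cb,
				   void (*func)(void))
{
	cb->func = func;
	cb->next = rq->balance_callback;
	rq->balance_callback = cb;
}

/* Drain the queued work once the critical section is over. */
static void run_balance_callbacks(struct runqueue *rq)
{
	struct balance_callback *cb = rq->balance_callback;

	rq->balance_callback = NULL;
	while (cb) {
		struct balance_callback *next = cb->next;

		cb->func();
		cb = next;
	}
}

static void push_tasks_stub(void)
{
	printf("balance callback ran after the critical section\n");
}

int main(void)
{
	struct runqueue rq = { .balance_callback = NULL };
	struct balance_callback cb;

	/* ...lock held: decide that balancing is needed, queue it... */
	queue_balance_callback(&rq, &cb, push_tasks_stub);
	/* ...lock dropped: now it is safe to do the balancing work... */
	run_balance_callbacks(&rq);
	return 0;
}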
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40a7fcbf491e..3d57cc0ca0a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5392,7 +5392,15 @@ simple:
 	return p;
 idle:
+	/*
+	 * This is OK, because current is on_cpu, which avoids it being picked
+	 * for load-balance and preemption/IRQs are still disabled avoiding
+	 * further scheduler activity on it and we're being very careful to
+	 * re-start the picking loop.
+	 */
+	lockdep_unpin_lock(&rq->lock);
 	new_tasks = idle_balance(rq);
+	lockdep_pin_lock(&rq->lock);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
@@ -7426,9 +7434,6 @@ static int idle_balance(struct rq *this_rq)
 		goto out;
 	}
-	/*
-	 * Drop the rq->lock, but keep IRQ/preempt disabled.
-	 */
 	raw_spin_unlock(&this_rq->lock);
 	update_blocked_averages(this_cpu);
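For context on the hunks above: pick_next_task_fair() keeps rq->lock pinned while picking a task, but idle_balance() is allowed to release and re-acquire the lock, so the pin is dropped around that call and re-taken afterwards. Below is a self-contained userspace sketch of the same pattern under a simplified model (a plain mutex plus a pin counter); rq_model, idle_balance_model() and the other names are illustrative stand-ins, not the kernel's types or functions.

/*
 * Simplified model of lock pinning: releasing the lock while it is
 * pinned is treated as a bug, so callers that legitimately drop the
 * lock must unpin first and re-pin afterwards.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct rq_model {
	pthread_mutex_t lock;
	int pinned;			/* lockdep-style pin count */
};

static void rq_lock(struct rq_model *rq)
{
	pthread_mutex_lock(&rq->lock);
}

static void rq_unlock(struct rq_model *rq)
{
	/* the check lock pinning is meant to provide */
	assert(rq->pinned == 0);
	pthread_mutex_unlock(&rq->lock);
}

/* Stand-in for idle_balance(): releases and re-acquires the lock. */
static int idle_balance_model(struct rq_model *rq)
{
	rq_unlock(rq);
	/* ...pull tasks from other runqueues... */
	rq_lock(rq);
	return 0;			/* number of tasks pulled */
}

static void pick_next_task_model(struct rq_model *rq)
{
	rq_lock(rq);
	rq->pinned++;			/* like lockdep_pin_lock() */

	/* nothing runnable: fall into the idle path */
	rq->pinned--;			/* like lockdep_unpin_lock() */
	idle_balance_model(rq);		/* may drop and re-take the lock */
	rq->pinned++;			/* like lockdep_pin_lock() */

	rq->pinned--;
	rq_unlock(rq);
}

int main(void)
{
	struct rq_model rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .pinned = 0 };

	pick_next_task_model(&rq);
	printf("picked idle without tripping the pin check\n");
	return 0;
}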