Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--	kernel/events/core.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ea02109aee77..f8e5c443d74e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1050,13 +1050,13 @@ retry:
 	/*
 	 * One of the few rules of preemptible RCU is that one cannot do
 	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
-	 * part of the read side critical section was preemptible -- see
+	 * part of the read side critical section was irqs-enabled -- see
 	 * rcu_read_unlock_special().
 	 *
 	 * Since ctx->lock nests under rq->lock we must ensure the entire read
-	 * side critical section is non-preemptible.
+	 * side critical section has interrupts disabled.
 	 */
-	preempt_disable();
+	local_irq_save(*flags);
 	rcu_read_lock();
 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 	if (ctx) {
@@ -1070,21 +1070,22 @@ retry:
 		 * if so. If we locked the right context, then it
 		 * can't get swapped on us any more.
 		 */
-		raw_spin_lock_irqsave(&ctx->lock, *flags);
+		raw_spin_lock(&ctx->lock);
 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
-			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock(&ctx->lock);
 			rcu_read_unlock();
-			preempt_enable();
+			local_irq_restore(*flags);
 			goto retry;
 		}
 
 		if (!atomic_inc_not_zero(&ctx->refcount)) {
-			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock(&ctx->lock);
 			ctx = NULL;
 		}
 	}
 	rcu_read_unlock();
-	preempt_enable();
+	if (!ctx)
+		local_irq_restore(*flags);
 	return ctx;
 }
 
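
For readers following the locking change, below is a sketch of how perf_lock_task_context() reads with this patch applied. Everything inside the two hunks is taken verbatim from the diff above; the function signature, the local declaration and the retry: label lie outside the hunks and are filled in from the surrounding kernel source, so treat them as an approximation rather than the exact text of the tree at f8e5c443d74e.

/*
 * Sketch of perf_lock_task_context() after this change (approximate;
 * lines outside the two hunks are reconstructed from context).
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * Disable interrupts before entering the RCU read-side critical
	 * section: preemptible RCU must not see rcu_read_unlock() called
	 * while a scheduler (or nested) lock is held if any part of the
	 * critical section ran with irqs enabled, because
	 * rcu_read_unlock_special() may then need rq->lock itself.
	 */
	local_irq_save(*flags);
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * Interrupts are already off, so a plain raw_spin_lock()
		 * replaces the former raw_spin_lock_irqsave().
		 */
		raw_spin_lock(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			/* ctx was swapped underneath us; drop everything and retry. */
			raw_spin_unlock(&ctx->lock);
			rcu_read_unlock();
			local_irq_restore(*flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			/* ctx is on its way to being freed; report failure. */
			raw_spin_unlock(&ctx->lock);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	/*
	 * On failure, restore interrupts here. On success, ctx->lock is
	 * still held with irqs disabled and the caller releases both
	 * using the flags saved through *flags.
	 */
	if (!ctx)
		local_irq_restore(*flags);
	return ctx;
}

The net effect is that the whole RCU read-side critical section now runs with interrupts off, which satisfies the preemptible-RCU rule quoted in the first hunk and lets ctx->lock be taken without the _irqsave variant; on the success path the saved flags are simply handed back to the caller through *flags.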