-rw-r--r--  kernel/rcu/rcutorture.c  16
1 file changed, 13 insertions, 3 deletions
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7e01e9a87352..f82515cded34 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2053,6 +2053,14 @@ static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
 	atomic_inc(&barrier_cbs_invoked);
 }
 
+/* IPI handler to get callback posted on desired CPU, if online. */
+static void rcu_torture_barrier1cb(void *rcu_void)
+{
+	struct rcu_head *rhp = rcu_void;
+
+	cur_ops->call(rhp, rcu_torture_barrier_cbf);
+}
+
 /* kthread function to register callbacks used to test RCU barriers. */
 static int rcu_torture_barrier_cbs(void *arg)
 {
@@ -2076,9 +2084,11 @@ static int rcu_torture_barrier_cbs(void *arg)
 		 * The above smp_load_acquire() ensures barrier_phase load
 		 * is ordered before the following ->call().
 		 */
-		local_irq_disable(); /* Just to test no-irq call_rcu(). */
-		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
-		local_irq_enable();
+		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
+					     &rcu, 1)) {
+			// IPI failed, so use direct call from current CPU.
+			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
+		}
 		if (atomic_dec_and_test(&barrier_cbs_count))
 			wake_up(&barrier_wq);
 	} while (!torture_must_stop());
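
For reference, the change above replaces posting the callback under local_irq_disable() with an IPI: smp_call_function_single() runs rcu_torture_barrier1cb() on the kthread's designated CPU so the callback is posted from that CPU, and if the IPI cannot be delivered (nonzero return, e.g. the CPU is offline) the callback is posted directly from the current CPU instead. A minimal sketch of that post-on-CPU-with-fallback pattern follows; do_work() and run_on_cpu_or_here() are hypothetical names used for illustration only and are not part of this patch.

/* Illustrative sketch only (hypothetical helpers), not from the patch. */
#include <linux/atomic.h>
#include <linux/smp.h>

/* Handler invoked via IPI on the target CPU (or directly on fallback). */
static void do_work(void *info)
{
	atomic_inc((atomic_t *)info);
}

/* Run do_work() on @cpu if possible, otherwise on the current CPU. */
static void run_on_cpu_or_here(int cpu, atomic_t *counter)
{
	/*
	 * smp_call_function_single() returns nonzero (e.g. -ENXIO) when
	 * the target CPU is offline, in which case fall back to a direct
	 * call, mirroring rcu_torture_barrier_cbs() above.
	 */
	if (smp_call_function_single(cpu, do_work, counter, 1))
		do_work(counter);
}

Because the wait argument is 1, smp_call_function_single() does not return until the handler has run, so the work is already done (here, the callback already queued) before execution continues on the calling CPU.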