author    Paul E. McKenney <paulmck@kernel.org>    2020-09-09 21:51:09 -0700
committer Paul E. McKenney <paulmck@kernel.org>    2020-09-16 16:32:37 -0700
commit    574de8766f6efa0757f3c7ac15c9eb29a4636861 (patch)
tree      8c45b3abeda484174910c1eb7d51bed6b2c7a969 /kernel
parent    2393a613d2e3da35bd73ee55d9dca0fb04810955 (diff)
rcu-tasks: Selectively enable more RCU Tasks Trace IPIs
Many workloads are quite sensitive to IPIs, and such workloads should build kernels with CONFIG_TASKS_TRACE_RCU_READ_MB=y to prevent RCU Tasks Trace from using them under normal conditions. However, other workloads are quite happy to permit more IPIs if doing so makes BPF program updates go faster. This commit therefore sets the default value of the rcupdate.rcu_task_ipi_delay kernel boot parameter to zero for kernels built with CONFIG_TASKS_TRACE_RCU_READ_MB=n, while retaining the old default of (HZ / 2) for kernels that have indicated an aversion to IPIs via CONFIG_TASKS_TRACE_RCU_READ_MB=y.

Link: https://lore.kernel.org/bpf/CAADnVQK_AiX+S_L_A4CQWT11XyveppBbQSQgH_qWGyzu_E8Yeg@mail.gmail.com/
Reported-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: <bpf@vger.kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
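For context, IS_ENABLED() is the kernel's standard way of folding a Kconfig choice into a C expression: it expands to the constant 1 when the option is set and 0 otherwise, so the new default below is resolved entirely at compile time. A minimal sketch of the idiom, using the real IS_ENABLED() and HZ macros but a hypothetical DEMO_IPI_DELAY name:

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/jiffies.h>	/* HZ */

/*
 * IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) is a compile-time 1 or 0,
 * so the compiler folds this ternary into a single constant: HZ / 2 on
 * kernels that prefer memory barriers to IPIs, and 0 everywhere else.
 */
#define DEMO_IPI_DELAY \
	(IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)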
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/rcu/tasks.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index ad8c4f3f44d2..2b4df237b598 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -83,7 +83,7 @@ static struct rcu_tasks rt_name = \
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
/* Avoid IPIing CPUs early in the grace period. */
-#define RCU_TASK_IPI_DELAY (HZ / 2)
+#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);
@@ -916,7 +916,8 @@ static void trc_wait_for_one_reader(struct task_struct *t,
// If currently running, send an IPI, either way, add to list.
trc_add_holdout(t, bhp);
- if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
+ if (task_curr(t) &&
+ time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
// The task is currently running, so try IPIing it.
cpu = task_cpu(t);
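
A note on the second hunk: time_after(a, b) from include/linux/jiffies.h is a strictly-greater-than comparison, so with the new zero default the old test would still suppress IPIs during the jiffy in which the grace period started (jiffies == gp_start is not "after" gp_start). Comparing jiffies + 1 instead makes the test behave inclusively, so a delay of zero really does permit an immediate IPI. A self-contained userspace sketch of the semantics, with time_after() reduced to its wraparound-safe definition and hypothetical sample values:

#include <stdio.h>

/* Simplified stand-in for the macro in include/linux/jiffies.h. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long gp_start = 1000;	/* hypothetical grace-period start */
	unsigned long jiffies = 1000;	/* same jiffy, rcu_task_ipi_delay == 0 */
	unsigned long delay = 0;

	/* Old test: prints 0, so no IPI despite the zero delay. */
	printf("old: %d\n", time_after(jiffies, gp_start + delay));

	/* New test: prints 1, so the IPI can be sent immediately. */
	printf("new: %d\n", time_after(jiffies + 1, gp_start + delay));

	return 0;
}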