author		Paul E. McKenney <paulmck@kernel.org>	2020-09-09 22:05:41 -0700
committer	Paul E. McKenney <paulmck@kernel.org>	2020-09-16 16:32:37 -0700
commit		4fe192dfbe5ba9780df699d411aa4f25ba24cf61 (patch)
tree		7996b27fe887a941a28ac6ad5e69efd62afd77ad
parent		574de8766f6efa0757f3c7ac15c9eb29a4636861 (diff)
rcu-tasks: Shorten per-grace-period sleep for RCU Tasks Trace
The various RCU tasks flavors currently wait 100 milliseconds between each
grace period in order to prevent CPU-bound loops and to favor efficiency
over latency.  However, RCU Tasks Trace needs to have a grace-period
latency of roughly 25 milliseconds, which is completely infeasible given
the 100-millisecond per-grace-period sleep.  This commit therefore reduces
this sleep duration to 5 milliseconds (or one jiffy, whichever is longer)
in kernels built with CONFIG_TASKS_TRACE_RCU_READ_MB=n.

Link: https://lore.kernel.org/bpf/CAADnVQK_AiX+S_L_A4CQWT11XyveppBbQSQgH_qWGyzu_E8Yeg@mail.gmail.com/
Reported-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: <bpf@vger.kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
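A note on the arithmetic in the patch below: HZ / 200 targets 5 milliseconds,
but with integer division any HZ below 200 (for example CONFIG_HZ=100) yields
zero jiffies, which would turn the "paranoid sleep" into a tight loop; hence
the clamp to a one-jiffy minimum. A minimal standalone C sketch of the same
computation (not part of this commit, assuming only the standard library):

	#include <stdio.h>

	/* Mirror of the patch's computation: target ~5 ms, never 0 jiffies. */
	static int gp_sleep_jiffies(int hz)
	{
		int gp_sleep = hz / 200;	/* Integer division: ~5 ms worth of jiffies. */

		if (gp_sleep <= 0)		/* Clamp so the sleep is at least one jiffy. */
			gp_sleep = 1;
		return gp_sleep;
	}

	int main(void)
	{
		int hz_vals[] = { 100, 250, 300, 1000 };	/* Common CONFIG_HZ values. */

		for (int i = 0; i < 4; i++) {
			int hz = hz_vals[i];
			int j = gp_sleep_jiffies(hz);

			/* One jiffy lasts 1000 / HZ milliseconds. */
			printf("HZ=%4d: gp_sleep = %d jiffy(ies) = %d ms\n",
			       hz, j, j * 1000 / hz);
		}
		return 0;
	}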
-rw-r--r--	kernel/rcu/tasks.h | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 2b4df237b598..a0eaed522277 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -28,6 +28,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  * @gp_func: This flavor's grace-period-wait function.
  * @gp_state: Grace period's most recent state transition (debugging).
+ * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
  * @init_fract: Initial backoff sleep interval.
  * @gp_jiffies: Time of last @gp_state transition.
  * @gp_start: Most recent grace-period start in jiffies.
@@ -49,6 +50,7 @@ struct rcu_tasks {
 	struct wait_queue_head cbs_wq;
 	raw_spinlock_t cbs_lock;
 	int gp_state;
+	int gp_sleep;
 	int init_fract;
 	unsigned long gp_jiffies;
 	unsigned long gp_start;
@@ -233,7 +235,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 			cond_resched();
 		}
 		/* Paranoid sleep to keep this from entering a tight loop */
-		schedule_timeout_idle(HZ/10);
+		schedule_timeout_idle(rtp->gp_sleep);
 
 		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
 	}
@@ -557,6 +559,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
 
 static int __init rcu_spawn_tasks_kthread(void)
 {
+	rcu_tasks.gp_sleep = HZ / 10;
 	rcu_tasks.init_fract = 10;
 	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
 	rcu_tasks.pertask_func = rcu_tasks_pertask;
@@ -690,6 +693,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
 
static int __init rcu_spawn_tasks_rude_kthread(void)
 {
+	rcu_tasks_rude.gp_sleep = HZ / 10;
 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
 	return 0;
 }
@@ -1170,8 +1174,12 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
 static int __init rcu_spawn_tasks_trace_kthread(void)
 {
 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
+		rcu_tasks_trace.gp_sleep = HZ / 10;
 		rcu_tasks_trace.init_fract = 10;
 	} else {
+		rcu_tasks_trace.gp_sleep = HZ / 200;
+		if (rcu_tasks_trace.gp_sleep <= 0)
+			rcu_tasks_trace.gp_sleep = 1;
 		rcu_tasks_trace.init_fract = HZ / 5;
 		if (rcu_tasks_trace.init_fract <= 0)
 			rcu_tasks_trace.init_fract = 1;
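Worked out for common CONFIG_HZ values, the new sleep in the non-READ_MB
branch above is: HZ=1000 gives HZ / 200 = 5 jiffies (5 ms); HZ=250 gives
1 jiffy (4 ms); HZ=100 gives 0, clamped to the 1-jiffy minimum (10 ms).
Kernels built with CONFIG_TASKS_TRACE_RCU_READ_MB=y retain the original
HZ / 10 (100-millisecond) sleep.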