-rw-r--r--  Documentation/scheduler/sched-design-CFS.txt |  7 +------
-rw-r--r--  include/linux/sched.h                        |  2 +-
-rw-r--r--  kernel/sched.c                               |  5 ++---
-rw-r--r--  kernel/sched_idletask.c                      |  2 --
-rw-r--r--  kernel/sched_stoptask.c                      |  2 --
5 files changed, 4 insertions(+), 14 deletions(-)
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 8239ebbcddce..99961993257a 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -164,7 +164,7 @@ This is the (partial) list of the hooks:
It puts the scheduling entity (task) into the red-black tree and
increments the nr_running variable.
- - dequeue_tree(...)
+ - dequeue_task(...)
When a task is no longer runnable, this function is called to keep the
corresponding scheduling entity out of the red-black tree. It decrements
@@ -195,11 +195,6 @@ This is the (partial) list of the hooks:
This function is mostly called from time tick functions; it might lead to
process switch. This drives the running preemption.
- - task_new(...)
-
- The core scheduler gives the scheduling module an opportunity to manage new
- task startup. The CFS scheduling module uses it for group scheduling, while
- the scheduling module for a real-time task does not use it.
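
The hooks listed in this document are function pointers collected in struct
sched_class; the removed task_new entry reflects that this hook was replaced
by task_fork in the class table. Below is a toy, user-space C model of the
hook-table pattern -- the toy_* names are invented for this sketch, and only
the field names mirror the kernel's struct sched_class:

    #include <stdio.h>

    /* Toy user-space model of the hook table.  A "scheduling class"
     * is a struct of function pointers that the core invokes at
     * well-defined events; unimplemented hooks stay NULL. */
    struct toy_rq { int nr_running; };

    struct toy_sched_class {
            void (*enqueue_task)(struct toy_rq *rq);
            void (*dequeue_task)(struct toy_rq *rq);
    };

    /* kernel: these would insert/remove the entity in the rbtree */
    static void enqueue_task_fair(struct toy_rq *rq) { rq->nr_running++; }
    static void dequeue_task_fair(struct toy_rq *rq) { rq->nr_running--; }

    static const struct toy_sched_class fair_class = {
            .enqueue_task   = enqueue_task_fair,
            .dequeue_task   = dequeue_task_fair,
    };

    int main(void)
    {
            struct toy_rq rq = { 0 };

            fair_class.enqueue_task(&rq);   /* task becomes runnable */
            fair_class.dequeue_task(&rq);   /* task blocks or exits */
            printf("nr_running = %d\n", rq.nr_running);
            return 0;
    }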
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b8369d522bf8..83bd2e2982fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -517,7 +517,7 @@ struct thread_group_cputimer {
struct autogroup;
/*
- * NOTE! "signal_struct" does not have it's own
+ * NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
* implies a shared sighand_struct, so locking
* sighand_struct is always a proper superset of
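
The comment being corrected also states a real locking rule: per-signal state
has no lock of its own and is protected by the shared sighand's lock instead.
A minimal sketch of honoring that rule via the existing lock_task_sighand()
helper (the group_exit_code update is just a hypothetical field access):

    unsigned long flags;

    if (lock_task_sighand(tsk, &flags)) {
            /* tsk->signal is stable here: locking sighand_struct is a
             * superset of any signal_struct locking, per the comment. */
            tsk->signal->group_exit_code = 0;       /* hypothetical update */
            unlock_task_sighand(tsk, &flags);
    }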
diff --git a/kernel/sched.c b/kernel/sched.c
index ae659b99ce73..f592ce6f8616 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5473,6 +5473,8 @@ EXPORT_SYMBOL(yield);
* yield_to - yield the current processor to another thread in
* your thread group, or accelerate that thread toward the
* processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
*
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
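
As a usage sketch of the now-documented parameters: yield_to() reports whether
a reschedule actually happened, and an RCU read section is one way to meet the
"target can't go away" requirement stated above (nudge_task() is a
hypothetical wrapper, not a kernel function):

    /* Hypothetical helper: directed yield toward @target, e.g. a
     * spinning vcpu yielding to the lock holder's task. */
    static bool nudge_task(struct task_struct *target)
    {
            bool yielded;

            rcu_read_lock();                  /* keeps *target alive */
            yielded = yield_to(target, true); /* preemption allowed */
            rcu_read_unlock();
            return yielded;
    }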
@@ -8449,7 +8451,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se;
- struct rq *rq;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8462,8 +8463,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
tg->shares = NICE_0_LOAD;
for_each_possible_cpu(i) {
- rq = cpu_rq(i);
-
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index c82f26c1b7c3..a776a6396427 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -94,6 +94,4 @@ static const struct sched_class idle_sched_class = {
.prio_changed = prio_changed_idle,
.switched_to = switched_to_idle,
-
- /* no .task_new for idle tasks */
};
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 84ec9bcf82d9..1ba2bd40fdac 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -102,6 +102,4 @@ static const struct sched_class stop_sched_class = {
.prio_changed = prio_changed_stop,
.switched_to = switched_to_stop,
-
- /* no .task_new for stop tasks */
};
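
Both deleted comments pointed at a hook that no longer exists in struct
sched_class. No placeholder is needed because, with designated initializers,
any hook a class does not set is implicitly NULL, and the core scheduler
checks the pointer before calling -- a sketch of that core-side pattern, as
in the fork path of kernel/sched.c:

    /* Optional hook: NULL unless the class installed one. */
    if (p->sched_class->task_fork)
            p->sched_class->task_fork(p);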