From 974271c485a4d8bb801decc616748f90aafb07ec Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 12 Jul 2012 14:46:37 -0700 Subject: workqueue: don't use WQ_HIGHPRI for unbound workqueues Unbound wqs aren't concurrency-managed and try to execute work items as soon as possible. This is currently achieved by implicitly setting %WQ_HIGHPRI on all unbound workqueues; however, WQ_HIGHPRI implementation is about to be restructured and this usage won't be valid anymore. Add an explicit chain-wakeup path for unbound workqueues in process_one_work() instead of piggy backing on %WQ_HIGHPRI. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9a3128dc67df..27637c284cb9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -580,6 +580,10 @@ static bool __need_more_worker(struct global_cwq *gcwq) /* * Need to wake up a worker? Called from anything but currently * running workers. + * + * Note that, because unbound workers never contribute to nr_running, this + * function will always return %true for unbound gcwq as long as the + * worklist isn't empty. */ static bool need_more_worker(struct global_cwq *gcwq) { @@ -1867,6 +1871,13 @@ __acquires(&gcwq->lock) if (unlikely(cpu_intensive)) worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); + /* + * Unbound gcwq isn't concurrency managed and work items should be + * executed ASAP. Wake up another worker if necessary. + */ + if ((worker->flags & WORKER_UNBOUND) && need_more_worker(gcwq)) + wake_up_worker(gcwq); + spin_unlock_irq(&gcwq->lock); work_clear_pending(work); @@ -2984,13 +2995,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, if (flags & WQ_MEM_RECLAIM) flags |= WQ_RESCUER; - /* - * Unbound workqueues aren't concurrency managed and should be - * dispatched to workers immediately. - */ - if (flags & WQ_UNBOUND) - flags |= WQ_HIGHPRI; - max_active = max_active ?: WQ_DFL_ACTIVE; max_active = wq_clamp_max_active(max_active, flags, wq->name); -- cgit v1.2.3 From bd7bdd43dcb81bb08240b9401b36a104f77dc135 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 12 Jul 2012 14:46:37 -0700 Subject: workqueue: factor out worker_pool from global_cwq Move worklist and all worker management fields from global_cwq into the new struct worker_pool. worker_pool points back to the containing gcwq. worker and cpu_workqueue_struct are updated to point to worker_pool instead of gcwq too. This change is mechanical and doesn't introduce any functional difference other than rearranging of fields and an added level of indirection in some places. This is to prepare for multiple pools per gcwq. v2: Comment typo fixes as suggested by Namhyung. Signed-off-by: Tejun Heo Cc: Namhyung Kim --- kernel/workqueue.c | 216 +++++++++++++++++++++++++++++------------------------ 1 file changed, 117 insertions(+), 99 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 27637c284cb9..61f154467026 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -115,6 +115,7 @@ enum { */ struct global_cwq; +struct worker_pool; /* * The poor guys doing the actual heavy lifting. 
All on-duty workers @@ -131,7 +132,7 @@ struct worker { struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */ struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ - struct global_cwq *gcwq; /* I: the associated gcwq */ + struct worker_pool *pool; /* I: the associated pool */ /* 64 bytes boundary on 64bit, 32 on 32bit */ unsigned long last_active; /* L: last active timestamp */ unsigned int flags; /* X: flags */ @@ -139,6 +140,21 @@ struct worker { struct work_struct rebind_work; /* L: rebind worker to cpu */ }; +struct worker_pool { + struct global_cwq *gcwq; /* I: the owning gcwq */ + + struct list_head worklist; /* L: list of pending works */ + int nr_workers; /* L: total number of workers */ + int nr_idle; /* L: currently idle ones */ + + struct list_head idle_list; /* X: list of idle workers */ + struct timer_list idle_timer; /* L: worker idle timeout */ + struct timer_list mayday_timer; /* L: SOS timer for workers */ + + struct ida worker_ida; /* L: for worker IDs */ + struct worker *first_idle; /* L: first idle worker */ +}; + /* * Global per-cpu workqueue. There's one and only one for each cpu * and all works are queued and processed here regardless of their @@ -146,27 +162,18 @@ struct worker { */ struct global_cwq { spinlock_t lock; /* the gcwq lock */ - struct list_head worklist; /* L: list of pending works */ unsigned int cpu; /* I: the associated cpu */ unsigned int flags; /* L: GCWQ_* flags */ - int nr_workers; /* L: total number of workers */ - int nr_idle; /* L: currently idle ones */ - - /* workers are chained either in the idle_list or busy_hash */ - struct list_head idle_list; /* X: list of idle workers */ + /* workers are chained either in busy_hash or pool idle_list */ struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; /* L: hash of busy workers */ - struct timer_list idle_timer; /* L: worker idle timeout */ - struct timer_list mayday_timer; /* L: SOS timer for dworkers */ - - struct ida worker_ida; /* L: for worker IDs */ + struct worker_pool pool; /* the worker pools */ struct task_struct *trustee; /* L: for gcwq shutdown */ unsigned int trustee_state; /* L: trustee state */ wait_queue_head_t trustee_wait; /* trustee wait */ - struct worker *first_idle; /* L: first idle worker */ } ____cacheline_aligned_in_smp; /* @@ -175,7 +182,7 @@ struct global_cwq { * aligned at two's power of the number of flag bits. */ struct cpu_workqueue_struct { - struct global_cwq *gcwq; /* I: the associated gcwq */ + struct worker_pool *pool; /* I: the associated pool */ struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ int flush_color; /* L: flushing color */ @@ -555,7 +562,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) if (data & WORK_STRUCT_CWQ) return ((struct cpu_workqueue_struct *) - (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq; + (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq; cpu = data >> WORK_STRUCT_FLAG_BITS; if (cpu == WORK_CPU_NONE) @@ -587,13 +594,13 @@ static bool __need_more_worker(struct global_cwq *gcwq) */ static bool need_more_worker(struct global_cwq *gcwq) { - return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq); + return !list_empty(&gcwq->pool.worklist) && __need_more_worker(gcwq); } /* Can I start working? Called from busy but !running workers. */ static bool may_start_working(struct global_cwq *gcwq) { - return gcwq->nr_idle; + return gcwq->pool.nr_idle; } /* Do I need to keep working? Called from currently running workers. 
*/ @@ -601,7 +608,7 @@ static bool keep_working(struct global_cwq *gcwq) { atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); - return !list_empty(&gcwq->worklist) && + return !list_empty(&gcwq->pool.worklist) && (atomic_read(nr_running) <= 1 || gcwq->flags & GCWQ_HIGHPRI_PENDING); } @@ -622,8 +629,8 @@ static bool need_to_manage_workers(struct global_cwq *gcwq) static bool too_many_workers(struct global_cwq *gcwq) { bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS; - int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */ - int nr_busy = gcwq->nr_workers - nr_idle; + int nr_idle = gcwq->pool.nr_idle + managing; /* manager is considered idle */ + int nr_busy = gcwq->pool.nr_workers - nr_idle; return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; } @@ -635,10 +642,10 @@ static bool too_many_workers(struct global_cwq *gcwq) /* Return the first worker. Safe with preemption disabled */ static struct worker *first_worker(struct global_cwq *gcwq) { - if (unlikely(list_empty(&gcwq->idle_list))) + if (unlikely(list_empty(&gcwq->pool.idle_list))) return NULL; - return list_first_entry(&gcwq->idle_list, struct worker, entry); + return list_first_entry(&gcwq->pool.idle_list, struct worker, entry); } /** @@ -696,7 +703,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, unsigned int cpu) { struct worker *worker = kthread_data(task), *to_wakeup = NULL; - struct global_cwq *gcwq = get_gcwq(cpu); + struct worker_pool *pool = worker->pool; + struct global_cwq *gcwq = pool->gcwq; atomic_t *nr_running = get_gcwq_nr_running(cpu); if (worker->flags & WORKER_NOT_RUNNING) @@ -716,7 +724,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, * could be manipulating idle_list, so dereferencing idle_list * without gcwq lock is safe. */ - if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist)) + if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist)) to_wakeup = first_worker(gcwq); return to_wakeup ? 
to_wakeup->task : NULL; } @@ -737,7 +745,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, static inline void worker_set_flags(struct worker *worker, unsigned int flags, bool wakeup) { - struct global_cwq *gcwq = worker->gcwq; + struct worker_pool *pool = worker->pool; + struct global_cwq *gcwq = pool->gcwq; WARN_ON_ONCE(worker->task != current); @@ -752,7 +761,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags, if (wakeup) { if (atomic_dec_and_test(nr_running) && - !list_empty(&gcwq->worklist)) + !list_empty(&pool->worklist)) wake_up_worker(gcwq); } else atomic_dec(nr_running); @@ -773,7 +782,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags, */ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) { - struct global_cwq *gcwq = worker->gcwq; + struct global_cwq *gcwq = worker->pool->gcwq; unsigned int oflags = worker->flags; WARN_ON_ONCE(worker->task != current); @@ -894,9 +903,9 @@ static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq, struct work_struct *twork; if (likely(!(cwq->wq->flags & WQ_HIGHPRI))) - return &gcwq->worklist; + return &gcwq->pool.worklist; - list_for_each_entry(twork, &gcwq->worklist, entry) { + list_for_each_entry(twork, &gcwq->pool.worklist, entry) { struct cpu_workqueue_struct *tcwq = get_work_cwq(twork); if (!(tcwq->wq->flags & WQ_HIGHPRI)) @@ -924,7 +933,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags) { - struct global_cwq *gcwq = cwq->gcwq; + struct global_cwq *gcwq = cwq->pool->gcwq; /* we own @work, set data and link */ set_work_cwq(work, cwq, extra_flags); @@ -1196,7 +1205,8 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on); */ static void worker_enter_idle(struct worker *worker) { - struct global_cwq *gcwq = worker->gcwq; + struct worker_pool *pool = worker->pool; + struct global_cwq *gcwq = pool->gcwq; BUG_ON(worker->flags & WORKER_IDLE); BUG_ON(!list_empty(&worker->entry) && @@ -1204,15 +1214,15 @@ static void worker_enter_idle(struct worker *worker) /* can't use worker_set_flags(), also called from start_worker() */ worker->flags |= WORKER_IDLE; - gcwq->nr_idle++; + pool->nr_idle++; worker->last_active = jiffies; /* idle_list is LIFO */ - list_add(&worker->entry, &gcwq->idle_list); + list_add(&worker->entry, &pool->idle_list); if (likely(!(worker->flags & WORKER_ROGUE))) { - if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) - mod_timer(&gcwq->idle_timer, + if (too_many_workers(gcwq) && !timer_pending(&pool->idle_timer)) + mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); } else wake_up_all(&gcwq->trustee_wait); @@ -1223,7 +1233,7 @@ static void worker_enter_idle(struct worker *worker) * warning may trigger spuriously. Check iff trustee is idle. 
*/ WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE && - gcwq->nr_workers == gcwq->nr_idle && + pool->nr_workers == pool->nr_idle && atomic_read(get_gcwq_nr_running(gcwq->cpu))); } @@ -1238,11 +1248,11 @@ static void worker_enter_idle(struct worker *worker) */ static void worker_leave_idle(struct worker *worker) { - struct global_cwq *gcwq = worker->gcwq; + struct worker_pool *pool = worker->pool; BUG_ON(!(worker->flags & WORKER_IDLE)); worker_clr_flags(worker, WORKER_IDLE); - gcwq->nr_idle--; + pool->nr_idle--; list_del_init(&worker->entry); } @@ -1279,7 +1289,7 @@ static void worker_leave_idle(struct worker *worker) static bool worker_maybe_bind_and_lock(struct worker *worker) __acquires(&gcwq->lock) { - struct global_cwq *gcwq = worker->gcwq; + struct global_cwq *gcwq = worker->pool->gcwq; struct task_struct *task = worker->task; while (true) { @@ -1321,7 +1331,7 @@ __acquires(&gcwq->lock) static void worker_rebind_fn(struct work_struct *work) { struct worker *worker = container_of(work, struct worker, rebind_work); - struct global_cwq *gcwq = worker->gcwq; + struct global_cwq *gcwq = worker->pool->gcwq; if (worker_maybe_bind_and_lock(worker)) worker_clr_flags(worker, WORKER_REBIND); @@ -1362,13 +1372,14 @@ static struct worker *alloc_worker(void) static struct worker *create_worker(struct global_cwq *gcwq, bool bind) { bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND; + struct worker_pool *pool = &gcwq->pool; struct worker *worker = NULL; int id = -1; spin_lock_irq(&gcwq->lock); - while (ida_get_new(&gcwq->worker_ida, &id)) { + while (ida_get_new(&pool->worker_ida, &id)) { spin_unlock_irq(&gcwq->lock); - if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL)) + if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) goto fail; spin_lock_irq(&gcwq->lock); } @@ -1378,7 +1389,7 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind) if (!worker) goto fail; - worker->gcwq = gcwq; + worker->pool = pool; worker->id = id; if (!on_unbound_cpu) @@ -1409,7 +1420,7 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind) fail: if (id >= 0) { spin_lock_irq(&gcwq->lock); - ida_remove(&gcwq->worker_ida, id); + ida_remove(&pool->worker_ida, id); spin_unlock_irq(&gcwq->lock); } kfree(worker); @@ -1428,7 +1439,7 @@ fail: static void start_worker(struct worker *worker) { worker->flags |= WORKER_STARTED; - worker->gcwq->nr_workers++; + worker->pool->nr_workers++; worker_enter_idle(worker); wake_up_process(worker->task); } @@ -1444,7 +1455,8 @@ static void start_worker(struct worker *worker) */ static void destroy_worker(struct worker *worker) { - struct global_cwq *gcwq = worker->gcwq; + struct worker_pool *pool = worker->pool; + struct global_cwq *gcwq = pool->gcwq; int id = worker->id; /* sanity check frenzy */ @@ -1452,9 +1464,9 @@ static void destroy_worker(struct worker *worker) BUG_ON(!list_empty(&worker->scheduled)); if (worker->flags & WORKER_STARTED) - gcwq->nr_workers--; + pool->nr_workers--; if (worker->flags & WORKER_IDLE) - gcwq->nr_idle--; + pool->nr_idle--; list_del_init(&worker->entry); worker->flags |= WORKER_DIE; @@ -1465,7 +1477,7 @@ static void destroy_worker(struct worker *worker) kfree(worker); spin_lock_irq(&gcwq->lock); - ida_remove(&gcwq->worker_ida, id); + ida_remove(&pool->worker_ida, id); } static void idle_worker_timeout(unsigned long __gcwq) @@ -1479,11 +1491,12 @@ static void idle_worker_timeout(unsigned long __gcwq) unsigned long expires; /* idle_list is kept in LIFO order, check the last one */ - worker = list_entry(gcwq->idle_list.prev, struct 
worker, entry); + worker = list_entry(gcwq->pool.idle_list.prev, struct worker, + entry); expires = worker->last_active + IDLE_WORKER_TIMEOUT; if (time_before(jiffies, expires)) - mod_timer(&gcwq->idle_timer, expires); + mod_timer(&gcwq->pool.idle_timer, expires); else { /* it's been idle for too long, wake up manager */ gcwq->flags |= GCWQ_MANAGE_WORKERS; @@ -1504,7 +1517,7 @@ static bool send_mayday(struct work_struct *work) return false; /* mayday mayday mayday */ - cpu = cwq->gcwq->cpu; + cpu = cwq->pool->gcwq->cpu; /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ if (cpu == WORK_CPU_UNBOUND) cpu = 0; @@ -1527,13 +1540,13 @@ static void gcwq_mayday_timeout(unsigned long __gcwq) * allocation deadlock. Send distress signals to * rescuers. */ - list_for_each_entry(work, &gcwq->worklist, entry) + list_for_each_entry(work, &gcwq->pool.worklist, entry) send_mayday(work); } spin_unlock_irq(&gcwq->lock); - mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL); + mod_timer(&gcwq->pool.mayday_timer, jiffies + MAYDAY_INTERVAL); } /** @@ -1568,14 +1581,14 @@ restart: spin_unlock_irq(&gcwq->lock); /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ - mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); + mod_timer(&gcwq->pool.mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); while (true) { struct worker *worker; worker = create_worker(gcwq, true); if (worker) { - del_timer_sync(&gcwq->mayday_timer); + del_timer_sync(&gcwq->pool.mayday_timer); spin_lock_irq(&gcwq->lock); start_worker(worker); BUG_ON(need_to_create_worker(gcwq)); @@ -1592,7 +1605,7 @@ restart: break; } - del_timer_sync(&gcwq->mayday_timer); + del_timer_sync(&gcwq->pool.mayday_timer); spin_lock_irq(&gcwq->lock); if (need_to_create_worker(gcwq)) goto restart; @@ -1622,11 +1635,12 @@ static bool maybe_destroy_workers(struct global_cwq *gcwq) struct worker *worker; unsigned long expires; - worker = list_entry(gcwq->idle_list.prev, struct worker, entry); + worker = list_entry(gcwq->pool.idle_list.prev, struct worker, + entry); expires = worker->last_active + IDLE_WORKER_TIMEOUT; if (time_before(jiffies, expires)) { - mod_timer(&gcwq->idle_timer, expires); + mod_timer(&gcwq->pool.idle_timer, expires); break; } @@ -1659,7 +1673,7 @@ static bool maybe_destroy_workers(struct global_cwq *gcwq) */ static bool manage_workers(struct worker *worker) { - struct global_cwq *gcwq = worker->gcwq; + struct global_cwq *gcwq = worker->pool->gcwq; bool ret = false; if (gcwq->flags & GCWQ_MANAGING_WORKERS) @@ -1732,7 +1746,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) { struct work_struct *work = list_first_entry(&cwq->delayed_works, struct work_struct, entry); - struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); + struct list_head *pos = gcwq_determine_ins_pos(cwq->pool->gcwq, cwq); trace_workqueue_activate_work(work); move_linked_works(work, pos, NULL); @@ -1808,7 +1822,8 @@ __releases(&gcwq->lock) __acquires(&gcwq->lock) { struct cpu_workqueue_struct *cwq = get_work_cwq(work); - struct global_cwq *gcwq = cwq->gcwq; + struct worker_pool *pool = worker->pool; + struct global_cwq *gcwq = pool->gcwq; struct hlist_head *bwh = busy_worker_head(gcwq, work); bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; work_func_t f = work->func; @@ -1854,10 +1869,10 @@ __acquires(&gcwq->lock) * wake up another worker; otherwise, clear HIGHPRI_PENDING. 
*/ if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) { - struct work_struct *nwork = list_first_entry(&gcwq->worklist, - struct work_struct, entry); + struct work_struct *nwork = list_first_entry(&pool->worklist, + struct work_struct, entry); - if (!list_empty(&gcwq->worklist) && + if (!list_empty(&pool->worklist) && get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI) wake_up_worker(gcwq); else @@ -1950,7 +1965,8 @@ static void process_scheduled_works(struct worker *worker) static int worker_thread(void *__worker) { struct worker *worker = __worker; - struct global_cwq *gcwq = worker->gcwq; + struct worker_pool *pool = worker->pool; + struct global_cwq *gcwq = pool->gcwq; /* tell the scheduler that this is a workqueue worker */ worker->task->flags |= PF_WQ_WORKER; @@ -1990,7 +2006,7 @@ recheck: do { struct work_struct *work = - list_first_entry(&gcwq->worklist, + list_first_entry(&pool->worklist, struct work_struct, entry); if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { @@ -2064,14 +2080,15 @@ repeat: for_each_mayday_cpu(cpu, wq->mayday_mask) { unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); - struct global_cwq *gcwq = cwq->gcwq; + struct worker_pool *pool = cwq->pool; + struct global_cwq *gcwq = pool->gcwq; struct work_struct *work, *n; __set_current_state(TASK_RUNNING); mayday_clear_cpu(cpu, wq->mayday_mask); /* migrate to the target cpu if possible */ - rescuer->gcwq = gcwq; + rescuer->pool = pool; worker_maybe_bind_and_lock(rescuer); /* @@ -2079,7 +2096,7 @@ repeat: * process'em. */ BUG_ON(!list_empty(&rescuer->scheduled)); - list_for_each_entry_safe(work, n, &gcwq->worklist, entry) + list_for_each_entry_safe(work, n, &pool->worklist, entry) if (get_work_cwq(work) == cwq) move_linked_works(work, scheduled, &n); @@ -2216,7 +2233,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - struct global_cwq *gcwq = cwq->gcwq; + struct global_cwq *gcwq = cwq->pool->gcwq; spin_lock_irq(&gcwq->lock); @@ -2432,9 +2449,9 @@ reflush: struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); bool drained; - spin_lock_irq(&cwq->gcwq->lock); + spin_lock_irq(&cwq->pool->gcwq->lock); drained = !cwq->nr_active && list_empty(&cwq->delayed_works); - spin_unlock_irq(&cwq->gcwq->lock); + spin_unlock_irq(&cwq->pool->gcwq->lock); if (drained) continue; @@ -2474,7 +2491,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, */ smp_rmb(); cwq = get_work_cwq(work); - if (unlikely(!cwq || gcwq != cwq->gcwq)) + if (unlikely(!cwq || gcwq != cwq->pool->gcwq)) goto already_gone; } else if (wait_executing) { worker = find_worker_executing_work(gcwq, work); @@ -3017,7 +3034,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, struct global_cwq *gcwq = get_gcwq(cpu); BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); - cwq->gcwq = gcwq; + cwq->pool = &gcwq->pool; cwq->wq = wq; cwq->flush_color = -1; cwq->max_active = max_active; @@ -3344,7 +3361,7 @@ static int __cpuinit trustee_thread(void *__gcwq) gcwq->flags |= GCWQ_MANAGING_WORKERS; - list_for_each_entry(worker, &gcwq->idle_list, entry) + list_for_each_entry(worker, &gcwq->pool.idle_list, entry) worker->flags |= WORKER_ROGUE; for_each_busy_worker(worker, i, pos, gcwq) @@ -3369,7 +3386,7 @@ static int __cpuinit trustee_thread(void *__gcwq) atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); spin_unlock_irq(&gcwq->lock); - del_timer_sync(&gcwq->idle_timer); + 
del_timer_sync(&gcwq->pool.idle_timer); spin_lock_irq(&gcwq->lock); /* @@ -3391,17 +3408,17 @@ static int __cpuinit trustee_thread(void *__gcwq) * may be frozen works in freezable cwqs. Don't declare * completion while frozen. */ - while (gcwq->nr_workers != gcwq->nr_idle || + while (gcwq->pool.nr_workers != gcwq->pool.nr_idle || gcwq->flags & GCWQ_FREEZING || gcwq->trustee_state == TRUSTEE_IN_CHARGE) { int nr_works = 0; - list_for_each_entry(work, &gcwq->worklist, entry) { + list_for_each_entry(work, &gcwq->pool.worklist, entry) { send_mayday(work); nr_works++; } - list_for_each_entry(worker, &gcwq->idle_list, entry) { + list_for_each_entry(worker, &gcwq->pool.idle_list, entry) { if (!nr_works--) break; wake_up_process(worker->task); @@ -3428,11 +3445,11 @@ static int __cpuinit trustee_thread(void *__gcwq) * all workers till we're canceled. */ do { - rc = trustee_wait_event(!list_empty(&gcwq->idle_list)); - while (!list_empty(&gcwq->idle_list)) - destroy_worker(list_first_entry(&gcwq->idle_list, + rc = trustee_wait_event(!list_empty(&gcwq->pool.idle_list)); + while (!list_empty(&gcwq->pool.idle_list)) + destroy_worker(list_first_entry(&gcwq->pool.idle_list, struct worker, entry)); - } while (gcwq->nr_workers && rc >= 0); + } while (gcwq->pool.nr_workers && rc >= 0); /* * At this point, either draining has completed and no worker @@ -3441,7 +3458,7 @@ static int __cpuinit trustee_thread(void *__gcwq) * Tell the remaining busy ones to rebind once it finishes the * currently scheduled works by scheduling the rebind_work. */ - WARN_ON(!list_empty(&gcwq->idle_list)); + WARN_ON(!list_empty(&gcwq->pool.idle_list)); for_each_busy_worker(worker, i, pos, gcwq) { struct work_struct *rebind_work = &worker->rebind_work; @@ -3522,7 +3539,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, kthread_bind(new_trustee, cpu); /* fall through */ case CPU_UP_PREPARE: - BUG_ON(gcwq->first_idle); + BUG_ON(gcwq->pool.first_idle); new_worker = create_worker(gcwq, false); if (!new_worker) { if (new_trustee) @@ -3544,8 +3561,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); /* fall through */ case CPU_UP_PREPARE: - BUG_ON(gcwq->first_idle); - gcwq->first_idle = new_worker; + BUG_ON(gcwq->pool.first_idle); + gcwq->pool.first_idle = new_worker; break; case CPU_DYING: @@ -3562,8 +3579,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, gcwq->trustee_state = TRUSTEE_BUTCHER; /* fall through */ case CPU_UP_CANCELED: - destroy_worker(gcwq->first_idle); - gcwq->first_idle = NULL; + destroy_worker(gcwq->pool.first_idle); + gcwq->pool.first_idle = NULL; break; case CPU_DOWN_FAILED: @@ -3581,11 +3598,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, * take a look. 
*/ spin_unlock_irq(&gcwq->lock); - kthread_bind(gcwq->first_idle->task, cpu); + kthread_bind(gcwq->pool.first_idle->task, cpu); spin_lock_irq(&gcwq->lock); gcwq->flags |= GCWQ_MANAGE_WORKERS; - start_worker(gcwq->first_idle); - gcwq->first_idle = NULL; + start_worker(gcwq->pool.first_idle); + gcwq->pool.first_idle = NULL; break; } @@ -3794,22 +3811,23 @@ static int __init init_workqueues(void) struct global_cwq *gcwq = get_gcwq(cpu); spin_lock_init(&gcwq->lock); - INIT_LIST_HEAD(&gcwq->worklist); + gcwq->pool.gcwq = gcwq; + INIT_LIST_HEAD(&gcwq->pool.worklist); gcwq->cpu = cpu; gcwq->flags |= GCWQ_DISASSOCIATED; - INIT_LIST_HEAD(&gcwq->idle_list); + INIT_LIST_HEAD(&gcwq->pool.idle_list); for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) INIT_HLIST_HEAD(&gcwq->busy_hash[i]); - init_timer_deferrable(&gcwq->idle_timer); - gcwq->idle_timer.function = idle_worker_timeout; - gcwq->idle_timer.data = (unsigned long)gcwq; + init_timer_deferrable(&gcwq->pool.idle_timer); + gcwq->pool.idle_timer.function = idle_worker_timeout; + gcwq->pool.idle_timer.data = (unsigned long)gcwq; - setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout, + setup_timer(&gcwq->pool.mayday_timer, gcwq_mayday_timeout, (unsigned long)gcwq); - ida_init(&gcwq->worker_ida); + ida_init(&gcwq->pool.worker_ida); gcwq->trustee_state = TRUSTEE_DONE; init_waitqueue_head(&gcwq->trustee_wait); -- cgit v1.2.3 From 63d95a9150ee3bbd4117fcd609dee40313b454d9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 12 Jul 2012 14:46:37 -0700 Subject: workqueue: use @pool instead of @gcwq or @cpu where applicable Modify all functions which deal with per-pool properties to pass around @pool instead of @gcwq or @cpu. The changes in this patch are mechanical and don't caues any functional difference. This is to prepare for multiple pools per gcwq. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 218 +++++++++++++++++++++++++++-------------------------- 1 file changed, 111 insertions(+), 107 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 61f154467026..2d82f7b193a0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -471,8 +471,10 @@ static struct global_cwq *get_gcwq(unsigned int cpu) return &unbound_global_cwq; } -static atomic_t *get_gcwq_nr_running(unsigned int cpu) +static atomic_t *get_pool_nr_running(struct worker_pool *pool) { + int cpu = pool->gcwq->cpu; + if (cpu != WORK_CPU_UNBOUND) return &per_cpu(gcwq_nr_running, cpu); else @@ -578,10 +580,10 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) * assume that they're being called with gcwq->lock held. */ -static bool __need_more_worker(struct global_cwq *gcwq) +static bool __need_more_worker(struct worker_pool *pool) { - return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) || - gcwq->flags & GCWQ_HIGHPRI_PENDING; + return !atomic_read(get_pool_nr_running(pool)) || + pool->gcwq->flags & GCWQ_HIGHPRI_PENDING; } /* @@ -592,45 +594,46 @@ static bool __need_more_worker(struct global_cwq *gcwq) * function will always return %true for unbound gcwq as long as the * worklist isn't empty. */ -static bool need_more_worker(struct global_cwq *gcwq) +static bool need_more_worker(struct worker_pool *pool) { - return !list_empty(&gcwq->pool.worklist) && __need_more_worker(gcwq); + return !list_empty(&pool->worklist) && __need_more_worker(pool); } /* Can I start working? Called from busy but !running workers. 
*/ -static bool may_start_working(struct global_cwq *gcwq) +static bool may_start_working(struct worker_pool *pool) { - return gcwq->pool.nr_idle; + return pool->nr_idle; } /* Do I need to keep working? Called from currently running workers. */ -static bool keep_working(struct global_cwq *gcwq) +static bool keep_working(struct worker_pool *pool) { - atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); + atomic_t *nr_running = get_pool_nr_running(pool); - return !list_empty(&gcwq->pool.worklist) && + return !list_empty(&pool->worklist) && (atomic_read(nr_running) <= 1 || - gcwq->flags & GCWQ_HIGHPRI_PENDING); + pool->gcwq->flags & GCWQ_HIGHPRI_PENDING); } /* Do we need a new worker? Called from manager. */ -static bool need_to_create_worker(struct global_cwq *gcwq) +static bool need_to_create_worker(struct worker_pool *pool) { - return need_more_worker(gcwq) && !may_start_working(gcwq); + return need_more_worker(pool) && !may_start_working(pool); } /* Do I need to be the manager? */ -static bool need_to_manage_workers(struct global_cwq *gcwq) +static bool need_to_manage_workers(struct worker_pool *pool) { - return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS; + return need_to_create_worker(pool) || + pool->gcwq->flags & GCWQ_MANAGE_WORKERS; } /* Do we have too many workers and should some go away? */ -static bool too_many_workers(struct global_cwq *gcwq) +static bool too_many_workers(struct worker_pool *pool) { - bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS; - int nr_idle = gcwq->pool.nr_idle + managing; /* manager is considered idle */ - int nr_busy = gcwq->pool.nr_workers - nr_idle; + bool managing = pool->gcwq->flags & GCWQ_MANAGING_WORKERS; + int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ + int nr_busy = pool->nr_workers - nr_idle; return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; } @@ -640,26 +643,26 @@ static bool too_many_workers(struct global_cwq *gcwq) */ /* Return the first worker. Safe with preemption disabled */ -static struct worker *first_worker(struct global_cwq *gcwq) +static struct worker *first_worker(struct worker_pool *pool) { - if (unlikely(list_empty(&gcwq->pool.idle_list))) + if (unlikely(list_empty(&pool->idle_list))) return NULL; - return list_first_entry(&gcwq->pool.idle_list, struct worker, entry); + return list_first_entry(&pool->idle_list, struct worker, entry); } /** * wake_up_worker - wake up an idle worker - * @gcwq: gcwq to wake worker for + * @pool: worker pool to wake worker from * - * Wake up the first idle worker of @gcwq. + * Wake up the first idle worker of @pool. * * CONTEXT: * spin_lock_irq(gcwq->lock). 
*/ -static void wake_up_worker(struct global_cwq *gcwq) +static void wake_up_worker(struct worker_pool *pool) { - struct worker *worker = first_worker(gcwq); + struct worker *worker = first_worker(pool); if (likely(worker)) wake_up_process(worker->task); @@ -681,7 +684,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) struct worker *worker = kthread_data(task); if (!(worker->flags & WORKER_NOT_RUNNING)) - atomic_inc(get_gcwq_nr_running(cpu)); + atomic_inc(get_pool_nr_running(worker->pool)); } /** @@ -704,8 +707,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, { struct worker *worker = kthread_data(task), *to_wakeup = NULL; struct worker_pool *pool = worker->pool; - struct global_cwq *gcwq = pool->gcwq; - atomic_t *nr_running = get_gcwq_nr_running(cpu); + atomic_t *nr_running = get_pool_nr_running(pool); if (worker->flags & WORKER_NOT_RUNNING) return NULL; @@ -725,7 +727,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, * without gcwq lock is safe. */ if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist)) - to_wakeup = first_worker(gcwq); + to_wakeup = first_worker(pool); return to_wakeup ? to_wakeup->task : NULL; } @@ -746,7 +748,6 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags, bool wakeup) { struct worker_pool *pool = worker->pool; - struct global_cwq *gcwq = pool->gcwq; WARN_ON_ONCE(worker->task != current); @@ -757,12 +758,12 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags, */ if ((flags & WORKER_NOT_RUNNING) && !(worker->flags & WORKER_NOT_RUNNING)) { - atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); + atomic_t *nr_running = get_pool_nr_running(pool); if (wakeup) { if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist)) - wake_up_worker(gcwq); + wake_up_worker(pool); } else atomic_dec(nr_running); } @@ -782,7 +783,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags, */ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) { - struct global_cwq *gcwq = worker->pool->gcwq; + struct worker_pool *pool = worker->pool; unsigned int oflags = worker->flags; WARN_ON_ONCE(worker->task != current); @@ -796,7 +797,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) */ if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) if (!(worker->flags & WORKER_NOT_RUNNING)) - atomic_inc(get_gcwq_nr_running(gcwq->cpu)); + atomic_inc(get_pool_nr_running(pool)); } /** @@ -880,15 +881,15 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq, } /** - * gcwq_determine_ins_pos - find insertion position - * @gcwq: gcwq of interest + * pool_determine_ins_pos - find insertion position + * @pool: pool of interest * @cwq: cwq a work is being queued for * - * A work for @cwq is about to be queued on @gcwq, determine insertion + * A work for @cwq is about to be queued on @pool, determine insertion * position for the work. If @cwq is for HIGHPRI wq, the work is * queued at the head of the queue but in FIFO order with respect to * other HIGHPRI works; otherwise, at the end of the queue. This - * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that + * function also sets GCWQ_HIGHPRI_PENDING flag to hint @pool that * there are HIGHPRI works pending. * * CONTEXT: @@ -897,22 +898,22 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq, * RETURNS: * Pointer to inserstion position. 
*/ -static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq, +static inline struct list_head *pool_determine_ins_pos(struct worker_pool *pool, struct cpu_workqueue_struct *cwq) { struct work_struct *twork; if (likely(!(cwq->wq->flags & WQ_HIGHPRI))) - return &gcwq->pool.worklist; + return &pool->worklist; - list_for_each_entry(twork, &gcwq->pool.worklist, entry) { + list_for_each_entry(twork, &pool->worklist, entry) { struct cpu_workqueue_struct *tcwq = get_work_cwq(twork); if (!(tcwq->wq->flags & WQ_HIGHPRI)) break; } - gcwq->flags |= GCWQ_HIGHPRI_PENDING; + pool->gcwq->flags |= GCWQ_HIGHPRI_PENDING; return &twork->entry; } @@ -933,7 +934,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags) { - struct global_cwq *gcwq = cwq->pool->gcwq; + struct worker_pool *pool = cwq->pool; /* we own @work, set data and link */ set_work_cwq(work, cwq, extra_flags); @@ -953,8 +954,8 @@ static void insert_work(struct cpu_workqueue_struct *cwq, */ smp_mb(); - if (__need_more_worker(gcwq)) - wake_up_worker(gcwq); + if (__need_more_worker(pool)) + wake_up_worker(pool); } /* @@ -1056,7 +1057,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, if (likely(cwq->nr_active < cwq->max_active)) { trace_workqueue_activate_work(work); cwq->nr_active++; - worklist = gcwq_determine_ins_pos(gcwq, cwq); + worklist = pool_determine_ins_pos(cwq->pool, cwq); } else { work_flags |= WORK_STRUCT_DELAYED; worklist = &cwq->delayed_works; @@ -1221,7 +1222,7 @@ static void worker_enter_idle(struct worker *worker) list_add(&worker->entry, &pool->idle_list); if (likely(!(worker->flags & WORKER_ROGUE))) { - if (too_many_workers(gcwq) && !timer_pending(&pool->idle_timer)) + if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); } else @@ -1234,7 +1235,7 @@ static void worker_enter_idle(struct worker *worker) */ WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE && pool->nr_workers == pool->nr_idle && - atomic_read(get_gcwq_nr_running(gcwq->cpu))); + atomic_read(get_pool_nr_running(pool))); } /** @@ -1356,10 +1357,10 @@ static struct worker *alloc_worker(void) /** * create_worker - create a new workqueue worker - * @gcwq: gcwq the new worker will belong to + * @pool: pool the new worker will belong to * @bind: whether to set affinity to @cpu or not * - * Create a new worker which is bound to @gcwq. The returned worker + * Create a new worker which is bound to @pool. The returned worker * can be started by calling start_worker() or destroyed using * destroy_worker(). * @@ -1369,10 +1370,10 @@ static struct worker *alloc_worker(void) * RETURNS: * Pointer to the newly created worker. 
*/ -static struct worker *create_worker(struct global_cwq *gcwq, bool bind) +static struct worker *create_worker(struct worker_pool *pool, bool bind) { + struct global_cwq *gcwq = pool->gcwq; bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND; - struct worker_pool *pool = &gcwq->pool; struct worker *worker = NULL; int id = -1; @@ -1480,27 +1481,27 @@ static void destroy_worker(struct worker *worker) ida_remove(&pool->worker_ida, id); } -static void idle_worker_timeout(unsigned long __gcwq) +static void idle_worker_timeout(unsigned long __pool) { - struct global_cwq *gcwq = (void *)__gcwq; + struct worker_pool *pool = (void *)__pool; + struct global_cwq *gcwq = pool->gcwq; spin_lock_irq(&gcwq->lock); - if (too_many_workers(gcwq)) { + if (too_many_workers(pool)) { struct worker *worker; unsigned long expires; /* idle_list is kept in LIFO order, check the last one */ - worker = list_entry(gcwq->pool.idle_list.prev, struct worker, - entry); + worker = list_entry(pool->idle_list.prev, struct worker, entry); expires = worker->last_active + IDLE_WORKER_TIMEOUT; if (time_before(jiffies, expires)) - mod_timer(&gcwq->pool.idle_timer, expires); + mod_timer(&pool->idle_timer, expires); else { /* it's been idle for too long, wake up manager */ gcwq->flags |= GCWQ_MANAGE_WORKERS; - wake_up_worker(gcwq); + wake_up_worker(pool); } } @@ -1526,37 +1527,38 @@ static bool send_mayday(struct work_struct *work) return true; } -static void gcwq_mayday_timeout(unsigned long __gcwq) +static void gcwq_mayday_timeout(unsigned long __pool) { - struct global_cwq *gcwq = (void *)__gcwq; + struct worker_pool *pool = (void *)__pool; + struct global_cwq *gcwq = pool->gcwq; struct work_struct *work; spin_lock_irq(&gcwq->lock); - if (need_to_create_worker(gcwq)) { + if (need_to_create_worker(pool)) { /* * We've been trying to create a new worker but * haven't been successful. We might be hitting an * allocation deadlock. Send distress signals to * rescuers. */ - list_for_each_entry(work, &gcwq->pool.worklist, entry) + list_for_each_entry(work, &pool->worklist, entry) send_mayday(work); } spin_unlock_irq(&gcwq->lock); - mod_timer(&gcwq->pool.mayday_timer, jiffies + MAYDAY_INTERVAL); + mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); } /** * maybe_create_worker - create a new worker if necessary - * @gcwq: gcwq to create a new worker for + * @pool: pool to create a new worker for * - * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to + * Create a new worker for @pool if necessary. @pool is guaranteed to * have at least one idle worker on return from this function. If * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is - * sent to all rescuers with works scheduled on @gcwq to resolve + * sent to all rescuers with works scheduled on @pool to resolve * possible allocation deadlock. * * On return, need_to_create_worker() is guaranteed to be false and @@ -1571,52 +1573,54 @@ static void gcwq_mayday_timeout(unsigned long __gcwq) * false if no action was taken and gcwq->lock stayed locked, true * otherwise. 
*/ -static bool maybe_create_worker(struct global_cwq *gcwq) +static bool maybe_create_worker(struct worker_pool *pool) __releases(&gcwq->lock) __acquires(&gcwq->lock) { - if (!need_to_create_worker(gcwq)) + struct global_cwq *gcwq = pool->gcwq; + + if (!need_to_create_worker(pool)) return false; restart: spin_unlock_irq(&gcwq->lock); /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ - mod_timer(&gcwq->pool.mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); + mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); while (true) { struct worker *worker; - worker = create_worker(gcwq, true); + worker = create_worker(pool, true); if (worker) { - del_timer_sync(&gcwq->pool.mayday_timer); + del_timer_sync(&pool->mayday_timer); spin_lock_irq(&gcwq->lock); start_worker(worker); - BUG_ON(need_to_create_worker(gcwq)); + BUG_ON(need_to_create_worker(pool)); return true; } - if (!need_to_create_worker(gcwq)) + if (!need_to_create_worker(pool)) break; __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(CREATE_COOLDOWN); - if (!need_to_create_worker(gcwq)) + if (!need_to_create_worker(pool)) break; } - del_timer_sync(&gcwq->pool.mayday_timer); + del_timer_sync(&pool->mayday_timer); spin_lock_irq(&gcwq->lock); - if (need_to_create_worker(gcwq)) + if (need_to_create_worker(pool)) goto restart; return true; } /** * maybe_destroy_worker - destroy workers which have been idle for a while - * @gcwq: gcwq to destroy workers for + * @pool: pool to destroy workers for * - * Destroy @gcwq workers which have been idle for longer than + * Destroy @pool workers which have been idle for longer than * IDLE_WORKER_TIMEOUT. * * LOCKING: @@ -1627,20 +1631,19 @@ restart: * false if no action was taken and gcwq->lock stayed locked, true * otherwise. */ -static bool maybe_destroy_workers(struct global_cwq *gcwq) +static bool maybe_destroy_workers(struct worker_pool *pool) { bool ret = false; - while (too_many_workers(gcwq)) { + while (too_many_workers(pool)) { struct worker *worker; unsigned long expires; - worker = list_entry(gcwq->pool.idle_list.prev, struct worker, - entry); + worker = list_entry(pool->idle_list.prev, struct worker, entry); expires = worker->last_active + IDLE_WORKER_TIMEOUT; if (time_before(jiffies, expires)) { - mod_timer(&gcwq->pool.idle_timer, expires); + mod_timer(&pool->idle_timer, expires); break; } @@ -1673,7 +1676,8 @@ static bool maybe_destroy_workers(struct global_cwq *gcwq) */ static bool manage_workers(struct worker *worker) { - struct global_cwq *gcwq = worker->pool->gcwq; + struct worker_pool *pool = worker->pool; + struct global_cwq *gcwq = pool->gcwq; bool ret = false; if (gcwq->flags & GCWQ_MANAGING_WORKERS) @@ -1686,8 +1690,8 @@ static bool manage_workers(struct worker *worker) * Destroy and then create so that may_start_working() is true * on return. 
*/ - ret |= maybe_destroy_workers(gcwq); - ret |= maybe_create_worker(gcwq); + ret |= maybe_destroy_workers(pool); + ret |= maybe_create_worker(pool); gcwq->flags &= ~GCWQ_MANAGING_WORKERS; @@ -1746,7 +1750,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) { struct work_struct *work = list_first_entry(&cwq->delayed_works, struct work_struct, entry); - struct list_head *pos = gcwq_determine_ins_pos(cwq->pool->gcwq, cwq); + struct list_head *pos = pool_determine_ins_pos(cwq->pool, cwq); trace_workqueue_activate_work(work); move_linked_works(work, pos, NULL); @@ -1874,7 +1878,7 @@ __acquires(&gcwq->lock) if (!list_empty(&pool->worklist) && get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI) - wake_up_worker(gcwq); + wake_up_worker(pool); else gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; } @@ -1890,8 +1894,8 @@ __acquires(&gcwq->lock) * Unbound gcwq isn't concurrency managed and work items should be * executed ASAP. Wake up another worker if necessary. */ - if ((worker->flags & WORKER_UNBOUND) && need_more_worker(gcwq)) - wake_up_worker(gcwq); + if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) + wake_up_worker(pool); spin_unlock_irq(&gcwq->lock); @@ -1983,11 +1987,11 @@ woke_up: worker_leave_idle(worker); recheck: /* no more worker necessary? */ - if (!need_more_worker(gcwq)) + if (!need_more_worker(pool)) goto sleep; /* do we need to manage? */ - if (unlikely(!may_start_working(gcwq)) && manage_workers(worker)) + if (unlikely(!may_start_working(pool)) && manage_workers(worker)) goto recheck; /* @@ -2018,11 +2022,11 @@ recheck: move_linked_works(work, &worker->scheduled, NULL); process_scheduled_works(worker); } - } while (keep_working(gcwq)); + } while (keep_working(pool)); worker_set_flags(worker, WORKER_PREP, false); sleep: - if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) + if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker)) goto recheck; /* @@ -2107,8 +2111,8 @@ repeat: * regular worker; otherwise, we end up with 0 concurrency * and stalling the execution. */ - if (keep_working(gcwq)) - wake_up_worker(gcwq); + if (keep_working(pool)) + wake_up_worker(pool); spin_unlock_irq(&gcwq->lock); } @@ -3383,7 +3387,7 @@ static int __cpuinit trustee_thread(void *__gcwq) * keep_working() are always true as long as the worklist is * not empty. 
*/ - atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); + atomic_set(get_pool_nr_running(&gcwq->pool), 0); spin_unlock_irq(&gcwq->lock); del_timer_sync(&gcwq->pool.idle_timer); @@ -3424,9 +3428,9 @@ static int __cpuinit trustee_thread(void *__gcwq) wake_up_process(worker->task); } - if (need_to_create_worker(gcwq)) { + if (need_to_create_worker(&gcwq->pool)) { spin_unlock_irq(&gcwq->lock); - worker = create_worker(gcwq, false); + worker = create_worker(&gcwq->pool, false); spin_lock_irq(&gcwq->lock); if (worker) { worker->flags |= WORKER_ROGUE; @@ -3540,7 +3544,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, /* fall through */ case CPU_UP_PREPARE: BUG_ON(gcwq->pool.first_idle); - new_worker = create_worker(gcwq, false); + new_worker = create_worker(&gcwq->pool, false); if (!new_worker) { if (new_trustee) kthread_stop(new_trustee); @@ -3788,7 +3792,7 @@ void thaw_workqueues(void) cwq_activate_first_delayed(cwq); } - wake_up_worker(gcwq); + wake_up_worker(&gcwq->pool); spin_unlock_irq(&gcwq->lock); } @@ -3822,10 +3826,10 @@ static int __init init_workqueues(void) init_timer_deferrable(&gcwq->pool.idle_timer); gcwq->pool.idle_timer.function = idle_worker_timeout; - gcwq->pool.idle_timer.data = (unsigned long)gcwq; + gcwq->pool.idle_timer.data = (unsigned long)&gcwq->pool; setup_timer(&gcwq->pool.mayday_timer, gcwq_mayday_timeout, - (unsigned long)gcwq); + (unsigned long)&gcwq->pool); ida_init(&gcwq->pool.worker_ida); @@ -3840,7 +3844,7 @@ static int __init init_workqueues(void) if (cpu != WORK_CPU_UNBOUND) gcwq->flags &= ~GCWQ_DISASSOCIATED; - worker = create_worker(gcwq, true); + worker = create_worker(&gcwq->pool, true); BUG_ON(!worker); spin_lock_irq(&gcwq->lock); start_worker(worker); -- cgit v1.2.3 From 11ebea50dbc1ade5994b2c838a096078d4c02399 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 12 Jul 2012 14:46:37 -0700 Subject: workqueue: separate out worker_pool flags GCWQ_MANAGE_WORKERS, GCWQ_MANAGING_WORKERS and GCWQ_HIGHPRI_PENDING are per-pool properties. Add worker_pool->flags and make the above three flags per-pool flags. The changes in this patch are mechanical and don't caues any functional difference. This is to prepare for multiple pools per gcwq. 
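For illustration only (this sketch is not part of the patch): once a gcwq carries more than one pool, code that used to test gcwq->flags has to ask each pool in turn. The next patch in this series introduces for_each_worker_pool() and adds a helper of exactly this shape, sketched here:

	static bool gcwq_is_managing_workers(struct global_cwq *gcwq)
	{
		struct worker_pool *pool;

		/* POOL_MANAGING_WORKERS is now per-pool state */
		for_each_worker_pool(pool, gcwq)
			if (pool->flags & POOL_MANAGING_WORKERS)
				return true;
		return false;
	}
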
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 47 +++++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2d82f7b193a0..7a98bae635fa 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -46,11 +46,13 @@ enum { /* global_cwq flags */ - GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ - GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */ - GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ - GCWQ_FREEZING = 1 << 3, /* freeze in progress */ - GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */ + GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */ + GCWQ_FREEZING = 1 << 1, /* freeze in progress */ + + /* pool flags */ + POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ + POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */ + POOL_HIGHPRI_PENDING = 1 << 2, /* highpri works on queue */ /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ @@ -142,6 +144,7 @@ struct worker { struct worker_pool { struct global_cwq *gcwq; /* I: the owning gcwq */ + unsigned int flags; /* X: flags */ struct list_head worklist; /* L: list of pending works */ int nr_workers; /* L: total number of workers */ @@ -583,7 +586,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) static bool __need_more_worker(struct worker_pool *pool) { return !atomic_read(get_pool_nr_running(pool)) || - pool->gcwq->flags & GCWQ_HIGHPRI_PENDING; + (pool->flags & POOL_HIGHPRI_PENDING); } /* @@ -612,7 +615,7 @@ static bool keep_working(struct worker_pool *pool) return !list_empty(&pool->worklist) && (atomic_read(nr_running) <= 1 || - pool->gcwq->flags & GCWQ_HIGHPRI_PENDING); + (pool->flags & POOL_HIGHPRI_PENDING)); } /* Do we need a new worker? Called from manager. */ @@ -625,13 +628,13 @@ static bool need_to_create_worker(struct worker_pool *pool) static bool need_to_manage_workers(struct worker_pool *pool) { return need_to_create_worker(pool) || - pool->gcwq->flags & GCWQ_MANAGE_WORKERS; + (pool->flags & POOL_MANAGE_WORKERS); } /* Do we have too many workers and should some go away? */ static bool too_many_workers(struct worker_pool *pool) { - bool managing = pool->gcwq->flags & GCWQ_MANAGING_WORKERS; + bool managing = pool->flags & POOL_MANAGING_WORKERS; int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ int nr_busy = pool->nr_workers - nr_idle; @@ -889,7 +892,7 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq, * position for the work. If @cwq is for HIGHPRI wq, the work is * queued at the head of the queue but in FIFO order with respect to * other HIGHPRI works; otherwise, at the end of the queue. This - * function also sets GCWQ_HIGHPRI_PENDING flag to hint @pool that + * function also sets POOL_HIGHPRI_PENDING flag to hint @pool that * there are HIGHPRI works pending. 
* * CONTEXT: @@ -913,7 +916,7 @@ static inline struct list_head *pool_determine_ins_pos(struct worker_pool *pool, break; } - pool->gcwq->flags |= GCWQ_HIGHPRI_PENDING; + pool->flags |= POOL_HIGHPRI_PENDING; return &twork->entry; } @@ -1500,7 +1503,7 @@ static void idle_worker_timeout(unsigned long __pool) mod_timer(&pool->idle_timer, expires); else { /* it's been idle for too long, wake up manager */ - gcwq->flags |= GCWQ_MANAGE_WORKERS; + pool->flags |= POOL_MANAGE_WORKERS; wake_up_worker(pool); } } @@ -1680,11 +1683,11 @@ static bool manage_workers(struct worker *worker) struct global_cwq *gcwq = pool->gcwq; bool ret = false; - if (gcwq->flags & GCWQ_MANAGING_WORKERS) + if (pool->flags & POOL_MANAGING_WORKERS) return ret; - gcwq->flags &= ~GCWQ_MANAGE_WORKERS; - gcwq->flags |= GCWQ_MANAGING_WORKERS; + pool->flags &= ~POOL_MANAGE_WORKERS; + pool->flags |= POOL_MANAGING_WORKERS; /* * Destroy and then create so that may_start_working() is true @@ -1693,7 +1696,7 @@ static bool manage_workers(struct worker *worker) ret |= maybe_destroy_workers(pool); ret |= maybe_create_worker(pool); - gcwq->flags &= ~GCWQ_MANAGING_WORKERS; + pool->flags &= ~POOL_MANAGING_WORKERS; /* * The trustee might be waiting to take over the manager @@ -1872,7 +1875,7 @@ __acquires(&gcwq->lock) * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI, * wake up another worker; otherwise, clear HIGHPRI_PENDING. */ - if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) { + if (unlikely(pool->flags & POOL_HIGHPRI_PENDING)) { struct work_struct *nwork = list_first_entry(&pool->worklist, struct work_struct, entry); @@ -1880,7 +1883,7 @@ __acquires(&gcwq->lock) get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI) wake_up_worker(pool); else - gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; + pool->flags &= ~POOL_HIGHPRI_PENDING; } /* @@ -3360,10 +3363,10 @@ static int __cpuinit trustee_thread(void *__gcwq) * cancelled. */ BUG_ON(gcwq->cpu != smp_processor_id()); - rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS)); + rc = trustee_wait_event(!(gcwq->pool.flags & POOL_MANAGING_WORKERS)); BUG_ON(rc < 0); - gcwq->flags |= GCWQ_MANAGING_WORKERS; + gcwq->pool.flags |= POOL_MANAGING_WORKERS; list_for_each_entry(worker, &gcwq->pool.idle_list, entry) worker->flags |= WORKER_ROGUE; @@ -3487,7 +3490,7 @@ static int __cpuinit trustee_thread(void *__gcwq) } /* relinquish manager role */ - gcwq->flags &= ~GCWQ_MANAGING_WORKERS; + gcwq->pool.flags &= ~POOL_MANAGING_WORKERS; /* notify completion */ gcwq->trustee = NULL; @@ -3604,7 +3607,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, spin_unlock_irq(&gcwq->lock); kthread_bind(gcwq->pool.first_idle->task, cpu); spin_lock_irq(&gcwq->lock); - gcwq->flags |= GCWQ_MANAGE_WORKERS; + gcwq->pool.flags |= POOL_MANAGE_WORKERS; start_worker(gcwq->pool.first_idle); gcwq->pool.first_idle = NULL; break; -- cgit v1.2.3 From 4ce62e9e30cacc26885cab133ad1de358dd79f21 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 13 Jul 2012 22:16:44 -0700 Subject: workqueue: introduce NR_WORKER_POOLS and for_each_worker_pool() Introduce NR_WORKER_POOLS and for_each_worker_pool() and convert code paths which need to manipulate all pools in a gcwq to use them. NR_WORKER_POOLS is currently one and for_each_worker_pool() iterates over only @gcwq->pool. Note that nr_running is per-pool property and converted to an array with NR_WORKER_POOLS elements and renamed to pool_nr_running. Note that get_pool_nr_running() currently assumes 0 index. The next patch will make use of non-zero index. 
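As a usage illustration of the new iterator (a sketch only; gcwq_wake_all_pools() below is hypothetical and not added by this patch, whose actual conversions appear in the diff further down):

	/* Sketch: wake one idle worker in every pool attached to a gcwq. */
	static void gcwq_wake_all_pools(struct global_cwq *gcwq)
	{
		struct worker_pool *pool;

		for_each_worker_pool(pool, gcwq)
			wake_up_worker(pool);
	}

With NR_WORKER_POOLS == 1, for_each_worker_pool() visits only &gcwq->pool, so behaviour is unchanged; the point is that callers stop hard-coding a single pool.
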
The changes in this patch are mechanical and don't caues any functional difference. This is to prepare for multiple pools per gcwq. v2: nr_running indexing bug in get_pool_nr_running() fixed. v3: Pointer to array is stupid. Don't use it in get_pool_nr_running() as suggested by Linus. Signed-off-by: Tejun Heo Cc: Tony Luck Cc: Fengguang Wu Cc: Linus Torvalds --- kernel/workqueue.c | 223 ++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 153 insertions(+), 70 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7a98bae635fa..b0daaea44eaa 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -74,6 +74,8 @@ enum { TRUSTEE_RELEASE = 3, /* release workers */ TRUSTEE_DONE = 4, /* trustee is done */ + NR_WORKER_POOLS = 1, /* # worker pools per gcwq */ + BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER, BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1, @@ -274,6 +276,9 @@ EXPORT_SYMBOL_GPL(system_nrt_freezable_wq); #define CREATE_TRACE_POINTS #include +#define for_each_worker_pool(pool, gcwq) \ + for ((pool) = &(gcwq)->pool; (pool); (pool) = NULL) + #define for_each_busy_worker(worker, i, pos, gcwq) \ for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) @@ -454,7 +459,7 @@ static bool workqueue_freezing; /* W: have wqs started freezing? */ * try_to_wake_up(). Put it in a separate cacheline. */ static DEFINE_PER_CPU(struct global_cwq, global_cwq); -static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running); +static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]); /* * Global cpu workqueue and nr_running counter for unbound gcwq. The @@ -462,7 +467,9 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running); * workers have WORKER_UNBOUND set. */ static struct global_cwq unbound_global_cwq; -static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */ +static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = { + [0 ... NR_WORKER_POOLS - 1] = ATOMIC_INIT(0), /* always 0 */ +}; static int worker_thread(void *__worker); @@ -477,11 +484,12 @@ static struct global_cwq *get_gcwq(unsigned int cpu) static atomic_t *get_pool_nr_running(struct worker_pool *pool) { int cpu = pool->gcwq->cpu; + int idx = 0; if (cpu != WORK_CPU_UNBOUND) - return &per_cpu(gcwq_nr_running, cpu); + return &per_cpu(pool_nr_running, cpu)[idx]; else - return &unbound_gcwq_nr_running; + return &unbound_pool_nr_running[idx]; } static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, @@ -3345,9 +3353,30 @@ EXPORT_SYMBOL_GPL(work_busy); __ret1 < 0 ? -1 : 0; \ }) +static bool gcwq_is_managing_workers(struct global_cwq *gcwq) +{ + struct worker_pool *pool; + + for_each_worker_pool(pool, gcwq) + if (pool->flags & POOL_MANAGING_WORKERS) + return true; + return false; +} + +static bool gcwq_has_idle_workers(struct global_cwq *gcwq) +{ + struct worker_pool *pool; + + for_each_worker_pool(pool, gcwq) + if (!list_empty(&pool->idle_list)) + return true; + return false; +} + static int __cpuinit trustee_thread(void *__gcwq) { struct global_cwq *gcwq = __gcwq; + struct worker_pool *pool; struct worker *worker; struct work_struct *work; struct hlist_node *pos; @@ -3363,13 +3392,15 @@ static int __cpuinit trustee_thread(void *__gcwq) * cancelled. 
*/ BUG_ON(gcwq->cpu != smp_processor_id()); - rc = trustee_wait_event(!(gcwq->pool.flags & POOL_MANAGING_WORKERS)); + rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq)); BUG_ON(rc < 0); - gcwq->pool.flags |= POOL_MANAGING_WORKERS; + for_each_worker_pool(pool, gcwq) { + pool->flags |= POOL_MANAGING_WORKERS; - list_for_each_entry(worker, &gcwq->pool.idle_list, entry) - worker->flags |= WORKER_ROGUE; + list_for_each_entry(worker, &pool->idle_list, entry) + worker->flags |= WORKER_ROGUE; + } for_each_busy_worker(worker, i, pos, gcwq) worker->flags |= WORKER_ROGUE; @@ -3390,10 +3421,12 @@ static int __cpuinit trustee_thread(void *__gcwq) * keep_working() are always true as long as the worklist is * not empty. */ - atomic_set(get_pool_nr_running(&gcwq->pool), 0); + for_each_worker_pool(pool, gcwq) + atomic_set(get_pool_nr_running(pool), 0); spin_unlock_irq(&gcwq->lock); - del_timer_sync(&gcwq->pool.idle_timer); + for_each_worker_pool(pool, gcwq) + del_timer_sync(&pool->idle_timer); spin_lock_irq(&gcwq->lock); /* @@ -3415,29 +3448,38 @@ static int __cpuinit trustee_thread(void *__gcwq) * may be frozen works in freezable cwqs. Don't declare * completion while frozen. */ - while (gcwq->pool.nr_workers != gcwq->pool.nr_idle || - gcwq->flags & GCWQ_FREEZING || - gcwq->trustee_state == TRUSTEE_IN_CHARGE) { - int nr_works = 0; + while (true) { + bool busy = false; - list_for_each_entry(work, &gcwq->pool.worklist, entry) { - send_mayday(work); - nr_works++; - } + for_each_worker_pool(pool, gcwq) + busy |= pool->nr_workers != pool->nr_idle; - list_for_each_entry(worker, &gcwq->pool.idle_list, entry) { - if (!nr_works--) - break; - wake_up_process(worker->task); - } + if (!busy && !(gcwq->flags & GCWQ_FREEZING) && + gcwq->trustee_state != TRUSTEE_IN_CHARGE) + break; - if (need_to_create_worker(&gcwq->pool)) { - spin_unlock_irq(&gcwq->lock); - worker = create_worker(&gcwq->pool, false); - spin_lock_irq(&gcwq->lock); - if (worker) { - worker->flags |= WORKER_ROGUE; - start_worker(worker); + for_each_worker_pool(pool, gcwq) { + int nr_works = 0; + + list_for_each_entry(work, &pool->worklist, entry) { + send_mayday(work); + nr_works++; + } + + list_for_each_entry(worker, &pool->idle_list, entry) { + if (!nr_works--) + break; + wake_up_process(worker->task); + } + + if (need_to_create_worker(pool)) { + spin_unlock_irq(&gcwq->lock); + worker = create_worker(pool, false); + spin_lock_irq(&gcwq->lock); + if (worker) { + worker->flags |= WORKER_ROGUE; + start_worker(worker); + } } } @@ -3452,11 +3494,18 @@ static int __cpuinit trustee_thread(void *__gcwq) * all workers till we're canceled. */ do { - rc = trustee_wait_event(!list_empty(&gcwq->pool.idle_list)); - while (!list_empty(&gcwq->pool.idle_list)) - destroy_worker(list_first_entry(&gcwq->pool.idle_list, - struct worker, entry)); - } while (gcwq->pool.nr_workers && rc >= 0); + rc = trustee_wait_event(gcwq_has_idle_workers(gcwq)); + + i = 0; + for_each_worker_pool(pool, gcwq) { + while (!list_empty(&pool->idle_list)) { + worker = list_first_entry(&pool->idle_list, + struct worker, entry); + destroy_worker(worker); + } + i |= pool->nr_workers; + } + } while (i && rc >= 0); /* * At this point, either draining has completed and no worker @@ -3465,7 +3514,8 @@ static int __cpuinit trustee_thread(void *__gcwq) * Tell the remaining busy ones to rebind once it finishes the * currently scheduled works by scheduling the rebind_work. 
*/ - WARN_ON(!list_empty(&gcwq->pool.idle_list)); + for_each_worker_pool(pool, gcwq) + WARN_ON(!list_empty(&pool->idle_list)); for_each_busy_worker(worker, i, pos, gcwq) { struct work_struct *rebind_work = &worker->rebind_work; @@ -3490,7 +3540,8 @@ static int __cpuinit trustee_thread(void *__gcwq) } /* relinquish manager role */ - gcwq->pool.flags &= ~POOL_MANAGING_WORKERS; + for_each_worker_pool(pool, gcwq) + pool->flags &= ~POOL_MANAGING_WORKERS; /* notify completion */ gcwq->trustee = NULL; @@ -3532,8 +3583,10 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, unsigned int cpu = (unsigned long)hcpu; struct global_cwq *gcwq = get_gcwq(cpu); struct task_struct *new_trustee = NULL; - struct worker *uninitialized_var(new_worker); + struct worker
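
Taken together, the hunks above leave the core data structures roughly as follows (a condensed, illustrative sketch with many fields and the trustee machinery omitted; not a verbatim copy of kernel/workqueue.c):

	struct worker_pool {
		struct global_cwq	*gcwq;		/* owning gcwq */
		unsigned int		flags;		/* POOL_* flags */
		struct list_head	worklist;	/* pending works */
		int			nr_workers;
		int			nr_idle;
		struct list_head	idle_list;	/* idle workers, LIFO */
		struct timer_list	idle_timer;
		struct timer_list	mayday_timer;
		struct ida		worker_ida;
		struct worker		*first_idle;
	};

	struct global_cwq {
		spinlock_t		lock;
		unsigned int		cpu;
		unsigned int		flags;		/* GCWQ_* flags only */
		struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
		struct worker_pool	pool;		/* the worker pool(s) */
		/* trustee fields omitted */
	};

	/* workers and cwqs now point at the pool and reach the gcwq through it */
	struct worker               { struct worker_pool *pool; /* ... */ };
	struct cpu_workqueue_struct { struct worker_pool *pool; /* ... */ };

	/* e.g. locking the owning gcwq from a worker: */
	/*	spin_lock_irq(&worker->pool->gcwq->lock);	*/

With NR_WORKER_POOLS still 1, gcwq->pool is the only pool and for_each_worker_pool() visits just that one, but every access now goes through worker_pool so that later patches can attach more than one pool to a gcwq.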