From b56c0d8937e665a27d90517ee7a746d0aa05af46 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: kthread: implement kthread_worker Implement simple work processor for kthread. This is to ease using kthread. Single thread workqueue used to be used for things like this but workqueue won't guarantee fixed kthread association anymore to enable worker sharing. This can be used in cases where specific kthread association is necessary, for example, when it should have RT priority or be assigned to certain cgroup. Signed-off-by: Tejun Heo Cc: Andrew Morton --- include/linux/kthread.h | 64 +++++++++++++++++++++ kernel/kthread.c | 149 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 213 insertions(+) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index aabc8a13ba71..f93cb6979edc 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -34,4 +34,68 @@ int kthread_should_stop(void); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; +/* + * Simple work processor based on kthread. + * + * This provides easier way to make use of kthreads. A kthread_work + * can be queued and flushed using queue/flush_kthread_work() + * respectively. Queued kthread_works are processed by a kthread + * running kthread_worker_fn(). + * + * A kthread_work can't be freed while it is executing. + */ +struct kthread_work; +typedef void (*kthread_work_func_t)(struct kthread_work *work); + +struct kthread_worker { + spinlock_t lock; + struct list_head work_list; + struct task_struct *task; +}; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + wait_queue_head_t done; + atomic_t flushing; + int queue_seq; + int done_seq; +}; + +#define KTHREAD_WORKER_INIT(worker) { \ + .lock = SPIN_LOCK_UNLOCKED, \ + .work_list = LIST_HEAD_INIT((worker).work_list), \ + } + +#define KTHREAD_WORK_INIT(work, fn) { \ + .node = LIST_HEAD_INIT((work).node), \ + .func = (fn), \ + .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ + .flushing = ATOMIC_INIT(0), \ + } + +#define DEFINE_KTHREAD_WORKER(worker) \ + struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) + +#define DEFINE_KTHREAD_WORK(work, fn) \ + struct kthread_work work = KTHREAD_WORK_INIT(work, fn) + +static inline void init_kthread_worker(struct kthread_worker *worker) +{ + *worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker); +} + +static inline void init_kthread_work(struct kthread_work *work, + kthread_work_func_t fn) +{ + *work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn); +} + +int kthread_worker_fn(void *worker_ptr); + +bool queue_kthread_work(struct kthread_worker *worker, + struct kthread_work *work); +void flush_kthread_work(struct kthread_work *work); +void flush_kthread_worker(struct kthread_worker *worker); + #endif /* _LINUX_KTHREAD_H */ diff --git a/kernel/kthread.c b/kernel/kthread.c index 83911c780175..8b63c7fee73b 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include static DEFINE_SPINLOCK(kthread_create_lock); @@ -247,3 +249,150 @@ int kthreadd(void *unused) return 0; } + +/** + * kthread_worker_fn - kthread function to process kthread_worker + * @worker_ptr: pointer to initialized kthread_worker + * + * This function can be used as @threadfn to kthread_create() or + * kthread_run() with @worker_ptr argument pointing to an initialized + * kthread_worker. The started kthread will process work_list until + * the it is stopped with kthread_stop(). 
A kthread can also call
+ * this function directly after extra initialization.
+ *
+ * Different kthreads can be used for the same kthread_worker as long
+ * as there's only one kthread attached to it at any given time. A
+ * kthread_worker without an attached kthread simply collects queued
+ * kthread_works.
+ */
+int kthread_worker_fn(void *worker_ptr)
+{
+	struct kthread_worker *worker = worker_ptr;
+	struct kthread_work *work;
+
+	WARN_ON(worker->task);
+	worker->task = current;
+repeat:
+	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
+
+	if (kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
+		spin_lock_irq(&worker->lock);
+		worker->task = NULL;
+		spin_unlock_irq(&worker->lock);
+		return 0;
+	}
+
+	work = NULL;
+	spin_lock_irq(&worker->lock);
+	if (!list_empty(&worker->work_list)) {
+		work = list_first_entry(&worker->work_list,
+					struct kthread_work, node);
+		list_del_init(&work->node);
+	}
+	spin_unlock_irq(&worker->lock);
+
+	if (work) {
+		__set_current_state(TASK_RUNNING);
+		work->func(work);
+		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
+		work->done_seq = work->queue_seq;
+		smp_mb();	/* mb worker-b1 paired with flush-b0 */
+		if (atomic_read(&work->flushing))
+			wake_up_all(&work->done);
+	} else if (!freezing(current))
+		schedule();
+
+	try_to_freeze();
+	goto repeat;
+}
+EXPORT_SYMBOL_GPL(kthread_worker_fn);
+
+/**
+ * queue_kthread_work - queue a kthread_work
+ * @worker: target kthread_worker
+ * @work: kthread_work to queue
+ *
+ * Queue @work to work processor @worker for async execution. Queued
+ * works are processed by a kthread running kthread_worker_fn() on
+ * @worker. Returns %true if @work was successfully queued, %false if
+ * it was already pending.
+ */
+bool queue_kthread_work(struct kthread_worker *worker,
+			struct kthread_work *work)
+{
+	bool ret = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&worker->lock, flags);
+	if (list_empty(&work->node)) {
+		list_add_tail(&work->node, &worker->work_list);
+		work->queue_seq++;
+		if (likely(worker->task))
+			wake_up_process(worker->task);
+		ret = true;
+	}
+	spin_unlock_irqrestore(&worker->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_kthread_work);
+
+/**
+ * flush_kthread_work - flush a kthread_work
+ * @work: work to flush
+ *
+ * If @work is queued or executing, wait for it to finish execution.
+ */
+void flush_kthread_work(struct kthread_work *work)
+{
+	int seq = work->queue_seq;
+
+	atomic_inc(&work->flushing);
+
+	/*
+	 * mb flush-b0 paired with worker-b1, to make sure either
+	 * worker sees the above increment or we see done_seq update.
+	 */
+	smp_mb__after_atomic_inc();
+
+	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
+	wait_event(work->done, seq - work->done_seq <= 0);
+	atomic_dec(&work->flushing);
+
+	/*
+	 * rmb flush-b1 paired with worker-b0, to make sure our caller
+	 * sees every change made by work->func().
+	 */
+	smp_mb__after_atomic_dec();
+}
+EXPORT_SYMBOL_GPL(flush_kthread_work);
+
+struct kthread_flush_work {
+	struct kthread_work	work;
+	struct completion	done;
+};
+
+static void kthread_flush_work_fn(struct kthread_work *work)
+{
+	struct kthread_flush_work *fwork =
+		container_of(work, struct kthread_flush_work, work);
+	complete(&fwork->done);
+}
+
+/**
+ * flush_kthread_worker - flush all current works on a kthread_worker
+ * @worker: worker to flush
+ *
+ * Wait until all currently executing or pending works on @worker are
+ * finished.
+ */ +void flush_kthread_worker(struct kthread_worker *worker) +{ + struct kthread_flush_work fwork = { + KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), + COMPLETION_INITIALIZER_ONSTACK(fwork.done), + }; + + queue_kthread_work(worker, &fwork.work); + wait_for_completion(&fwork.done); +} +EXPORT_SYMBOL_GPL(flush_kthread_worker); -- cgit v1.2.3 From 7bc465605ffa90b281d6b774fcb13911636a6d45 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: ivtv: use kthread_worker instead of workqueue Upcoming workqueue updates will no longer guarantee fixed workqueue to worker kthread association, so giving RT priority to the irq worker won't work. Use kthread_worker which guarantees specific kthread association instead. This also makes setting the priority cleaner. Signed-off-by: Tejun Heo Cc: Andy Walls Cc: Andrew Morton Cc: ivtv-devel@ivtvdriver.org Cc: linux-media@vger.kernel.org --- drivers/media/video/ivtv/ivtv-driver.c | 26 ++++++++++++++++---------- drivers/media/video/ivtv/ivtv-driver.h | 8 ++++---- drivers/media/video/ivtv/ivtv-irq.c | 15 +++------------ drivers/media/video/ivtv/ivtv-irq.h | 2 +- 4 files changed, 24 insertions(+), 27 deletions(-) diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c index 1b79475ca134..49e0b1cc3544 100644 --- a/drivers/media/video/ivtv/ivtv-driver.c +++ b/drivers/media/video/ivtv/ivtv-driver.c @@ -695,6 +695,8 @@ done: */ static int __devinit ivtv_init_struct1(struct ivtv *itv) { + struct sched_param param = { .sched_priority = 99 }; + itv->base_addr = pci_resource_start(itv->pdev, 0); itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */ itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */ @@ -706,13 +708,17 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv) spin_lock_init(&itv->lock); spin_lock_init(&itv->dma_reg_lock); - itv->irq_work_queues = create_singlethread_workqueue(itv->v4l2_dev.name); - if (itv->irq_work_queues == NULL) { - IVTV_ERR("Could not create ivtv workqueue\n"); + init_kthread_worker(&itv->irq_worker); + itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker, + itv->v4l2_dev.name); + if (IS_ERR(itv->irq_worker_task)) { + IVTV_ERR("Could not create ivtv task\n"); return -1; } + /* must use the FIFO scheduler as it is realtime sensitive */ + sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, ¶m); - INIT_WORK(&itv->irq_work_queue, ivtv_irq_work_handler); + init_kthread_work(&itv->irq_work, ivtv_irq_work_handler); /* start counting open_id at 1 */ itv->open_id = 1; @@ -996,7 +1002,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev, /* PCI Device Setup */ retval = ivtv_setup_pci(itv, pdev, pci_id); if (retval == -EIO) - goto free_workqueue; + goto free_worker; if (retval == -ENXIO) goto free_mem; @@ -1208,8 +1214,8 @@ free_mem: release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (itv->has_cx23415) release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); -free_workqueue: - destroy_workqueue(itv->irq_work_queues); +free_worker: + kthread_stop(itv->irq_worker_task); err: if (retval == 0) retval = -ENODEV; @@ -1353,9 +1359,9 @@ static void ivtv_remove(struct pci_dev *pdev) ivtv_set_irq_mask(itv, 0xffffffff); del_timer_sync(&itv->dma_timer); - /* Stop all Work Queues */ - flush_workqueue(itv->irq_work_queues); - destroy_workqueue(itv->irq_work_queues); + /* Kill irq worker */ + flush_kthread_worker(&itv->irq_worker); + kthread_stop(itv->irq_worker_task); 
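The setup and teardown hunks above add up to a simple lifecycle. A minimal sketch of the same pattern in a hypothetical driver; all my_* names are illustrative and not part of the patch:

#include <linux/kthread.h>
#include <linux/sched.h>

struct my_dev {
	struct kthread_worker worker;
	struct task_struct *worker_task;
	struct kthread_work work;
};

static void my_work_fn(struct kthread_work *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	pr_debug("servicing %p from a fixed kthread\n", dev);
}

static int my_dev_start(struct my_dev *dev)
{
	struct sched_param param = { .sched_priority = 99 };

	init_kthread_worker(&dev->worker);
	dev->worker_task = kthread_run(kthread_worker_fn, &dev->worker,
				       "my_dev_worker");
	if (IS_ERR(dev->worker_task))
		return PTR_ERR(dev->worker_task);

	/* the fixed kthread association makes RT priority meaningful */
	sched_setscheduler(dev->worker_task, SCHED_FIFO, &param);
	init_kthread_work(&dev->work, my_work_fn);
	return 0;
}

/* e.g. from an irq handler: queue_kthread_work(&dev->worker, &dev->work); */

static void my_dev_stop(struct my_dev *dev)
{
	flush_kthread_worker(&dev->worker);	/* drain queued works first */
	kthread_stop(dev->worker_task);		/* then stop the kthread */
}

Note the teardown order, which matches the ivtv_remove() hunk above: flush the worker so nothing is left queued, then stop the kthread.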
ivtv_streams_cleanup(itv, 1); ivtv_udma_free(itv); diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h index 5b45fd2b2645..51f7f2a9cf5e 100644 --- a/drivers/media/video/ivtv/ivtv-driver.h +++ b/drivers/media/video/ivtv/ivtv-driver.h @@ -51,7 +51,7 @@ #include #include #include -#include +#include #include #include #include @@ -257,7 +257,6 @@ struct ivtv_mailbox_data { #define IVTV_F_I_DEC_PAUSED 20 /* the decoder is paused */ #define IVTV_F_I_INITED 21 /* set after first open */ #define IVTV_F_I_FAILED 22 /* set if first open failed */ -#define IVTV_F_I_WORK_INITED 23 /* worker thread was initialized */ /* Event notifications */ #define IVTV_F_I_EV_DEC_STOPPED 28 /* decoder stopped event */ @@ -663,8 +662,9 @@ struct ivtv { /* Interrupts & DMA */ u32 irqmask; /* active interrupts */ u32 irq_rr_idx; /* round-robin stream index */ - struct workqueue_struct *irq_work_queues; /* workqueue for PIO/YUV/VBI actions */ - struct work_struct irq_work_queue; /* work entry */ + struct kthread_worker irq_worker; /* kthread worker for PIO/YUV/VBI actions */ + struct task_struct *irq_worker_task; /* task for irq_worker */ + struct kthread_work irq_work; /* kthread work entry */ spinlock_t dma_reg_lock; /* lock access to DMA engine registers */ int cur_dma_stream; /* index of current stream doing DMA (-1 if none) */ int cur_pio_stream; /* index of current stream doing PIO (-1 if none) */ diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c index fea1ec33b0df..9b4faf009196 100644 --- a/drivers/media/video/ivtv/ivtv-irq.c +++ b/drivers/media/video/ivtv/ivtv-irq.c @@ -71,19 +71,10 @@ static void ivtv_pio_work_handler(struct ivtv *itv) write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); } -void ivtv_irq_work_handler(struct work_struct *work) +void ivtv_irq_work_handler(struct kthread_work *work) { - struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue); + struct ivtv *itv = container_of(work, struct ivtv, irq_work); - DEFINE_WAIT(wait); - - if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) { - struct sched_param param = { .sched_priority = 99 }; - - /* This thread must use the FIFO scheduler as it - is realtime sensitive. */ - sched_setscheduler(current, SCHED_FIFO, ¶m); - } if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags)) ivtv_pio_work_handler(itv); @@ -975,7 +966,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id) } if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) { - queue_work(itv->irq_work_queues, &itv->irq_work_queue); + queue_kthread_work(&itv->irq_worker, &itv->irq_work); } spin_unlock(&itv->dma_reg_lock); diff --git a/drivers/media/video/ivtv/ivtv-irq.h b/drivers/media/video/ivtv/ivtv-irq.h index f879a5822e71..1e84433737cc 100644 --- a/drivers/media/video/ivtv/ivtv-irq.h +++ b/drivers/media/video/ivtv/ivtv-irq.h @@ -46,7 +46,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id); -void ivtv_irq_work_handler(struct work_struct *work); +void ivtv_irq_work_handler(struct kthread_work *work); void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock); void ivtv_unfinished_dma(unsigned long arg); -- cgit v1.2.3 From 82805ab77d25643f579d90397dcd34f05d1b750a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: kthread: implement kthread_data() Implement kthread_data() which takes @task pointing to a kthread and returns @data specified when creating the kthread. 
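For example (an illustrative helper, not part of the patch), a kthread that was started via kthread_run(kthread_worker_fn, &worker, ...) can look up its own kthread_worker this way, since @current is trivially valid:

static struct kthread_worker *current_kthread_worker(void)
{
	/* @data is the &worker passed at kthread_create()/kthread_run() time */
	return kthread_data(current);
}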
The caller is responsible for ensuring the validity of @task when calling this function. Signed-off-by: Tejun Heo --- include/linux/kthread.h | 1 + kernel/kthread.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index f93cb6979edc..685ea65eb803 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -30,6 +30,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), void kthread_bind(struct task_struct *k, unsigned int cpu); int kthread_stop(struct task_struct *k); int kthread_should_stop(void); +void *kthread_data(struct task_struct *k); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/kernel/kthread.c b/kernel/kthread.c index 8b63c7fee73b..2dc3786349d1 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -37,6 +37,7 @@ struct kthread_create_info struct kthread { int should_stop; + void *data; struct completion exited; }; @@ -56,6 +57,19 @@ int kthread_should_stop(void) } EXPORT_SYMBOL(kthread_should_stop); +/** + * kthread_data - return data value specified on kthread creation + * @task: kthread task in question + * + * Return the data value specified when kthread @task was created. + * The caller is responsible for ensuring the validity of @task when + * calling this function. + */ +void *kthread_data(struct task_struct *task) +{ + return to_kthread(task)->data; +} + static int kthread(void *_create) { /* Copy data: it's on kthread's stack */ @@ -66,6 +80,7 @@ static int kthread(void *_create) int ret; self.should_stop = 0; + self.data = data; init_completion(&self.exited); current->vfork_done = &self.exited; -- cgit v1.2.3 From 8fec62b2d9d0c80b594d0d85678bfdf57a70df1b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: acpi: use queue_work_on() instead of binding workqueue worker to cpu0 ACPI works need to be executed on cpu0 and acpi/osl.c achieves this by creating singlethread workqueue and then binding it to cpu0 from a work which is quite unorthodox. Make it create regular workqueues and use queue_work_on() instead. This is in preparation of concurrency managed workqueue and the extra workers won't be a problem after it's implemented. Signed-off-by: Tejun Heo --- drivers/acpi/osl.c | 40 +++++++++++----------------------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 78418ce4fc78..46cce391fa46 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -191,36 +191,11 @@ acpi_status __init acpi_os_initialize(void) return AE_OK; } -static void bind_to_cpu0(struct work_struct *work) -{ - set_cpus_allowed_ptr(current, cpumask_of(0)); - kfree(work); -} - -static void bind_workqueue(struct workqueue_struct *wq) -{ - struct work_struct *work; - - work = kzalloc(sizeof(struct work_struct), GFP_KERNEL); - INIT_WORK(work, bind_to_cpu0); - queue_work(wq, work); -} - acpi_status acpi_os_initialize1(void) { - /* - * On some machines, a software-initiated SMI causes corruption unless - * the SMI runs on CPU 0. An SMI can be initiated by any AML, but - * typically it's done in GPE-related methods that are run via - * workqueues, so we can avoid the known corruption cases by binding - * the workqueues to CPU 0. 
- */ - kacpid_wq = create_singlethread_workqueue("kacpid"); - bind_workqueue(kacpid_wq); - kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify"); - bind_workqueue(kacpi_notify_wq); - kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug"); - bind_workqueue(kacpi_hotplug_wq); + kacpid_wq = create_workqueue("kacpid"); + kacpi_notify_wq = create_workqueue("kacpi_notify"); + kacpi_hotplug_wq = create_workqueue("kacpi_hotplug"); BUG_ON(!kacpid_wq); BUG_ON(!kacpi_notify_wq); BUG_ON(!kacpi_hotplug_wq); @@ -766,7 +741,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, else INIT_WORK(&dpc->work, acpi_os_execute_deferred); - ret = queue_work(queue, &dpc->work); + /* + * On some machines, a software-initiated SMI causes corruption unless + * the SMI runs on CPU 0. An SMI can be initiated by any AML, but + * typically it's done in GPE-related methods that are run via + * workqueues, so we can avoid the known corruption cases by always + * queueing on CPU 0. + */ + ret = queue_work_on(0, queue, &dpc->work); if (!ret) { printk(KERN_ERR PREFIX -- cgit v1.2.3 From c790bce0481857412c964c5e9d46d56e41c4b051 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: workqueue: kill RT workqueue With stop_machine() converted to use cpu_stop, RT workqueue doesn't have any user left. Kill RT workqueue support. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 20 +++++++++----------- kernel/workqueue.c | 6 ------ 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 9466e860d8c2..0697946c66a1 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -181,12 +181,11 @@ static inline void destroy_work_on_stack(struct work_struct *work) { } extern struct workqueue_struct * -__create_workqueue_key(const char *name, int singlethread, - int freezeable, int rt, struct lock_class_key *key, - const char *lock_name); +__create_workqueue_key(const char *name, int singlethread, int freezeable, + struct lock_class_key *key, const char *lock_name); #ifdef CONFIG_LOCKDEP -#define __create_workqueue(name, singlethread, freezeable, rt) \ +#define __create_workqueue(name, singlethread, freezeable) \ ({ \ static struct lock_class_key __key; \ const char *__lock_name; \ @@ -197,19 +196,18 @@ __create_workqueue_key(const char *name, int singlethread, __lock_name = #name; \ \ __create_workqueue_key((name), (singlethread), \ - (freezeable), (rt), &__key, \ + (freezeable), &__key, \ __lock_name); \ }) #else -#define __create_workqueue(name, singlethread, freezeable, rt) \ - __create_workqueue_key((name), (singlethread), (freezeable), (rt), \ +#define __create_workqueue(name, singlethread, freezeable) \ + __create_workqueue_key((name), (singlethread), (freezeable), \ NULL, NULL) #endif -#define create_workqueue(name) __create_workqueue((name), 0, 0, 0) -#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1) -#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0) -#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0) +#define create_workqueue(name) __create_workqueue((name), 0, 0) +#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) +#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 327d2deb4451..1a47fbf92fae 100644 --- a/kernel/workqueue.c +++ 
b/kernel/workqueue.c @@ -62,7 +62,6 @@ struct workqueue_struct { const char *name; int singlethread; int freezeable; /* Freeze threads during suspend */ - int rt; #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif @@ -947,7 +946,6 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu) static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; struct workqueue_struct *wq = cwq->wq; const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; struct task_struct *p; @@ -963,8 +961,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) */ if (IS_ERR(p)) return PTR_ERR(p); - if (cwq->wq->rt) - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); cwq->thread = p; trace_workqueue_creation(cwq->thread, cpu); @@ -986,7 +982,6 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) struct workqueue_struct *__create_workqueue_key(const char *name, int singlethread, int freezeable, - int rt, struct lock_class_key *key, const char *lock_name) { @@ -1008,7 +1003,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); wq->singlethread = singlethread; wq->freezeable = freezeable; - wq->rt = rt; INIT_LIST_HEAD(&wq->list); if (singlethread) { -- cgit v1.2.3 From 4690c4ab56c71919893ca25252f2dd65b58188c7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: misc/cosmetic updates Make the following updates in preparation of concurrency managed workqueue. None of these changes causes any visible behavior difference. * Add comments and adjust indentations to data structures and several functions. * Rename wq_per_cpu() to get_cwq() and swap the position of two parameters for consistency. Convert a direct per_cpu_ptr() access to wq->cpu_wq to get_cwq(). * Add work_static() and Update set_wq_data() such that it sets the flags part to WORK_STRUCT_PENDING | WORK_STRUCT_STATIC if static | @extra_flags. * Move santiy check on work->entry emptiness from queue_work_on() to __queue_work() which all queueing paths share. * Make __queue_work() take @cpu and @wq instead of @cwq. * Restructure flush_work() and __create_workqueue_key() to make them easier to modify. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 5 ++ kernel/workqueue.c | 131 +++++++++++++++++++++++++++++----------------- 2 files changed, 89 insertions(+), 47 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0697946c66a1..e724dafc9e6d 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -96,9 +96,14 @@ struct execute_work { #ifdef CONFIG_DEBUG_OBJECTS_WORK extern void __init_work(struct work_struct *work, int onstack); extern void destroy_work_on_stack(struct work_struct *work); +static inline unsigned int work_static(struct work_struct *work) +{ + return *work_data_bits(work) & (1 << WORK_STRUCT_STATIC); +} #else static inline void __init_work(struct work_struct *work, int onstack) { } static inline void destroy_work_on_stack(struct work_struct *work) { } +static inline unsigned int work_static(struct work_struct *work) { return 0; } #endif /* diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1a47fbf92fae..c56146a755e5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -36,6 +36,16 @@ #define CREATE_TRACE_POINTS #include +/* + * Structure fields follow one of the following exclusion rules. 
+ * + * I: Set during initialization and read-only afterwards. + * + * L: cwq->lock protected. Access with cwq->lock held. + * + * W: workqueue_lock protected. + */ + /* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). @@ -48,8 +58,8 @@ struct cpu_workqueue_struct { wait_queue_head_t more_work; struct work_struct *current_work; - struct workqueue_struct *wq; - struct task_struct *thread; + struct workqueue_struct *wq; /* I: the owning workqueue */ + struct task_struct *thread; } ____cacheline_aligned; /* @@ -57,13 +67,13 @@ struct cpu_workqueue_struct { * per-CPU workqueues: */ struct workqueue_struct { - struct cpu_workqueue_struct *cpu_wq; - struct list_head list; - const char *name; + struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ + struct list_head list; /* W: list of all workqueues */ + const char *name; /* I: workqueue name */ int singlethread; int freezeable; /* Freeze threads during suspend */ #ifdef CONFIG_LOCKDEP - struct lockdep_map lockdep_map; + struct lockdep_map lockdep_map; #endif }; @@ -204,8 +214,8 @@ static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) ? cpu_singlethread_map : cpu_populated_map; } -static -struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) +static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, + struct workqueue_struct *wq) { if (unlikely(is_wq_single_threaded(wq))) cpu = singlethread_cpu; @@ -217,15 +227,13 @@ struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) * - Must *only* be called if the pending flag is set */ static inline void set_wq_data(struct work_struct *work, - struct cpu_workqueue_struct *cwq) + struct cpu_workqueue_struct *cwq, + unsigned long extra_flags) { - unsigned long new; - BUG_ON(!work_pending(work)); - new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING); - new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work); - atomic_long_set(&work->data, new); + atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | + (1UL << WORK_STRUCT_PENDING) | extra_flags); } /* @@ -233,9 +241,7 @@ static inline void set_wq_data(struct work_struct *work, */ static inline void clear_wq_data(struct work_struct *work) { - unsigned long flags = *work_data_bits(work) & - (1UL << WORK_STRUCT_STATIC); - atomic_long_set(&work->data, flags); + atomic_long_set(&work->data, work_static(work)); } static inline @@ -244,29 +250,47 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); } +/** + * insert_work - insert a work into cwq + * @cwq: cwq @work belongs to + * @work: work to insert + * @head: insertion point + * @extra_flags: extra WORK_STRUCT_* flags to set + * + * Insert @work into @cwq after @head. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ static void insert_work(struct cpu_workqueue_struct *cwq, - struct work_struct *work, struct list_head *head) + struct work_struct *work, struct list_head *head, + unsigned int extra_flags) { trace_workqueue_insertion(cwq->thread, work); - set_wq_data(work, cwq); + /* we own @work, set data and link */ + set_wq_data(work, cwq, extra_flags); + /* * Ensure that we get the right work->data if we see the * result of list_add() below, see try_to_grab_pending(). 
*/ smp_wmb(); + list_add_tail(&work->entry, head); wake_up(&cwq->more_work); } -static void __queue_work(struct cpu_workqueue_struct *cwq, +static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); unsigned long flags; debug_work_activate(work); spin_lock_irqsave(&cwq->lock, flags); - insert_work(cwq, work, &cwq->worklist); + BUG_ON(!list_empty(&work->entry)); + insert_work(cwq, work, &cwq->worklist, 0); spin_unlock_irqrestore(&cwq->lock, flags); } @@ -308,8 +332,7 @@ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) int ret = 0; if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { - BUG_ON(!list_empty(&work->entry)); - __queue_work(wq_per_cpu(wq, cpu), work); + __queue_work(cpu, wq, work); ret = 1; } return ret; @@ -320,9 +343,8 @@ static void delayed_work_timer_fn(unsigned long __data) { struct delayed_work *dwork = (struct delayed_work *)__data; struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); - struct workqueue_struct *wq = cwq->wq; - __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work); + __queue_work(smp_processor_id(), cwq->wq, &dwork->work); } /** @@ -366,7 +388,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id())); + set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -430,6 +452,12 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) spin_unlock_irq(&cwq->lock); } +/** + * worker_thread - the worker thread function + * @__cwq: cwq to serve + * + * The cwq worker thread function. + */ static int worker_thread(void *__cwq) { struct cpu_workqueue_struct *cwq = __cwq; @@ -468,6 +496,17 @@ static void wq_barrier_func(struct work_struct *work) complete(&barr->done); } +/** + * insert_wq_barrier - insert a barrier work + * @cwq: cwq to insert barrier into + * @barr: wq_barrier to insert + * @head: insertion point + * + * Insert barrier @barr into @cwq before @head. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, struct wq_barrier *barr, struct list_head *head) { @@ -479,11 +518,10 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, */ INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); - init_completion(&barr->done); debug_work_activate(&barr->work); - insert_work(cwq, &barr->work, head); + insert_work(cwq, &barr->work, head, 0); } static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) @@ -517,9 +555,6 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) * * We sleep until all works which were queued on entry have been handled, * but we are not livelocked by new incoming ones. - * - * This function used to run the workqueues itself. Now we just wait for the - * helper threads to do it. 
*/ void flush_workqueue(struct workqueue_struct *wq) { @@ -558,7 +593,6 @@ int flush_work(struct work_struct *work) lock_map_acquire(&cwq->wq->lockdep_map); lock_map_release(&cwq->wq->lockdep_map); - prev = NULL; spin_lock_irq(&cwq->lock); if (!list_empty(&work->entry)) { /* @@ -567,22 +601,22 @@ int flush_work(struct work_struct *work) */ smp_rmb(); if (unlikely(cwq != get_wq_data(work))) - goto out; + goto already_gone; prev = &work->entry; } else { if (cwq->current_work != work) - goto out; + goto already_gone; prev = &cwq->worklist; } insert_wq_barrier(cwq, &barr, prev->next); -out: - spin_unlock_irq(&cwq->lock); - if (!prev) - return 0; + spin_unlock_irq(&cwq->lock); wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); return 1; +already_gone: + spin_unlock_irq(&cwq->lock); + return 0; } EXPORT_SYMBOL_GPL(flush_work); @@ -665,7 +699,7 @@ static void wait_on_work(struct work_struct *work) cpu_map = wq_cpu_map(wq); for_each_cpu(cpu, cpu_map) - wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); + wait_on_cpu_work(get_cwq(cpu, wq), work); } static int __cancel_work_timer(struct work_struct *work, @@ -782,9 +816,8 @@ EXPORT_SYMBOL(schedule_delayed_work); void flush_delayed_work(struct delayed_work *dwork) { if (del_timer_sync(&dwork->timer)) { - struct cpu_workqueue_struct *cwq; - cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu()); - __queue_work(cwq, &dwork->work); + __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq, + &dwork->work); put_cpu(); } flush_work(&dwork->work); @@ -991,13 +1024,11 @@ struct workqueue_struct *__create_workqueue_key(const char *name, wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) - return NULL; + goto err; wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); - if (!wq->cpu_wq) { - kfree(wq); - return NULL; - } + if (!wq->cpu_wq) + goto err; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); @@ -1041,6 +1072,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name, wq = NULL; } return wq; +err: + if (wq) { + free_percpu(wq->cpu_wq); + kfree(wq); + } + return NULL; } EXPORT_SYMBOL_GPL(__create_workqueue_key); -- cgit v1.2.3 From 97e37d7b9e65a6ac939f796f91081135b7a08acc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: merge feature parameters into flags Currently, __create_workqueue_key() takes @singlethread and @freezeable paramters and store them separately in workqueue_struct. Merge them into a single flags parameter and field and use WQ_FREEZEABLE and WQ_SINGLE_THREAD. 
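A before/after sketch of what this conversion means at a call site ("my_wq" is a placeholder name):

	struct workqueue_struct *wq;

	/* before: two positional ints - which is singlethread, which freezeable? */
	wq = __create_workqueue("my_wq", 1, 1);

	/* after: self-describing flags */
	wq = __create_workqueue("my_wq", WQ_FREEZEABLE | WQ_SINGLE_THREAD);

	/* the common wrappers keep their existing behavior */
	wq = create_freezeable_workqueue("my_wq");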
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 25 +++++++++++++++---------- kernel/workqueue.c | 17 +++++++---------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index e724dafc9e6d..d89cfc143b1a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -184,13 +184,17 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } #define work_clear_pending(work) \ clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) +enum { + WQ_FREEZEABLE = 1 << 0, /* freeze during suspend */ + WQ_SINGLE_THREAD = 1 << 1, /* no per-cpu worker */ +}; extern struct workqueue_struct * -__create_workqueue_key(const char *name, int singlethread, int freezeable, +__create_workqueue_key(const char *name, unsigned int flags, struct lock_class_key *key, const char *lock_name); #ifdef CONFIG_LOCKDEP -#define __create_workqueue(name, singlethread, freezeable) \ +#define __create_workqueue(name, flags) \ ({ \ static struct lock_class_key __key; \ const char *__lock_name; \ @@ -200,19 +204,20 @@ __create_workqueue_key(const char *name, int singlethread, int freezeable, else \ __lock_name = #name; \ \ - __create_workqueue_key((name), (singlethread), \ - (freezeable), &__key, \ + __create_workqueue_key((name), (flags), &__key, \ __lock_name); \ }) #else -#define __create_workqueue(name, singlethread, freezeable) \ - __create_workqueue_key((name), (singlethread), (freezeable), \ - NULL, NULL) +#define __create_workqueue(name, flags) \ + __create_workqueue_key((name), (flags), NULL, NULL) #endif -#define create_workqueue(name) __create_workqueue((name), 0, 0) -#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) -#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) +#define create_workqueue(name) \ + __create_workqueue((name), 0) +#define create_freezeable_workqueue(name) \ + __create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD) +#define create_singlethread_workqueue(name) \ + __create_workqueue((name), WQ_SINGLE_THREAD) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c56146a755e5..68e4dd808ec0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -67,11 +67,10 @@ struct cpu_workqueue_struct { * per-CPU workqueues: */ struct workqueue_struct { + unsigned int flags; /* I: WQ_* flags */ struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ struct list_head list; /* W: list of all workqueues */ const char *name; /* I: workqueue name */ - int singlethread; - int freezeable; /* Freeze threads during suspend */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif @@ -203,9 +202,9 @@ static const struct cpumask *cpu_singlethread_map __read_mostly; static cpumask_var_t cpu_populated_map __read_mostly; /* If it's single threaded, it isn't in the list of workqueues. 
*/ -static inline int is_wq_single_threaded(struct workqueue_struct *wq) +static inline bool is_wq_single_threaded(struct workqueue_struct *wq) { - return wq->singlethread; + return wq->flags & WQ_SINGLE_THREAD; } static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) @@ -463,7 +462,7 @@ static int worker_thread(void *__cwq) struct cpu_workqueue_struct *cwq = __cwq; DEFINE_WAIT(wait); - if (cwq->wq->freezeable) + if (cwq->wq->flags & WQ_FREEZEABLE) set_freezable(); for (;;) { @@ -1013,8 +1012,7 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) } struct workqueue_struct *__create_workqueue_key(const char *name, - int singlethread, - int freezeable, + unsigned int flags, struct lock_class_key *key, const char *lock_name) { @@ -1030,13 +1028,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name, if (!wq->cpu_wq) goto err; + wq->flags = flags; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); - wq->singlethread = singlethread; - wq->freezeable = freezeable; INIT_LIST_HEAD(&wq->list); - if (singlethread) { + if (flags & WQ_SINGLE_THREAD) { cwq = init_cpu_workqueue(wq, singlethread_cpu); err = create_workqueue_thread(cwq, singlethread_cpu); start_workqueue_thread(cwq, -1); -- cgit v1.2.3 From 22df02bb3fab24af97bff4c69cc6fd8529fc66fe Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: define masks for work flags and conditionalize STATIC flags Work flags are about to see more traditional mask handling. Define WORK_STRUCT_*_BIT as the bit position constant and redefine WORK_STRUCT_* as bit masks. Also, make WORK_STRUCT_STATIC_* flags conditional While at it, re-define these constants as enums and use WORK_STRUCT_STATIC instead of hard-coding 2 in WORK_DATA_STATIC_INIT(). 
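A sketch of how the two constant forms are meant to be used after this change; my_try_claim() and my_get_cwq() are illustrative mirrors of queue_work_on() and get_wq_data(), not part of the patch:

static bool my_try_claim(struct work_struct *work)
{
	/* the _BIT constants feed the atomic bitops */
	return !test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				 work_data_bits(work));
}

static struct cpu_workqueue_struct *my_get_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	/* the mask constants feed plain reads and masking */
	WARN_ON(!(data & WORK_STRUCT_PENDING));

	/* the cwq pointer lives above the flag bits */
	return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
}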
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 29 +++++++++++++++++++++-------- kernel/workqueue.c | 12 ++++++------ 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d89cfc143b1a..d60c5701ab45 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -22,12 +22,25 @@ typedef void (*work_func_t)(struct work_struct *work); */ #define work_data_bits(work) ((unsigned long *)(&(work)->data)) +enum { + WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC_BIT = 1, /* static initializer (debugobjects) */ +#endif + + WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, +#else + WORK_STRUCT_STATIC = 0, +#endif + + WORK_STRUCT_FLAG_MASK = 3UL, + WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, +}; + struct work_struct { atomic_long_t data; -#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ -#define WORK_STRUCT_STATIC 1 /* static initializer (debugobjects) */ -#define WORK_STRUCT_FLAG_MASK (3UL) -#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) struct list_head entry; work_func_t func; #ifdef CONFIG_LOCKDEP @@ -36,7 +49,7 @@ struct work_struct { }; #define WORK_DATA_INIT() ATOMIC_LONG_INIT(0) -#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(2) +#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_STATIC) struct delayed_work { struct work_struct work; @@ -98,7 +111,7 @@ extern void __init_work(struct work_struct *work, int onstack); extern void destroy_work_on_stack(struct work_struct *work); static inline unsigned int work_static(struct work_struct *work) { - return *work_data_bits(work) & (1 << WORK_STRUCT_STATIC); + return *work_data_bits(work) & WORK_STRUCT_STATIC; } #else static inline void __init_work(struct work_struct *work, int onstack) { } @@ -167,7 +180,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } * @work: The work item in question */ #define work_pending(work) \ - test_bit(WORK_STRUCT_PENDING, work_data_bits(work)) + test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) /** * delayed_work_pending - Find out whether a delayable work item is currently @@ -182,7 +195,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } * @work: The work item in question */ #define work_clear_pending(work) \ - clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) + clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) enum { WQ_FREEZEABLE = 1 << 0, /* freeze during suspend */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 68e4dd808ec0..5c49d762293b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -115,7 +115,7 @@ static int work_fixup_activate(void *addr, enum debug_obj_state state) * statically initialized. We just make sure that it * is tracked in the object tracker. 
*/ - if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) { + if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { debug_object_init(work, &work_debug_descr); debug_object_activate(work, &work_debug_descr); return 0; @@ -232,7 +232,7 @@ static inline void set_wq_data(struct work_struct *work, BUG_ON(!work_pending(work)); atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | - (1UL << WORK_STRUCT_PENDING) | extra_flags); + WORK_STRUCT_PENDING | extra_flags); } /* @@ -330,7 +330,7 @@ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) { int ret = 0; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_work(cpu, wq, work); ret = 1; } @@ -380,7 +380,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct timer_list *timer = &dwork->timer; struct work_struct *work = &dwork->work; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); @@ -516,7 +516,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, * might deadlock. */ INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); - __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); + __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); init_completion(&barr->done); debug_work_activate(&barr->work); @@ -628,7 +628,7 @@ static int try_to_grab_pending(struct work_struct *work) struct cpu_workqueue_struct *cwq; int ret = -1; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) return 0; /* -- cgit v1.2.3 From a62428c0ae54a39e411251e836c3fe3dc11a5f5f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: separate out process_one_work() Separate out process_one_work() out of run_workqueue(). This patch doesn't cause any behavior change. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 100 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 39 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 5c49d762293b..8e3082b76c7f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -402,51 +402,73 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +/** + * process_one_work - process single work + * @cwq: cwq to process work for + * @work: work to process + * + * Process @work. This function contains all the logics necessary to + * process a single work including synchronization against and + * interaction with other workers on the same cpu, queueing and + * flushing. As long as context requirement is met, any worker can + * call this function to process a work. + * + * CONTEXT: + * spin_lock_irq(cwq->lock) which is released and regrabbed. + */ +static void process_one_work(struct cpu_workqueue_struct *cwq, + struct work_struct *work) +{ + work_func_t f = work->func; +#ifdef CONFIG_LOCKDEP + /* + * It is permissible to free the struct work_struct from + * inside the function that is called from it, this we need to + * take into account for lockdep too. To avoid bogus "held + * lock freed" warnings as well as problems when looking into + * work->lockdep_map, make a copy and use that here. 
+ */ + struct lockdep_map lockdep_map = work->lockdep_map; +#endif + /* claim and process */ + trace_workqueue_execution(cwq->thread, work); + debug_work_deactivate(work); + cwq->current_work = work; + list_del_init(&work->entry); + + spin_unlock_irq(&cwq->lock); + + BUG_ON(get_wq_data(work) != cwq); + work_clear_pending(work); + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_acquire(&lockdep_map); + f(work); + lock_map_release(&lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); + + if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { + printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " + "%s/0x%08x/%d\n", + current->comm, preempt_count(), task_pid_nr(current)); + printk(KERN_ERR " last function: "); + print_symbol("%s\n", (unsigned long)f); + debug_show_held_locks(current); + dump_stack(); + } + + spin_lock_irq(&cwq->lock); + + /* we're done with it, release */ + cwq->current_work = NULL; +} + static void run_workqueue(struct cpu_workqueue_struct *cwq) { spin_lock_irq(&cwq->lock); while (!list_empty(&cwq->worklist)) { struct work_struct *work = list_entry(cwq->worklist.next, struct work_struct, entry); - work_func_t f = work->func; -#ifdef CONFIG_LOCKDEP - /* - * It is permissible to free the struct work_struct - * from inside the function that is called from it, - * this we need to take into account for lockdep too. - * To avoid bogus "held lock freed" warnings as well - * as problems when looking into work->lockdep_map, - * make a copy and use that here. - */ - struct lockdep_map lockdep_map = work->lockdep_map; -#endif - trace_workqueue_execution(cwq->thread, work); - debug_work_deactivate(work); - cwq->current_work = work; - list_del_init(cwq->worklist.next); - spin_unlock_irq(&cwq->lock); - - BUG_ON(get_wq_data(work) != cwq); - work_clear_pending(work); - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_acquire(&lockdep_map); - f(work); - lock_map_release(&lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); - - if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { - printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " - "%s/0x%08x/%d\n", - current->comm, preempt_count(), - task_pid_nr(current)); - printk(KERN_ERR " last function: "); - print_symbol("%s\n", (unsigned long)f); - debug_show_held_locks(current); - dump_stack(); - } - - spin_lock_irq(&cwq->lock); - cwq->current_work = NULL; + process_one_work(cwq, work); } spin_unlock_irq(&cwq->lock); } -- cgit v1.2.3 From 64166699752006f1a23a9cf7c96ae36654ccfc2c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: temporarily remove workqueue tracing Strip tracing code from workqueue and remove workqueue tracing. This is temporary measure till concurrency managed workqueue is complete. 
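As an aside on the lockdep_map copy in process_one_work() above: it is needed because a work function may legitimately free its own work_struct, as in this hypothetical self-freeing item (the my_async names are illustrative):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_async {
	struct work_struct work;
	int payload;
};

static void my_async_fn(struct work_struct *work)
{
	struct my_async *a = container_of(work, struct my_async, work);

	pr_info("payload %d\n", a->payload);
	kfree(a);	/* frees the work_struct being executed; after
			 * f(work) returns the core must not touch *work,
			 * hence the lockdep state was copied beforehand */
}

static int my_async_submit(int payload)
{
	struct my_async *a = kmalloc(sizeof(*a), GFP_KERNEL);

	if (!a)
		return -ENOMEM;
	a->payload = payload;
	INIT_WORK(&a->work, my_async_fn);
	schedule_work(&a->work);
	return 0;
}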
Signed-off-by: Tejun Heo Cc: Frederic Weisbecker --- include/trace/events/workqueue.h | 92 ---------------------------------------- kernel/trace/Kconfig | 11 ----- kernel/workqueue.c | 14 ++---- 3 files changed, 3 insertions(+), 114 deletions(-) delete mode 100644 include/trace/events/workqueue.h diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h deleted file mode 100644 index d6c974474e70..000000000000 --- a/include/trace/events/workqueue.h +++ /dev/null @@ -1,92 +0,0 @@ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM workqueue - -#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_WORKQUEUE_H - -#include -#include -#include - -DECLARE_EVENT_CLASS(workqueue, - - TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), - - TP_ARGS(wq_thread, work), - - TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - __field(work_func_t, func) - ), - - TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - __entry->func = work->func; - ), - - TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, - __entry->thread_pid, __entry->func) -); - -DEFINE_EVENT(workqueue, workqueue_insertion, - - TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), - - TP_ARGS(wq_thread, work) -); - -DEFINE_EVENT(workqueue, workqueue_execution, - - TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), - - TP_ARGS(wq_thread, work) -); - -/* Trace the creation of one workqueue thread on a cpu */ -TRACE_EVENT(workqueue_creation, - - TP_PROTO(struct task_struct *wq_thread, int cpu), - - TP_ARGS(wq_thread, cpu), - - TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - __field(int, cpu) - ), - - TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - __entry->cpu = cpu; - ), - - TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm, - __entry->thread_pid, __entry->cpu) -); - -TRACE_EVENT(workqueue_destruction, - - TP_PROTO(struct task_struct *wq_thread), - - TP_ARGS(wq_thread), - - TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - ), - - TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - ), - - TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid) -); - -#endif /* _TRACE_WORKQUEUE_H */ - -/* This part must be outside protection */ -#include diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 8b1797c4545b..a0d95c1f3f82 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -391,17 +391,6 @@ config KMEMTRACE If unsure, say N. -config WORKQUEUE_TRACER - bool "Trace workqueues" - select GENERIC_TRACER - help - The workqueue tracer provides some statistical information - about each cpu workqueue thread such as the number of the - works inserted and executed since their creation. It can help - to evaluate the amount of work each of them has to perform. - For example it can help a developer to decide whether he should - choose a per-cpu workqueue instead of a singlethreaded one. 
- config BLK_DEV_IO_TRACE bool "Support for tracing block IO actions" depends on SYSFS diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8e3082b76c7f..f7ab703285a6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -33,8 +33,6 @@ #include #include #include -#define CREATE_TRACE_POINTS -#include /* * Structure fields follow one of the following exclusion rules. @@ -243,10 +241,10 @@ static inline void clear_wq_data(struct work_struct *work) atomic_long_set(&work->data, work_static(work)); } -static inline -struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) +static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) { - return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); + return (void *)(atomic_long_read(&work->data) & + WORK_STRUCT_WQ_DATA_MASK); } /** @@ -265,8 +263,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags) { - trace_workqueue_insertion(cwq->thread, work); - /* we own @work, set data and link */ set_wq_data(work, cwq, extra_flags); @@ -431,7 +427,6 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, struct lockdep_map lockdep_map = work->lockdep_map; #endif /* claim and process */ - trace_workqueue_execution(cwq->thread, work); debug_work_deactivate(work); cwq->current_work = work; list_del_init(&work->entry); @@ -1017,8 +1012,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) return PTR_ERR(p); cwq->thread = p; - trace_workqueue_creation(cwq->thread, cpu); - return 0; } @@ -1123,7 +1116,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) * checks list_empty(), and a "normal" queue_work() can't use * a dead CPU. */ - trace_workqueue_destruction(cwq->thread); kthread_stop(cwq->thread); cwq->thread = NULL; } -- cgit v1.2.3 From 1537663f5763892cacf1409ac0efef1b4f332d1e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: kill cpu_populated_map Worker management is about to be overhauled. Simplify things by removing cpu_populated_map, creating workers for all possible cpus and making single threaded workqueues behave more like multi threaded ones. After this patch, all cwqs are always initialized, all workqueues are linked on the workqueues list and workers for all possibles cpus always exist. This also makes CPU hotplug support simpler - checking ->cpus_allowed before processing works in worker_thread() and flushing cwqs on CPU_POST_DEAD are enough. While at it, make get_cwq() always return the cwq for the specified cpu, add target_cwq() for cases where single thread distinction is necessary and drop all direct usage of per_cpu_ptr() on wq->cpu_wq. 
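The helper split, from a caller's point of view: previously get_cwq() silently redirected single-threaded workqueues; now the intent is explicit at each call site. An illustrative fragment (both helpers are workqueue.c-internal):

static bool example_runs_here(struct workqueue_struct *wq, unsigned int cpu)
{
	/*
	 * get_cwq(): exactly @cpu's cwq - what flush and hotplug paths want.
	 * target_cwq(): where a work queued on @cpu actually lands -
	 * WQ_SINGLE_THREAD wqs are redirected to singlethread_cpu.
	 */
	return get_cwq(cpu, wq) == target_cwq(cpu, wq);
}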
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 173 ++++++++++++++++++----------------------------------- 1 file changed, 59 insertions(+), 114 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f7ab703285a6..dc78956ccf03 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -55,6 +55,7 @@ struct cpu_workqueue_struct { struct list_head worklist; wait_queue_head_t more_work; struct work_struct *current_work; + unsigned int cpu; struct workqueue_struct *wq; /* I: the owning workqueue */ struct task_struct *thread; @@ -189,34 +190,19 @@ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); static int singlethread_cpu __read_mostly; -static const struct cpumask *cpu_singlethread_map __read_mostly; -/* - * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD - * flushes cwq->worklist. This means that flush_workqueue/wait_on_work - * which comes in between can't use for_each_online_cpu(). We could - * use cpu_possible_map, the cpumask below is more a documentation - * than optimization. - */ -static cpumask_var_t cpu_populated_map __read_mostly; - -/* If it's single threaded, it isn't in the list of workqueues. */ -static inline bool is_wq_single_threaded(struct workqueue_struct *wq) -{ - return wq->flags & WQ_SINGLE_THREAD; -} -static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) +static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, + struct workqueue_struct *wq) { - return is_wq_single_threaded(wq) - ? cpu_singlethread_map : cpu_populated_map; + return per_cpu_ptr(wq->cpu_wq, cpu); } -static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, - struct workqueue_struct *wq) +static struct cpu_workqueue_struct *target_cwq(unsigned int cpu, + struct workqueue_struct *wq) { - if (unlikely(is_wq_single_threaded(wq))) + if (unlikely(wq->flags & WQ_SINGLE_THREAD)) cpu = singlethread_cpu; - return per_cpu_ptr(wq->cpu_wq, cpu); + return get_cwq(cpu, wq); } /* @@ -279,7 +265,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); unsigned long flags; debug_work_activate(work); @@ -383,7 +369,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); + set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -495,6 +481,10 @@ static int worker_thread(void *__cwq) if (kthread_should_stop()) break; + if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed, + get_cpu_mask(cwq->cpu)))) + set_cpus_allowed_ptr(cwq->thread, + get_cpu_mask(cwq->cpu)); run_workqueue(cwq); } @@ -574,14 +564,13 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) */ void flush_workqueue(struct workqueue_struct *wq) { - const struct cpumask *cpu_map = wq_cpu_map(wq); int cpu; might_sleep(); lock_map_acquire(&wq->lockdep_map); lock_map_release(&wq->lockdep_map); - for_each_cpu(cpu, cpu_map) - flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); + for_each_possible_cpu(cpu) + flush_cpu_workqueue(get_cwq(cpu, wq)); } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -699,7 +688,6 @@ static void wait_on_work(struct work_struct *work) { struct 
cpu_workqueue_struct *cwq; struct workqueue_struct *wq; - const struct cpumask *cpu_map; int cpu; might_sleep(); @@ -712,9 +700,8 @@ static void wait_on_work(struct work_struct *work) return; wq = cwq->wq; - cpu_map = wq_cpu_map(wq); - for_each_cpu(cpu, cpu_map) + for_each_possible_cpu(cpu) wait_on_cpu_work(get_cwq(cpu, wq), work); } @@ -972,7 +959,7 @@ int current_is_keventd(void) BUG_ON(!keventd_wq); - cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu); + cwq = get_cwq(cpu, keventd_wq); if (current == cwq->thread) ret = 1; @@ -980,26 +967,12 @@ int current_is_keventd(void) } -static struct cpu_workqueue_struct * -init_cpu_workqueue(struct workqueue_struct *wq, int cpu) -{ - struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); - - cwq->wq = wq; - spin_lock_init(&cwq->lock); - INIT_LIST_HEAD(&cwq->worklist); - init_waitqueue_head(&cwq->more_work); - - return cwq; -} - static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { struct workqueue_struct *wq = cwq->wq; - const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; struct task_struct *p; - p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); + p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu); /* * Nobody can add the work_struct to this cwq, * if (caller is __create_workqueue) @@ -1031,8 +1004,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct lock_class_key *key, const char *lock_name) { + bool singlethread = flags & WQ_SINGLE_THREAD; struct workqueue_struct *wq; - struct cpu_workqueue_struct *cwq; int err = 0, cpu; wq = kzalloc(sizeof(*wq), GFP_KERNEL); @@ -1048,37 +1021,37 @@ struct workqueue_struct *__create_workqueue_key(const char *name, lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); - if (flags & WQ_SINGLE_THREAD) { - cwq = init_cpu_workqueue(wq, singlethread_cpu); - err = create_workqueue_thread(cwq, singlethread_cpu); - start_workqueue_thread(cwq, -1); - } else { - cpu_maps_update_begin(); - /* - * We must place this wq on list even if the code below fails. - * cpu_down(cpu) can remove cpu from cpu_populated_map before - * destroy_workqueue() takes the lock, in that case we leak - * cwq[cpu]->thread. - */ - spin_lock(&workqueue_lock); - list_add(&wq->list, &workqueues); - spin_unlock(&workqueue_lock); - /* - * We must initialize cwqs for each possible cpu even if we - * are going to call destroy_workqueue() finally. Otherwise - * cpu_up() can hit the uninitialized cwq once we drop the - * lock. - */ - for_each_possible_cpu(cpu) { - cwq = init_cpu_workqueue(wq, cpu); - if (err || !cpu_online(cpu)) - continue; - err = create_workqueue_thread(cwq, cpu); + cpu_maps_update_begin(); + /* + * We must initialize cwqs for each possible cpu even if we + * are going to call destroy_workqueue() finally. Otherwise + * cpu_up() can hit the uninitialized cwq once we drop the + * lock. 
+ */ + for_each_possible_cpu(cpu) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + cwq->wq = wq; + cwq->cpu = cpu; + spin_lock_init(&cwq->lock); + INIT_LIST_HEAD(&cwq->worklist); + init_waitqueue_head(&cwq->more_work); + + if (err) + continue; + err = create_workqueue_thread(cwq, cpu); + if (cpu_online(cpu) && !singlethread) start_workqueue_thread(cwq, cpu); - } - cpu_maps_update_done(); + else + start_workqueue_thread(cwq, -1); } + spin_lock(&workqueue_lock); + list_add(&wq->list, &workqueues); + spin_unlock(&workqueue_lock); + + cpu_maps_update_done(); + if (err) { destroy_workqueue(wq); wq = NULL; @@ -1128,17 +1101,16 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) */ void destroy_workqueue(struct workqueue_struct *wq) { - const struct cpumask *cpu_map = wq_cpu_map(wq); int cpu; cpu_maps_update_begin(); spin_lock(&workqueue_lock); list_del(&wq->list); spin_unlock(&workqueue_lock); + cpu_maps_update_done(); - for_each_cpu(cpu, cpu_map) - cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); - cpu_maps_update_done(); + for_each_possible_cpu(cpu) + cleanup_workqueue_thread(get_cwq(cpu, wq)); free_percpu(wq->cpu_wq); kfree(wq); @@ -1152,48 +1124,25 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, unsigned int cpu = (unsigned long)hcpu; struct cpu_workqueue_struct *cwq; struct workqueue_struct *wq; - int err = 0; action &= ~CPU_TASKS_FROZEN; - switch (action) { - case CPU_UP_PREPARE: - cpumask_set_cpu(cpu, cpu_populated_map); - } -undo: list_for_each_entry(wq, &workqueues, list) { - cwq = per_cpu_ptr(wq->cpu_wq, cpu); + if (wq->flags & WQ_SINGLE_THREAD) + continue; - switch (action) { - case CPU_UP_PREPARE: - err = create_workqueue_thread(cwq, cpu); - if (!err) - break; - printk(KERN_ERR "workqueue [%s] for %i failed\n", - wq->name, cpu); - action = CPU_UP_CANCELED; - err = -ENOMEM; - goto undo; - - case CPU_ONLINE: - start_workqueue_thread(cwq, cpu); - break; + cwq = get_cwq(cpu, wq); - case CPU_UP_CANCELED: - start_workqueue_thread(cwq, -1); + switch (action) { case CPU_POST_DEAD: - cleanup_workqueue_thread(cwq); + lock_map_acquire(&cwq