author    Jens Axboe <jens.axboe@oracle.com>    2009-11-03 21:14:39 +0100
committer Jens Axboe <jens.axboe@oracle.com>    2009-11-03 21:14:39 +0100
commit    2058297d2d045cb57138c33b87cfabcc80e65186 (patch)
tree      7ccffd0e162cbd7471f643561e79f23abb989a62 /fs
parent    150e6c67f4bf6ab51e62defc41bd19a2eefe5709 (diff)
parent    4b27e1bb442e964903f8a3fa6bdf33a602dc0941 (diff)
Merge branch 'for-linus' into for-2.6.33
Conflicts:
	block/cfq-iosched.c

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs')
 fs/Kconfig                      |   4
 fs/bio.c                        |  28
 fs/block_dev.c                  |   2
 fs/btrfs/acl.c                  |   6
 fs/btrfs/async-thread.c         |  81
 fs/btrfs/async-thread.h         |  10
 fs/btrfs/btrfs_inode.h          |  18
 fs/btrfs/ctree.h                |  19
 fs/btrfs/disk-io.c              |  50
 fs/btrfs/extent-tree.c          | 252
 fs/btrfs/extent_io.c            |  42
 fs/btrfs/extent_io.h            |  18
 fs/btrfs/file.c                 |  44
 fs/btrfs/inode.c                | 144
 fs/btrfs/ioctl.c                |   7
 fs/btrfs/ordered-data.c         |   6
 fs/btrfs/relocation.c           |   4
 fs/btrfs/super.c                |   9
 fs/btrfs/transaction.c          |  45
 fs/btrfs/transaction.h          |   5
 fs/btrfs/tree-log.c             |  56
 fs/btrfs/tree-log.h             |   3
 fs/btrfs/xattr.c                |   2
 fs/cifs/connect.c               |   3
 fs/dlm/lowcomms.c               |  36
 fs/ecryptfs/Kconfig             |   3
 fs/ecryptfs/main.c              |   7
 fs/ext3/super.c                 |  13
 fs/file.c                       |   1
 fs/hfs/btree.c                  |   5
 fs/hfsplus/wrapper.c            |   4
 fs/nfs/client.c                 |   2
 fs/nfs/dir.c                    |   2
 fs/nfs/direct.c                 |   1
 fs/nfs/nfs4namespace.c          |  12
 fs/nfs/nfs4proc.c               |  15
 fs/nfs/nfs4renewd.c             |   6
 fs/nfs/nfs4xdr.c                |   1
 fs/nfs/super.c                  |  37
 fs/notify/dnotify/dnotify.c     |   3
 fs/notify/inode_mark.c          |   6
 fs/notify/notification.c        |   2
 fs/pipe.c                       |  41
 fs/proc/kcore.c                 |   1
 fs/proc/meminfo.c               |   2
 fs/proc/page.c                  |   5
 fs/romfs/storage.c              |   4
 fs/sysfs/dir.c                  |   3
 fs/sysfs/file.c                 |  14
 fs/xfs/linux-2.6/xfs_aops.c     |  38
 fs/xfs/linux-2.6/xfs_file.c     |   9
 fs/xfs/linux-2.6/xfs_iops.c     |  41
 fs/xfs/linux-2.6/xfs_lrw.c      |   2
 fs/xfs/linux-2.6/xfs_quotaops.c |   2
 fs/xfs/linux-2.6/xfs_super.c    |  59
 fs/xfs/linux-2.6/xfs_sync.c     |  36
 fs/xfs/quota/xfs_qm_syscalls.c  |   1
 fs/xfs/xfs_dfrag.c              |   8
 fs/xfs/xfs_dir2_leaf.c          |   4
 fs/xfs/xfs_ialloc.c             |   1
 fs/xfs/xfs_inode.c              |   4
 fs/xfs/xfs_inode.h              |   2
 fs/xfs/xfs_inode_item.c         |  18
 fs/xfs/xfs_itable.c             |  21
 fs/xfs/xfs_vnodeops.c           |   6
65 files changed, 933 insertions, 403 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index d4bf8caad8d0..2126078a38ed 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -135,8 +135,8 @@ config TMPFS_POSIX_ACL
config HUGETLBFS
bool "HugeTLB file system support"
- depends on X86 || IA64 || PPC64 || SPARC64 || (SUPERH && MMU) || \
- (S390 && 64BIT) || SYS_SUPPORTS_HUGETLBFS || BROKEN
+ depends on X86 || IA64 || PPC_BOOK3S_64 || SPARC64 || (S390 && 64BIT) || \
+ SYS_SUPPORTS_HUGETLBFS || BROKEN
help
hugetlbfs is a filesystem backing for HugeTLB pages, based on
ramfs. For architectures that support it, say Y here and read
diff --git a/fs/bio.c b/fs/bio.c
index 402cb84a92a1..12da5db8682c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -325,8 +325,16 @@ static void bio_fs_destructor(struct bio *bio)
* @gfp_mask: allocation mask to use
* @nr_iovecs: number of iovecs
*
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask
- * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ * bio_alloc will allocate a bio and associated bio_vec array that can hold
+ * at least @nr_iovecs entries. Allocations will be done from the
+ * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
+ *
+ * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool. Callers
+ * that need to allocate more than 1 bio must always submit the previously
+ * allocated bio for IO before attempting to allocate a new one. Failure to
+ * do so can cause livelocks under memory pressure.
*
* RETURNS:
* Pointer to new bio on success, NULL on failure.
@@ -350,21 +358,13 @@ static void bio_kmalloc_destructor(struct bio *bio)
}
/**
- * bio_alloc - allocate a bio for I/O
+ * bio_kmalloc - allocate a bio for I/O using kmalloc()
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
*
* Description:
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one. Failure to
- * do so can cause livelocks under memory pressure.
+ * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
+ * %__GFP_WAIT, the allocation is guaranteed to succeed.
*
**/
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
@@ -407,7 +407,7 @@ EXPORT_SYMBOL(zero_fill_bio);
*
* Description:
* Put a reference to a &struct bio, either one you have gotten with
- * bio_alloc or bio_get. The last put of a bio will free it.
+ * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
**/
void bio_put(struct bio *bio)
{
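
The one-bio-at-a-time rule in the relocated bio_alloc() comment is easy to get wrong; below is a minimal usage sketch, not part of the patch. write_chunks() and its fill-in steps are hypothetical, only bio_alloc()/submit_bio() are real kernel API.

/*
 * Sketch: allocate from fs_bio_set one bio at a time, submitting each
 * before the next bio_alloc(), so the mempool guarantee holds.
 */
static void write_chunks(struct block_device *bdev, int nr_chunks)
{
	struct bio *bio;
	int i;

	for (i = 0; i < nr_chunks; i++) {
		/* GFP_NOIO includes __GFP_WAIT: may sleep, will not fail */
		bio = bio_alloc(GFP_NOIO, 1);
		bio->bi_bdev = bdev;
		/* ... set bi_sector, add pages, set bi_end_io ... */
		submit_bio(WRITE, bio);	/* submit before allocating again */
	}
}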
diff --git a/fs/block_dev.c b/fs/block_dev.c
index dde91e7e1c3a..73d6a735b8f3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1258,8 +1258,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
}
} else {
- put_disk(disk);
module_put(disk->fops->owner);
+ put_disk(disk);
disk = NULL;
if (bdev->bd_contains == bdev) {
if (bdev->bd_disk->fops->open) {
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 69b355ae7f49..361604244271 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -27,7 +27,7 @@
#include "btrfs_inode.h"
#include "xattr.h"
-#ifdef CONFIG_BTRFS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
{
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = {
.set = btrfs_xattr_acl_access_set,
};
-#else /* CONFIG_BTRFS_POSIX_ACL */
+#else /* CONFIG_BTRFS_FS_POSIX_ACL */
int btrfs_acl_chmod(struct inode *inode)
{
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
return 0;
}
-#endif /* CONFIG_BTRFS_POSIX_ACL */
+#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 282ca085c2fb..c0861e781cdb 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,51 @@ struct btrfs_worker_thread {
};
/*
+ * btrfs_start_workers uses kthread_run, which can block waiting for memory
+ * for a very long time. It will actually throttle on page writeback,
+ * and so it may not make progress until after our btrfs worker threads
+ * process all of the pending work structs in their queue.
+ *
+ * This means we can't use btrfs_start_workers from inside a btrfs worker
+ * thread that is used as part of cleaning dirty memory, which pretty much
+ * involves all of the worker threads.
+ *
+ * Instead we have a helper queue that never has more than one thread
+ * where we schedule thread start operations. This worker_start struct
+ * is used to contain the work and hold a pointer to the queue that needs
+ * another worker.
+ */
+struct worker_start {
+ struct btrfs_work work;
+ struct btrfs_workers *queue;
+};
+
+static void start_new_worker_func(struct btrfs_work *work)
+{
+ struct worker_start *start;
+ start = container_of(work, struct worker_start, work);
+ btrfs_start_workers(start->queue, 1);
+ kfree(start);
+}
+
+static int start_new_worker(struct btrfs_workers *queue)
+{
+ struct worker_start *start;
+ int ret;
+
+ start = kzalloc(sizeof(*start), GFP_NOFS);
+ if (!start)
+ return -ENOMEM;
+
+ start->work.func = start_new_worker_func;
+ start->queue = queue;
+ ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
+ if (ret)
+ kfree(start);
+ return ret;
+}
+
+/*
* helper function to move a thread onto the idle list after it
* has finished some requests.
*/
@@ -118,11 +163,13 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
goto out;
workers->atomic_start_pending = 0;
- if (workers->num_workers >= workers->max_workers)
+ if (workers->num_workers + workers->num_workers_starting >=
+ workers->max_workers)
goto out;
+ workers->num_workers_starting += 1;
spin_unlock_irqrestore(&workers->lock, flags);
- btrfs_start_workers(workers, 1);
+ start_new_worker(workers);
return;
out:
@@ -390,9 +437,11 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
/*
* simple init on struct btrfs_workers
*/
-void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
+ struct btrfs_workers *async_helper)
{
workers->num_workers = 0;
+ workers->num_workers_starting = 0;
INIT_LIST_HEAD(&workers->worker_list);
INIT_LIST_HEAD(&workers->idle_list);
INIT_LIST_HEAD(&workers->order_list);
@@ -404,14 +453,15 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
workers->name = name;
workers->ordered = 0;
workers->atomic_start_pending = 0;
- workers->atomic_worker_start = 0;
+ workers->atomic_worker_start = async_helper;
}
/*
* starts new worker threads. This does not enforce the max worker
* count in case you need to temporarily go past it.
*/
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers,
+ int num_workers)
{
struct btrfs_worker_thread *worker;
int ret = 0;
@@ -444,6 +494,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
list_add_tail(&worker->worker_list, &workers->idle_list);
worker->idle = 1;
workers->num_workers++;
+ workers->num_workers_starting--;
+ WARN_ON(workers->num_workers_starting < 0);
spin_unlock_irq(&workers->lock);
}
return 0;
@@ -452,6 +504,14 @@ fail:
return ret;
}
+int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+{
+ spin_lock_irq(&workers->lock);
+ workers->num_workers_starting += num_workers;
+ spin_unlock_irq(&workers->lock);
+ return __btrfs_start_workers(workers, num_workers);
+}
+
/*
* run through the list and find a worker thread that doesn't have a lot
* to do right now. This can return null if we aren't yet at the thread
@@ -461,7 +521,10 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
struct list_head *next;
- int enforce_min = workers->num_workers < workers->max_workers;
+ int enforce_min;
+
+ enforce_min = (workers->num_workers + workers->num_workers_starting) <
+ workers->max_workers;
/*
* if we find an idle thread, don't move it to the end of the
@@ -509,15 +572,17 @@ again:
worker = next_worker(workers);
if (!worker) {
- if (workers->num_workers >= workers->max_workers) {
+ if (workers->num_workers + workers->num_workers_starting >=
+ workers->max_workers) {
goto fallback;
} else if (workers->atomic_worker_start) {
workers->atomic_start_pending = 1;
goto fallback;
} else {
+ workers->num_workers_starting++;
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
- btrfs_start_workers(workers, 1);
+ __btrfs_start_workers(workers, 1);
goto again;
}
}
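
To see how the helper queue above is meant to be wired up, here is a condensed sketch built from the open_ctree() hunks later in this patch; the pool size 8 is an arbitrary stand-in for thread_pool_size.

struct btrfs_workers generic_worker;
struct btrfs_workers endio_workers;

/* the helper queue may sleep in kthread_run, so it gets no helper */
btrfs_init_workers(&generic_worker, "genwork", 1, NULL);

/* endio work runs at interrupt time; defer its thread creation */
btrfs_init_workers(&endio_workers, "endio", 8, &generic_worker);

btrfs_start_workers(&generic_worker, 1);
btrfs_start_workers(&endio_workers, 1);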
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index fc089b95ec14..5077746cf85e 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -64,6 +64,8 @@ struct btrfs_workers {
/* current number of running workers */
int num_workers;
+ int num_workers_starting;
+
/* max number of workers allowed. changed by btrfs_start_workers */
int max_workers;
@@ -78,9 +80,10 @@ struct btrfs_workers {
/*
* are we allowed to sleep while starting workers or are we required
- * to start them at a later time?
+ * to start them at a later time? If we can't sleep, this indicates
+ * which queue we need to use to schedule thread creation.
*/
- int atomic_worker_start;
+ struct btrfs_workers *atomic_worker_start;
/* list with all the work threads. The workers on the idle thread
* may be actively servicing jobs, but they haven't yet hit the
@@ -109,7 +112,8 @@ struct btrfs_workers {
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
-void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max);
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
+ struct btrfs_workers *async_starter);
int btrfs_requeue_work(struct btrfs_work *work);
void btrfs_set_work_high_prio(struct btrfs_work *work);
#endif
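
With atomic_worker_start now a pointer, it still doubles as the old boolean flag; the choice the worker-spawning path makes (paraphrasing the find_worker hunk in async-thread.c above, fallback handling omitted) boils down to:

if (workers->atomic_worker_start) {
	/* atomic context: mark pending, let the helper queue spawn it */
	workers->atomic_start_pending = 1;
} else {
	/* safe to sleep: spawn the thread inline */
	workers->num_workers_starting++;
	__btrfs_start_workers(workers, 1);
}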
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index a54d354cefcb..f6783a42f010 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -86,6 +86,12 @@ struct btrfs_inode {
* transid of the trans_handle that last modified this inode
*/
u64 last_trans;
+
+ /*
+ * log transid when this inode was last modified
+ */
+ u64 last_sub_trans;
+
/*
* transid that last logged this inode
*/
@@ -128,12 +134,14 @@ struct btrfs_inode {
u64 last_unlink_trans;
/*
- * These two counters are for delalloc metadata reservations. We keep
- * track of how many extents we've accounted for vs how many extents we
- * have.
+ * Counters to keep track of the number of extent items we may use due
+ * to delalloc and such. outstanding_extents is the number of extent
+ * items we think we'll end up using, and reserved_extents is the number
+ * of extent items we've reserved metadata for.
*/
- int delalloc_reserved_extents;
- int delalloc_extents;
+ spinlock_t accounting_lock;
+ int reserved_extents;
+ int outstanding_extents;
/*
* ordered_data_close is set by truncate when a file that used
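
The call sites that bump these counters live in inode.c/extent-tree.c and are not part of this hunk; a hedged sketch of the intended pattern:

/* sketch: note one more extent item we expect to use for delalloc */
spin_lock(&BTRFS_I(inode)->accounting_lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->accounting_lock);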
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index dd8ced9814c4..444b3e9b92a4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -691,14 +691,17 @@ struct btrfs_space_info {
struct list_head list;
+ /* for controlling how we free up space for allocations */
+ wait_queue_head_t allocate_wait;
+ wait_queue_head_t flush_wait;
+ int allocating_chunk;
+ int flushing;
+
/* for block groups in our same type */
struct list_head block_groups;
spinlock_t lock;
struct rw_semaphore groups_sem;
atomic_t caching_threads;
-
- int allocating_chunk;
- wait_queue_head_t wait;
};
/*
@@ -907,6 +910,7 @@ struct btrfs_fs_info {
* A third pool does submit_bio to avoid deadlocking with the other
* two
*/
+ struct btrfs_workers generic_worker;
struct btrfs_workers workers;
struct btrfs_workers delalloc_workers;
struct btrfs_workers endio_workers;
@@ -914,6 +918,7 @@ struct btrfs_fs_info {
struct btrfs_workers endio_meta_write_workers;
struct btrfs_workers endio_write_workers;
struct btrfs_workers submit_workers;
+ struct btrfs_workers enospc_workers;
/*
* fixup workers take dirty pages that didn't properly go through
* the cow mechanism and make them safe to write. It happens
@@ -1004,7 +1009,10 @@ struct btrfs_root {
atomic_t log_writers;
atomic_t log_commit[2];
unsigned long log_transid;
+ unsigned long last_log_commit;
unsigned long log_batch;
+ pid_t log_start_pid;
+ bool log_multiple_pids;
u64 objectid;
u64 last_trans;
@@ -1145,6 +1153,7 @@ struct btrfs_root {
#define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7)
#define BTRFS_MOUNT_SSD_SPREAD (1 << 8)
#define BTRFS_MOUNT_NOSSD (1 << 9)
+#define BTRFS_MOUNT_DISCARD (1 << 10)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2323,7 +2332,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
void btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t size);
int btrfs_invalidate_inodes(struct btrfs_root *root);
-extern struct dentry_operations btrfs_dentry_operations;
+extern const struct dentry_operations btrfs_dentry_operations;
/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -2366,7 +2375,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options);
int btrfs_sync_fs(struct super_block *sb, int wait);
/* acl.c */
-#ifdef CONFIG_BTRFS_POSIX_ACL
+#ifdef CONFIG_BTRFS_FS_POSIX_ACL
int btrfs_check_acl(struct inode *inode, int mask);
#else
#define btrfs_check_acl NULL
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index af0435f79fa6..02b6afbd7450 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -917,6 +917,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
atomic_set(&root->log_writers, 0);
root->log_batch = 0;
root->log_transid = 0;
+ root->last_log_commit = 0;
extent_io_tree_init(&root->dirty_log_pages,
fs_info->btree_inode->i_mapping, GFP_NOFS);
@@ -1087,6 +1088,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
WARN_ON(root->log_root);
root->log_root = log_root;
root->log_transid = 0;
+ root->last_log_commit = 0;
return 0;
}
@@ -1746,21 +1748,25 @@ struct btrfs_root *open_ctree(struct super_block *sb,
err = -EINVAL;
goto fail_iput;
}
-printk("thread pool is %d\n", fs_info->thread_pool_size);
- /*
- * we need to start all the end_io workers up front because the
- * queue work function gets called at interrupt time, and so it
- * cannot dynamically grow.
- */
+
+ btrfs_init_workers(&fs_info->generic_worker,
+ "genwork", 1, NULL);
+
btrfs_init_workers(&fs_info->workers, "worker",
- fs_info->thread_pool_size);
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
- fs_info->thread_pool_size);
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
btrfs_init_workers(&fs_info->submit_workers, "submit",
min_t(u64, fs_devices->num_devices,
- fs_info->thread_pool_size));
+ fs_info->thread_pool_size),
+ &fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->enospc_workers, "enospc",
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
/* a higher idle thresh on the submit workers makes it much more
* likely that bios will be send down in a sane order to the
@@ -1774,15 +1780,20 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
fs_info->delalloc_workers.idle_thresh = 2;
fs_info->delalloc_workers.ordered = 1;
- btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
+ btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
+ &fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_workers, "endio",
- fs_info->thread_pool_size);
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
- fs_info->thread_pool_size);
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_meta_write_workers,
- "endio-meta-write", fs_info->thread_pool_size);
+ "endio-meta-write", fs_info->thread_pool_size,
+ &fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
- fs_info->thread_pool_size);
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
/*
* endios are largely parallel and should have a very
@@ -1794,12 +1805,8 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
fs_info->endio_write_workers.idle_thresh = 2;
fs_info->endio_meta_write_workers.idle_thresh = 2;
- fs_info->endio_workers.atomic_worker_start = 1;
- fs_info->endio_meta_workers.atomic_worker_start = 1;
- fs_info->endio_write_workers.atomic_worker_start = 1;
- fs_info->endio_meta_write_workers.atomic_worker_start = 1;
-
btrfs_start_workers(&fs_info->workers, 1);
+ btrfs_start_workers(&fs_info->generic_worker, 1);
btrfs_start_workers(&fs_info->submit_workers, 1);
btrfs_start_workers(&fs_info->delalloc_workers, 1);
btrfs_start_workers(&fs_info->fixup_workers, 1);
@@ -1807,6 +1814,7 @@ printk("thread pool is %d\n", fs_info->thread_pool_size);
btrfs_start_workers(&fs_info->endio_meta_workers, 1);
btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
btrfs_start_workers(&fs_info->endio_write_workers, 1);
+ btrfs_start_workers(&fs_info->enospc_workers, 1);
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2012,6 +2020,7 @@ fail_chunk_root:
free_extent_buffer(chunk_root->node);
free_extent_buffer(chunk_root->commit_root);
fail_sb_buffer:
+ btrfs_stop_workers(&fs_info->generic_worker);
btrfs_stop_workers(&fs_info->fixup_workers);
btrfs_stop_workers(&fs_info->delalloc_workers);
btrfs_stop_workers(&fs_info->workers);
@@ -2020,6 +2029,7 @@ fail_sb_buffer:
btrfs_stop_workers(&fs_info->endio_meta_write_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->submit_workers);
+ btrfs_stop_workers(&fs_info->enospc_workers);
fail_iput:
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
iput(fs_info->btree_inode);
@@ -2437,6 +2447,7 @@ int close_ctree(struct btrfs_root *root)
iput(fs_info->btree_inode);
+ btrfs_stop_workers(&fs_info->generic_worker);
btrfs_stop_workers(&fs_info->fixup_workers);
btrfs_stop_workers(&fs_info->delalloc_workers);
btrfs_stop_workers(&fs_info->workers);
@@ -2445,6 +2456,7 @@ int close_c