author    Jeff Mahoney <jeffm@suse.com>    2012-03-12 16:03:00 +0100
committer David Sterba <dsterba@suse.cz>   2012-03-22 11:52:54 +0100
commit    79787eaab46121d4713ed03c8fc63b9ec3eaec76 (patch)
tree      ee6b17d0811ee54ab74a03aa4e0bb92769d2f12a
parent    49b25e0540904be0bf558b84475c69d72e4de66e (diff)
btrfs: replace many BUG_ONs with proper error handling
btrfs currently handles most errors with BUG_ON. This patch is a
work-in-progress but aims to handle most errors other than internal
logic errors and ENOMEM more gracefully.

This iteration prevents most crashes but can run into lockups with
the page lock on occasion when the timing "works out."

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
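The change applies one pattern over and over: where a helper's return value
used to be asserted with BUG_ON(), the caller now propagates the error and,
when a transaction is in progress, aborts it so the filesystem is forced
read-only instead of oopsing. A minimal sketch of the before/after shape is
below; do_some_update() is a hypothetical helper standing in for the various
btrfs functions touched here, while btrfs_abort_transaction() is the real
call used throughout the hunks.

/* Before: any failure, -ENOMEM included, took the whole kernel down. */
static int update_something_old(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	int ret = do_some_update(trans, root);	/* hypothetical helper */

	BUG_ON(ret);
	return 0;
}

/* After: report the error, abort the transaction (forcing the filesystem
 * read-only), and hand the error code back to the caller. */
static int update_something_new(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	int ret = do_some_update(trans, root);	/* hypothetical helper */

	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	return 0;
}

The BUG_ON()s that survive are annotated with the only errors that can reach
them, which is what the /* -ENOMEM */ and /* Corruption */ comments in the
hunks below record.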
-rw-r--r--  fs/btrfs/compression.c        24
-rw-r--r--  fs/btrfs/ctree.c              19
-rw-r--r--  fs/btrfs/delayed-inode.c      27
-rw-r--r--  fs/btrfs/dir-item.c            1
-rw-r--r--  fs/btrfs/disk-io.c           116
-rw-r--r--  fs/btrfs/export.c              2
-rw-r--r--  fs/btrfs/extent-tree.c       316
-rw-r--r--  fs/btrfs/extent_io.c          40
-rw-r--r--  fs/btrfs/file-item.c          13
-rw-r--r--  fs/btrfs/file.c               45
-rw-r--r--  fs/btrfs/free-space-cache.c   13
-rw-r--r--  fs/btrfs/inode-item.c          1
-rw-r--r--  fs/btrfs/inode-map.c          19
-rw-r--r--  fs/btrfs/inode.c             378
-rw-r--r--  fs/btrfs/ioctl.c              79
-rw-r--r--  fs/btrfs/orphan.c              2
-rw-r--r--  fs/btrfs/relocation.c         35
-rw-r--r--  fs/btrfs/root-tree.c          12
-rw-r--r--  fs/btrfs/scrub.c               3
-rw-r--r--  fs/btrfs/super.c               2
-rw-r--r--  fs/btrfs/transaction.c        36
-rw-r--r--  fs/btrfs/tree-log.c           79
-rw-r--r--  fs/btrfs/volumes.c           103
23 files changed, 980 insertions, 385 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index cfd158dccdec..d11afa67c7d8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -391,16 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
*/
atomic_inc(&cb->pending_bios);
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio,
start, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
bio_put(bio);
@@ -420,15 +420,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_get(bio);
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
bio_put(bio);
return 0;
@@ -661,7 +661,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
/*
* inc the count before we submit the bio so
@@ -674,14 +674,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(root, inode,
comp_bio, sums);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
sums += (comp_bio->bi_size + root->sectorsize - 1) /
root->sectorsize;
ret = btrfs_map_bio(root, READ, comp_bio,
mirror_num, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
bio_put(comp_bio);
@@ -697,15 +697,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
bio_put(comp_bio);
return 0;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 651a26a6c651..e697afd18159 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -356,14 +356,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
ret = btrfs_inc_ref(trans, root, buf, 1, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_TREE_RELOC_OBJECTID) {
ret = btrfs_dec_ref(trans, root, buf, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
ret = btrfs_inc_ref(trans, root, cow, 1, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
} else {
@@ -373,7 +373,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
ret = btrfs_inc_ref(trans, root, cow, 1, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
if (new_flags != 0) {
ret = btrfs_set_disk_extent_flags(trans, root,
@@ -390,9 +390,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
ret = btrfs_inc_ref(trans, root, cow, 1, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
ret = btrfs_dec_ref(trans, root, buf, 1, 1);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
clean_tree_block(trans, root, buf);
*last_ref = 1;
@@ -475,7 +475,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
if (ret) {
- btrfs_std_error(root->fs_info, ret);
+ btrfs_abort_transaction(trans, root, ret);
return ret;
}
@@ -2713,7 +2713,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
path->nodes[1], slot - 1, &left);
if (ret) {
/* we hit -ENOSPC, but it isn't fatal here */
- ret = 1;
+ if (ret == -ENOSPC)
+ ret = 1;
goto out;
}
@@ -4017,7 +4018,7 @@ find_next_key:
}
btrfs_set_path_blocking(path);
cur = read_node_slot(root, cur, slot);
- BUG_ON(!cur);
+ BUG_ON(!cur); /* -ENOMEM */
btrfs_tree_read_lock(cur);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 6829590d0fbb..03e3748d84d0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -115,6 +115,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
return NULL;
}
+/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
struct inode *inode)
{
@@ -1106,16 +1107,25 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
return 0;
}
-/* Called when committing the transaction. */
+/*
+ * Called when committing the transaction.
+ * Returns 0 on success.
+ * Returns < 0 on error and returns with an aborted transaction with any
+ * outstanding delayed items cleaned up.
+ */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ struct btrfs_root *curr_root = root;
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret = 0;
+ if (trans->aborted)
+ return -EIO;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1128,17 +1138,18 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
curr_node = btrfs_first_delayed_node(delayed_root);
while (curr_node) {
- root = curr_node->root;
- ret = btrfs_insert_delayed_items(trans, path, root,
+ curr_root = curr_node->root;
+ ret = btrfs_insert_delayed_items(trans, path, curr_root,
curr_node);
if (!ret)
- ret = btrfs_delete_delayed_items(trans, path, root,
- curr_node);
+ ret = btrfs_delete_delayed_items(trans, path,
+ curr_root, curr_node);
if (!ret)
- ret = btrfs_update_delayed_inode(trans, root, path,
- curr_node);
+ ret = btrfs_update_delayed_inode(trans, curr_root,
+ path, curr_node);
if (ret) {
btrfs_release_delayed_node(curr_node);
+ btrfs_abort_transaction(trans, root, ret);
break;
}
@@ -1149,6 +1160,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
btrfs_free_path(path);
trans->block_rsv = block_rsv;
+
return ret;
}
@@ -1369,6 +1381,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
btrfs_wq_run_delayed_node(delayed_root, root, 0);
}
+/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
int name_len, struct inode *dir,
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 76743308bd9f..c1a074d0696f 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -115,6 +115,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
* 'location' is the key to stuff into the directory item, 'type' is the
* type of the inode we're pointing to, and 'index' is the sequence number
* to use for the second index (if one is created).
+ * Will return 0 or -ENOMEM
*/
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
*root, const char *name, int name_len,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 6297a030ac50..16a0cada26c2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -98,6 +98,7 @@ struct async_submit_bio {
*/
u64 bio_offset;
struct btrfs_work work;
+ int error;
};
/*
@@ -405,7 +406,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
u64 found_start;
unsigned long len;
struct extent_buffer *eb;
- int ret;
+ int ret = -EIO;
tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -423,13 +424,20 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
eb = alloc_extent_buffer(tree, start, len, page);
if (eb == NULL) {
WARN_ON(1);
+ ret = -ENOMEM;
goto out;
}
ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
btrfs_header_generation(eb));
- BUG_ON(ret);
+ if (ret) {
+ btrfs_printk(root->fs_info, KERN_WARNING
+ "Failed to checksum dirty buffer @ %llu[%lu]\n",
+ start, len);
+ goto err;
+ }
WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
+ ret = -EIO;
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
WARN_ON(1);
@@ -444,10 +452,11 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
goto err;
}
csum_tree_block(root, eb, 0);
+ ret = 0;
err:
free_extent_buffer(eb);
out:
- return 0;
+ return ret;
}
static int check_tree_block_fsid(struct btrfs_root *root,
@@ -718,11 +727,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
static void run_one_async_start(struct btrfs_work *work)
{
struct async_submit_bio *async;
+ int ret;
async = container_of(work, struct async_submit_bio, work);
- async->submit_bio_start(async->inode, async->rw, async->bio,
- async->mirror_num, async->bio_flags,
- async->bio_offset);
+ ret = async->submit_bio_start(async->inode, async->rw, async->bio,
+ async->mirror_num, async->bio_flags,
+ async->bio_offset);
+ if (ret)
+ async->error = ret;
}
static void run_one_async_done(struct btrfs_work *work)
@@ -743,6 +755,12 @@ static void run_one_async_done(struct btrfs_work *work)
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
+ /* If an error occurred we just want to clean up the bio and move on */
+ if (async->error) {
+ bio_endio(async->bio, async->error);
+ return;
+ }
+
async->submit_bio_done(async->inode, async->rw, async->bio,
async->mirror_num, async->bio_flags,
async->bio_offset);
@@ -784,6 +802,8 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
async->bio_flags = bio_flags;
async->bio_offset = bio_offset;
+ async->error = 0;
+
atomic_inc(&fs_info->nr_async_submits);
if (rw & REQ_SYNC)
@@ -805,15 +825,18 @@ static int btree_csum_one_bio(struct bio *bio)
struct bio_vec *bvec = bio->bi_io_vec;
int bio_index = 0;
struct btrfs_root *root;
+ int ret = 0;
WARN_ON(bio->bi_vcnt <= 0);
while (bio_index < bio->bi_vcnt) {
root = BTRFS_I(bvec->bv_page->mapping->host)->root;
- csum_dirty_buffer(root, bvec->bv_page);
+ ret = csum_dirty_buffer(root, bvec->bv_page);
+ if (ret)
+ break;
bio_index++;
bvec++;
}
- return 0;
+ return ret;
}
static int __btree_submit_bio_start(struct inode *inode, int rw,
@@ -825,8 +848,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- btree_csum_one_bio(bio);
- return 0;
+ return btree_csum_one_bio(bio);
}
static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
@@ -1381,7 +1403,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
root->commit_root = btrfs_root_node(root);
- BUG_ON(!root->node);
+ BUG_ON(!root->node); /* -ENOMEM */
out:
if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
root->ref_cows = 1;
@@ -1618,7 +1640,6 @@ static int transaction_kthread(void *arg)
u64 transid;
unsigned long now;
unsigned long delay;
- int ret;
do {
delay = HZ * 30;
@@ -1642,11 +1663,12 @@ static int transaction_kthread(void *arg)
transid = cur->transid;
spin_unlock(&root->fs_info->trans_lock);
+ /* If the file system is aborted, this will always fail. */
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ goto sleep;
if (transid == trans->transid) {
- ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
+ btrfs_commit_transaction(trans, root);
} else {
btrfs_end_transaction(trans, root);
}
@@ -2289,7 +2311,7 @@ int open_ctree(struct super_block *sb,
chunk_root->node = read_tree_block(chunk_root,
btrfs_super_chunk_root(disk_super),
blocksize, generation);
- BUG_ON(!chunk_root->node);
+ BUG_ON(!chunk_root->node); /* -ENOMEM */
if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
sb->s_id);
@@ -2429,21 +2451,31 @@ retry_root_backup:
log_tree_root->node = read_tree_block(tree_root, bytenr,
blocksize,
generation + 1);
+ /* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_error(tree_root->fs_info, ret,
+ "Failed to recover log tree");
+ free_extent_buffer(log_tree_root->node);
+ kfree(log_tree_root);
+ goto fail_trans_kthread;
+ }
if (sb->s_flags & MS_RDONLY) {
- ret = btrfs_commit_super(tree_root);
- BUG_ON(ret);
+ ret = btrfs_commit_super(tree_root);
+ if (ret)
+ goto fail_trans_kthread;
}
}
ret = btrfs_find_orphan_roots(tree_root);
- BUG_ON(ret);
+ if (ret)
+ goto fail_trans_kthread;
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_cleanup_fs_roots(fs_info);
- BUG_ON(ret);
+ if (ret) {
+ }
ret = btrfs_recover_relocation(tree_root);
if (ret < 0) {
@@ -2863,6 +2895,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
if (total_errors > max_errors) {
printk(KERN_ERR "btrfs: %d errors while writing supers\n",
total_errors);
+
+ /* This shouldn't happen. FUA is masked off if unsupported */
BUG();
}
@@ -2879,9 +2913,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
}
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
if (total_errors > max_errors) {
- printk(KERN_ERR "btrfs: %d errors while writing supers\n",
- total_errors);
- BUG();
+ btrfs_error(root->fs_info, -EIO,
+ "%d errors while writing supers", total_errors);
+ return -EIO;
}
return 0;
}
@@ -3014,14 +3048,21 @@ int btrfs_commit_super(struct btrfs_root *root)
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
- BUG_ON(ret);
+ if (ret)
+ return ret;
/* run commit again to drop the original snapshot */
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret)
+ return ret;
ret = btrfs_write_and_wait_transaction(NULL, root);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_error(root->fs_info, ret,
+ "Failed to sync btree inode to disk.");
+ return ret;
+ }
ret = write_ctree_super(NULL, root, 0);
return ret;
@@ -3366,8 +3407,8 @@ static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
spin_unlock(&root->fs_info->ordered_extent_lock);
}
-static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
- struct btrfs_root *root)
+int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ struct btrfs_root *root)
{
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -3376,6 +3417,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
delayed_refs = &trans->delayed_refs;
+again:
spin_lock(&delayed_refs->lock);
if (delayed_refs->num_entries == 0) {
spin_unlock(&delayed_refs->lock);
@@ -3397,6 +3439,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_delayed_ref_head *head;
head = btrfs_delayed_node_to_head(ref);
+ spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
kfree(head->extent_op);
delayed_refs->num_heads--;
@@ -3404,8 +3447,9 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
delayed_refs->num_heads_ready--;
list_del_init(&head->cluster);
mutex_unlock(&head->mutex);
+ btrfs_put_delayed_ref(ref);
+ goto again;
}
-
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(ref);
@@ -3649,6 +3693,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
return 0;
}
+static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
+ u64 start, u64 end,
+ struct extent_state *state)
+{
+ struct super_block *sb = page->mapping->host->i_sb;
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ btrfs_error(fs_info, -EIO,
+ "Error occured while writing out btree at %llu", start);
+ return -EIO;
+}
+
static struct extent_io_ops btree_extent_io_ops = {
.write_cache_pages_lock_hook = btree_lock_page_hook,
.readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3656,4 +3711,5 @@ static struct extent_io_ops btree_extent_io_ops = {
.submit_bio_hook = btree_submit_bio_hook,
/* note we're sharing with inode.c for the merge bio hook */
.merge_bio_hook = btrfs_merge_bio_hook,
+ .writepage_io_failed_hook = btree_writepage_io_failed_hook,
};
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 5f77166fd01c..e887ee62b6d4 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -193,7 +193,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
if (ret < 0)
goto fail;
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Key with offset of -1 found */
if (path->slots[0] == 0) {
ret = -ENOENT;
goto fail;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index cd6f8ae0a78d..4b3f1eedced0 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -245,7 +245,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
cache->bytes_super += stripe_len;
ret = add_excluded_extent(root, cache->key.objectid,
stripe_len);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -253,13 +253,13 @@ static int exclude_super_stripes(struct btrfs_root *root,
ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
cache->key.objectid, bytenr,
0, &logical, &nr, &stripe_len);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
while (nr--) {
cache->bytes_super += stripe_len;
ret = add_excluded_extent(root, logical[nr],
stripe_len);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
kfree(logical);
@@ -321,7 +321,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
total_added += size;
ret = btrfs_add_free_space(block_group, start,
size);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM or logic error */
start = extent_end + 1;
} else {
break;
@@ -332,7 +332,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
size = end - start;
total_added += size;
ret = btrfs_add_free_space(block_group, start, size);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM or logic error */
}
return total_added;
@@ -474,7 +474,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
int ret = 0;
caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
- BUG_ON(!caching_ctl);
+ if (!caching_ctl)
+ return -ENOMEM;
INIT_LIST_HEAD(&caching_ctl->list);
mutex_init(&caching_ctl->mutex);
@@ -982,7 +983,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
ret = btrfs_next_leaf(root, path);
if (ret < 0)
return ret;
- BUG_ON(ret > 0);
+ BUG_ON(ret > 0); /* Corruption */
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1008,7 +1009,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
new_size + extra_size, 1);
if (ret < 0)
return ret;
- BUG_ON(ret);
+ BUG_ON(ret); /* Corruption */
btrfs_extend_item(trans, root, path, new_size);
@@ -1478,7 +1479,11 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
err = ret;
goto out;
}
- BUG_ON(ret);
+ if (ret && !insert) {
+ err = -ENOENT;
+ goto out;
+ }
+ BUG_ON(ret); /* Corruption */
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -1831,6 +1836,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
/* Tell the block device(s) that the sectors can be discarded */
ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
bytenr, &num_bytes, &bbio, 0);
+ /* Error condition is -ENOMEM */
if (!ret) {
struct btrfs_bio_stripe *stripe = bbio->stripes;
int i;
@@ -1846,7 +1852,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
if (!ret)
discarded_bytes += stripe->length;
else if (ret != -EOPNOTSUPP)
- break;
+ break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
/*
* Just in case we get back EOPNOTSUPP for some reason,
@@ -1865,6 +1871,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
return ret;
}
+/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -1940,7 +1947,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
ret = insert_extent_backref(trans, root->fs_info->extent_root,
path, bytenr, parent, root_objectid,
owner, offset, refs_to_add);
- BUG_ON(ret);
+ if (ret)
+ btrfs_abort_transaction(trans, root, ret);
out:
btrfs_free_path(path);
return err;
@@ -2027,6 +2035,9 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
int ret;
int err = 0;
+ if (trans->aborted)
+ return 0;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -2124,7 +2135,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
{
- int ret;
+ int ret = 0;
+
+ if (trans->aborted)
+ return 0;
+
if (btrfs_delayed_ref_is_head(node)) {
struct btrfs_delayed_ref_head *head;
/*
@@ -2142,11 +2157,10 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
ret = btrfs_del_csums(trans, root,
node->bytenr,
node->num_bytes);
- BUG_ON(ret);
}
}
mutex_unlock(&head->mutex);
- return 0;
+ return ret;
}
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
@@ -2193,6 +2207,10 @@ again:
return NULL;
}
+/*
+ * Returns 0 on success or if called with an already aborted transaction.
+ * Returns -ENOMEM or -EIO on failure and will abort the transaction.
+ */
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct list_head *cluster)
@@ -2281,9 +2299,13 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ret = run_delayed_extent_op(trans, root,
ref, extent_op);
- BUG_ON(ret);
kfree(extent_op);
+ if (ret) {
+ printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+ return ret;
+ }
+
goto next;
}
@@ -2304,11 +2326,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ret = run_one_delayed_ref(trans, root, ref, extent_op,
must_insert_reserved);
- BUG_ON(ret);
btrfs_put_delayed_ref(ref);
kfree(extent_op);
count++;
+
+ if (ret) {
+ printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+ return ret;
+ }
+
next:
do_chunk_alloc(trans, root->fs_info->extent_root,
2 * 1024 * 1024,
@@ -2343,6 +2370,9 @@ static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
* 0, which means to process everything in the tree at the start
* of the run (but not newly added entries), or it can be some target
* number you'd like to process.
+ *
+ * Returns 0 on success or if called with an aborted transaction
+ * Returns <0 on error and aborts the transaction
*/
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count)
@@ -2358,6 +2388,10 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
unsigned long num_refs = 0;
int consider_waiting;
+ /* We'll clean this up in btrfs_cleanup_transaction */
+ if (trans->aborted)
+ return 0;
+
if (root == root->fs_info->extent_root)
root = root->fs_info->tree_root;
@@ -2415,7 +2449,11 @@ again:
}
ret = run_clustered_refs(trans, root, &cluster);
- BUG_ON(ret < 0);
+ if (ret < 0) {
+ spin_unlock(&delayed_refs->lock);
+ btrfs_abort_transaction(trans, root, ret);
+ return ret;
+ }
count -= min_t(unsigned long, ret, count);
@@ -2580,7 +2618,7 @@ static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Corruption */
ret = -ENOENT;
if (path->slots[0] == 0)
@@ -2734,7 +2772,6 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
}
return 0;
fail:
- BUG();
return ret;
}
@@ -2763,7 +2800,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
if (ret < 0)
goto fail;
- BUG_ON(ret);
+ BUG_ON(ret); /* Corruption */
leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -2771,8 +2808,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
fail:
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
return ret;
+ }
return 0;
}
@@ -2945,7 +2984,8 @@ again:
if (last == 0) {
err = btrfs_run_delayed_refs(trans, root,
(unsigned long)-1);
- BUG_ON(err);
+ if (err) /* File system offline */
+ goto out;
}
cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -2972,7 +3012,9 @@ again:
last = cache->key.objectid + cache->key.offset;
err = write_one_cache_group(trans, root, path, cache);
- BUG_ON(err);
+ if (err) /* File system offline */
+ goto out;
+
btrfs_put_block_group(cache);
}
@@ -2985,7 +3027,8 @@ again:
if (last == 0) {
err = btrfs_run_delayed_refs(trans, root,
(unsigned long)-1);
- BUG_ON(err);
+ if (err) /* File system offline */
+ goto out;
}
cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -3010,20 +3053,21 @@ again:
continue;
}
- btrfs_write_out_cache(root, trans, cache, path);
+ err = btrfs_write_out_cache(root, trans, cache, path);
/*
* If we didn't have an error then the cache state is still
* NEED_WRITE, so we can set it to WRITTEN.
*/
- if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
+ if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
cache->disk_cache_state = BTRFS_DC_WRITTEN;
last = cache->key.objectid + cache->key.offset;
btrfs_put_block_group(cache);
}
+out:
btrfs_free_path(path);
- return 0;
+ return err;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -3407,9 +3451,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
if (!space_info) {
ret = update_space_info(extent_root->fs_info, flags,
0, 0, &space_info);
- BUG_ON(ret);
+ BUG_ON(ret); /* -ENOMEM */
}
- BUG_ON(!space_info);
+ BUG_ON(!space_info); /* Logic error */
again:
spin_lock(&space_info->lock);
@@ -3674,8 +3718,10 @@ again:
ret = wait_event_interruptible(space_info->wait,
!space_info->flush);
/* Must have been interrupted, return */
- if (ret)
+ if (ret) {
+ printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);
return -EINTR;
+ }
spin_lock(&space_info->lock);
}
@@ -3832,8 +3878,9 @@ out:
return ret;
}
-static struct