From 7d7ea89e756ea18a3b08cd396e2a4c0c12d473a8 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 16 Aug 2013 21:20:41 -0400 Subject: ext4: refactor code to read the extent tree block Refactor out the code needed to read the extent tree block into a single read_extent_tree_block() function. In addition to simplifying the code, it also makes sure that we call the ext4_ext_load_extent tracepoint whenever we need to read an extent tree block from disk. Signed-off-by: "Theodore Ts'o" Reviewed-by: Zheng Liu --- fs/ext4/extents.c | 97 ++++++++++++++++++++++++------------------------------- 1 file changed, 43 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 72ba4705d4fa..a40be59ddce6 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -464,25 +464,39 @@ int ext4_ext_check_inode(struct inode *inode) return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); } -static int __ext4_ext_check_block(const char *function, unsigned int line, - struct inode *inode, - struct ext4_extent_header *eh, - int depth, - struct buffer_head *bh) +static struct buffer_head * +__read_extent_tree_block(const char *function, unsigned int line, + struct inode *inode, ext4_fsblk_t pblk, int depth) { - int ret; + struct buffer_head *bh; + int err; + bh = sb_getblk(inode->i_sb, pblk); + if (unlikely(!bh)) + return ERR_PTR(-ENOMEM); + + if (!bh_uptodate_or_lock(bh)) { + trace_ext4_ext_load_extent(inode, pblk, _RET_IP_); + err = bh_submit_read(bh); + if (err < 0) + goto errout; + } if (buffer_verified(bh)) - return 0; - ret = ext4_ext_check(inode, eh, depth); - if (ret) - return ret; + return bh; + err = __ext4_ext_check(function, line, inode, + ext_block_hdr(bh), depth); + if (err) + goto errout; set_buffer_verified(bh); - return ret; + return bh; +errout: + put_bh(bh); + return ERR_PTR(err); + } -#define ext4_ext_check_block(inode, eh, depth, bh) \ - __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh) +#define read_extent_tree_block(inode, pblk, depth) \ + __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), (depth)) #ifdef EXT_DEBUG static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) @@ -748,20 +762,12 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, path[ppos].p_depth = i; path[ppos].p_ext = NULL; - bh = sb_getblk(inode->i_sb, path[ppos].p_block); - if (unlikely(!bh)) { - ret = -ENOMEM; + bh = read_extent_tree_block(inode, path[ppos].p_block, --i); + if (IS_ERR(bh)) { + ret = PTR_ERR(bh); goto err; } - if (!bh_uptodate_or_lock(bh)) { - trace_ext4_ext_load_extent(inode, block, - path[ppos].p_block); - ret = bh_submit_read(bh); - if (ret < 0) { - put_bh(bh); - goto err; - } - } + eh = ext_block_hdr(bh); ppos++; if (unlikely(ppos > depth)) { @@ -773,11 +779,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, } path[ppos].p_bh = bh; path[ppos].p_hdr = eh; - i--; - - ret = ext4_ext_check_block(inode, eh, i, bh); - if (ret < 0) - goto err; } path[ppos].p_depth = i; @@ -1412,29 +1413,21 @@ got_index: ix++; block = ext4_idx_pblock(ix); while (++depth < path->p_depth) { - bh = sb_bread(inode->i_sb, block); - if (bh == NULL) - return -EIO; - eh = ext_block_hdr(bh); /* subtract from p_depth to get proper eh_depth */ - if (ext4_ext_check_block(inode, eh, - path->p_depth - depth, bh)) { - put_bh(bh); - return -EIO; - } + bh = read_extent_tree_block(inode, block, + path->p_depth - depth); + if (IS_ERR(bh)) + return PTR_ERR(bh); + eh = ext_block_hdr(bh); ix = EXT_FIRST_INDEX(eh); 
block = ext4_idx_pblock(ix); put_bh(bh); } - bh = sb_bread(inode->i_sb, block); - if (bh == NULL) - return -EIO; + bh = read_extent_tree_block(inode, block, path->p_depth - depth); + if (IS_ERR(bh)) + return PTR_ERR(bh); eh = ext_block_hdr(bh); - if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) { - put_bh(bh); - return -EIO; - } ex = EXT_FIRST_EXTENT(eh); found_extent: *logical = le32_to_cpu(ex->ee_block); @@ -2829,10 +2822,11 @@ again: ext_debug("move to level %d (block %llu)\n", i + 1, ext4_idx_pblock(path[i].p_idx)); memset(path + i + 1, 0, sizeof(*path)); - bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); - if (!bh) { + bh = read_extent_tree_block(inode, + ext4_idx_pblock(path[i].p_idx), depth - i - 1); + if (IS_ERR(bh)) { /* should we reset i_size? */ - err = -EIO; + err = PTR_ERR(bh); break; } /* Yield here to deal with large extent trees. @@ -2842,11 +2836,6 @@ again: err = -EIO; break; } - if (ext4_ext_check_block(inode, ext_block_hdr(bh), - depth - i - 1, bh)) { - err = -EIO; - break; - } path[i + 1].p_bh = bh; /* save actual number of indexes since this -- cgit v1.2.3 From c349179b4808f7c8e1ff1b4dd967c047eefd24bc Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 16 Aug 2013 21:21:41 -0400 Subject: ext4: print the block number of invalid extent tree blocks When we find an invalid extent tree block, report the block number of the bad block for debugging purposes. Signed-off-by: "Theodore Ts'o" Reviewed-by: Zheng Liu --- fs/ext4/extents.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index a40be59ddce6..6e7b7d928f4a 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -407,7 +407,7 @@ static int ext4_valid_extent_entries(struct inode *inode, static int __ext4_ext_check(const char *function, unsigned int line, struct inode *inode, struct ext4_extent_header *eh, - int depth) + int depth, ext4_fsblk_t pblk) { const char *error_msg; int max = 0; @@ -447,21 +447,21 @@ static int __ext4_ext_check(const char *function, unsigned int line, corrupted: ext4_error_inode(inode, function, line, 0, - "bad header/extent: %s - magic %x, " - "entries %u, max %u(%u), depth %u(%u)", - error_msg, le16_to_cpu(eh->eh_magic), - le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), - max, le16_to_cpu(eh->eh_depth), depth); - + "pblk %llu bad header/extent: %s - magic %x, " + "entries %u, max %u(%u), depth %u(%u)", + (unsigned long long) pblk, error_msg, + le16_to_cpu(eh->eh_magic), + le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), + max, le16_to_cpu(eh->eh_depth), depth); return -EIO; } -#define ext4_ext_check(inode, eh, depth) \ - __ext4_ext_check(__func__, __LINE__, inode, eh, depth) +#define ext4_ext_check(inode, eh, depth, pblk) \ + __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk)) int ext4_ext_check_inode(struct inode *inode) { - return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); + return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0); } static struct buffer_head * @@ -484,7 +484,7 @@ __read_extent_tree_block(const char *function, unsigned int line, if (buffer_verified(bh)) return bh; err = __ext4_ext_check(function, line, inode, - ext_block_hdr(bh), depth); + ext_block_hdr(bh), depth, pblk); if (err) goto errout; set_buffer_verified(bh); @@ -2775,7 +2775,7 @@ again: path[0].p_hdr = ext_inode_hdr(inode); i = 0; - if (ext4_ext_check(inode, path[0].p_hdr, depth)) { + if (ext4_ext_check(inode, path[0].p_hdr, depth, 
0)) { err = -EIO; goto out; } -- cgit v1.2.3 From 3be78c73179c9347bdc0a92b2898063bd2300ff7 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 16 Aug 2013 21:22:41 -0400 Subject: ext4: use unsigned int for es_status values Don't use an unsigned long long for the es_status flags; this requires that we pass 64-bit values around which is painful on 32-bit systems. Instead pass the extent status flags around using the low 4 bits of an unsigned int, and shift them into place when we are reading or writing es_pblk. Signed-off-by: "Theodore Ts'o" Reviewed-by: Zheng Liu --- fs/ext4/extents_status.c | 6 +++--- fs/ext4/extents_status.h | 47 +++++++++++++++++++++++++++-------------------- fs/ext4/inode.c | 6 +++--- 3 files changed, 33 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 91cb110da1b4..ded2615b63e0 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -263,7 +263,7 @@ void ext4_es_find_delayed_extent_range(struct inode *inode, if (tree->cache_es) { es1 = tree->cache_es; if (in_range(lblk, es1->es_lblk, es1->es_len)) { - es_debug("%u cached by [%u/%u) %llu %llx\n", + es_debug("%u cached by [%u/%u) %llu %x\n", lblk, es1->es_lblk, es1->es_len, ext4_es_pblock(es1), ext4_es_status(es1)); goto out; @@ -641,13 +641,13 @@ out: */ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk, - unsigned long long status) + unsigned int status) { struct extent_status newes; ext4_lblk_t end = lblk + len - 1; int err = 0; - es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n", + es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n", lblk, len, pblk, status, inode->i_ino); if (!len) diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index e936730cc5b0..d72af848f989 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h @@ -29,16 +29,26 @@ /* * These flags live in the high bits of extent_status.es_pblk */ -#define EXTENT_STATUS_WRITTEN (1ULL << 63) -#define EXTENT_STATUS_UNWRITTEN (1ULL << 62) -#define EXTENT_STATUS_DELAYED (1ULL << 61) -#define EXTENT_STATUS_HOLE (1ULL << 60) +#define ES_SHIFT 60 + +#define EXTENT_STATUS_WRITTEN (1 << 3) +#define EXTENT_STATUS_UNWRITTEN (1 << 2) +#define EXTENT_STATUS_DELAYED (1 << 1) +#define EXTENT_STATUS_HOLE (1 << 0) #define EXTENT_STATUS_FLAGS (EXTENT_STATUS_WRITTEN | \ EXTENT_STATUS_UNWRITTEN | \ EXTENT_STATUS_DELAYED | \ EXTENT_STATUS_HOLE) +#define ES_WRITTEN (1ULL << 63) +#define ES_UNWRITTEN (1ULL << 62) +#define ES_DELAYED (1ULL << 61) +#define ES_HOLE (1ULL << 60) + +#define ES_MASK (ES_WRITTEN | ES_UNWRITTEN | \ + ES_DELAYED | ES_HOLE) + struct ext4_sb_info; struct ext4_extent; @@ -60,7 +70,7 @@ extern void ext4_es_init_tree(struct ext4_es_tree *tree); extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk, - unsigned long long status); + unsigned int status); extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len); extern void ext4_es_find_delayed_extent_range(struct inode *inode, @@ -72,32 +82,32 @@ extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex); static inline int ext4_es_is_written(struct extent_status *es) { - return (es->es_pblk & EXTENT_STATUS_WRITTEN) != 0; + return (es->es_pblk & ES_WRITTEN) != 0; } static inline int ext4_es_is_unwritten(struct extent_status *es) { - return (es->es_pblk & EXTENT_STATUS_UNWRITTEN) != 0; + return (es->es_pblk & 
ES_UNWRITTEN) != 0; } static inline int ext4_es_is_delayed(struct extent_status *es) { - return (es->es_pblk & EXTENT_STATUS_DELAYED) != 0; + return (es->es_pblk & ES_DELAYED) != 0; } static inline int ext4_es_is_hole(struct extent_status *es) { - return (es->es_pblk & EXTENT_STATUS_HOLE) != 0; + return (es->es_pblk & ES_HOLE) != 0; } -static inline ext4_fsblk_t ext4_es_status(struct extent_status *es) +static inline unsigned int ext4_es_status(struct extent_status *es) { - return (es->es_pblk & EXTENT_STATUS_FLAGS); + return es->es_pblk >> ES_SHIFT; } static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es) { - return (es->es_pblk & ~EXTENT_STATUS_FLAGS); + return es->es_pblk & ~ES_MASK; } static inline void ext4_es_store_pblock(struct extent_status *es, @@ -105,19 +115,16 @@ static inline void ext4_es_store_pblock(struct extent_status *es, { ext4_fsblk_t block; - block = (pb & ~EXTENT_STATUS_FLAGS) | - (es->es_pblk & EXTENT_STATUS_FLAGS); + block = (pb & ~ES_MASK) | (es->es_pblk & ES_MASK); es->es_pblk = block; } static inline void ext4_es_store_status(struct extent_status *es, - unsigned long long status) + unsigned int status) { - ext4_fsblk_t block; - - block = (status & EXTENT_STATUS_FLAGS) | - (es->es_pblk & ~EXTENT_STATUS_FLAGS); - es->es_pblk = block; + es->es_pblk = (((ext4_fsblk_t) + (status & EXTENT_STATUS_FLAGS) << ES_SHIFT) | + (es->es_pblk & ~ES_MASK)); } extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c2ca04e67a4f..0569c745475c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -553,7 +553,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, } if (retval > 0) { int ret; - unsigned long long status; + unsigned int status; if (unlikely(retval != map->m_len)) { ext4_warning(inode->i_sb, @@ -653,7 +653,7 @@ found: if (retval > 0) { int ret; - unsigned long long status; + unsigned int status; if (unlikely(retval != map->m_len)) { ext4_warning(inode->i_sb, @@ -1633,7 +1633,7 @@ add_delayed: set_buffer_delay(bh); } else if (retval > 0) { int ret; - unsigned long long status; + unsigned int status; if (unlikely(retval != map->m_len)) { ext4_warning(inode->i_sb, -- cgit v1.2.3 From 107a7bd31ac003e42c0f966aa8e5b26947de6024 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 16 Aug 2013 21:23:41 -0400 Subject: ext4: cache all of an extent tree's leaf block upon reading When we read in an extent tree leaf block from disk, arrange to have all of its entries cached. In nearly all cases the in-memory representation will be more compact than the on-disk representation in the buffer cache, and it allows us to get the information without having to traverse the extent tree for successive extents. Signed-off-by: "Theodore Ts'o" Reviewed-by: Zheng Liu --- fs/ext4/ext4.h | 14 +++++++- fs/ext4/extents.c | 87 +++++++++++++++++++++++++++++++++--------------- fs/ext4/extents_status.c | 37 +++++++++++++++++++- fs/ext4/extents_status.h | 3 ++ fs/ext4/migrate.c | 2 +- fs/ext4/move_extent.c | 2 +- 6 files changed, 114 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0ab26fbf3380..c74b1948feb0 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -560,6 +560,17 @@ enum { /* Do not put hole in extent cache */ #define EXT4_GET_BLOCKS_NO_PUT_HOLE 0x0200 +/* + * The bit position of this flag must not overlap with any of the + * EXT4_GET_BLOCKS_*. 
It is used by ext4_ext_find_extent(), + * read_extent_tree_block(), ext4_split_extent_at(), + * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf() to + * indicate that the we shouldn't be caching the extents when reading + * from the extent tree while a truncate or punch hole operation + * is in progress. + */ +#define EXT4_EX_NOCACHE 0x0400 + /* * Flags used by ext4_free_blocks */ @@ -2684,7 +2695,8 @@ extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *, int); extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t, - struct ext4_ext_path *); + struct ext4_ext_path *, + int flags); extern void ext4_ext_drop_refs(struct ext4_ext_path *); extern int ext4_ext_check_inode(struct inode *inode); extern int ext4_find_delalloc_range(struct inode *inode, diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 6e7b7d928f4a..08c1ac976479 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -466,7 +466,8 @@ int ext4_ext_check_inode(struct inode *inode) static struct buffer_head * __read_extent_tree_block(const char *function, unsigned int line, - struct inode *inode, ext4_fsblk_t pblk, int depth) + struct inode *inode, ext4_fsblk_t pblk, int depth, + int flags) { struct buffer_head *bh; int err; @@ -488,6 +489,32 @@ __read_extent_tree_block(const char *function, unsigned int line, if (err) goto errout; set_buffer_verified(bh); + /* + * If this is a leaf block, cache all of its entries + */ + if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { + struct ext4_extent_header *eh = ext_block_hdr(bh); + struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); + ext4_lblk_t prev = 0; + int i; + + for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { + unsigned int status = EXTENT_STATUS_WRITTEN; + ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); + int len = ext4_ext_get_actual_len(ex); + + if (prev && (prev != lblk)) + ext4_es_cache_extent(inode, prev, + lblk - prev, ~0, + EXTENT_STATUS_HOLE); + + if (ext4_ext_is_uninitialized(ex)) + status = EXTENT_STATUS_UNWRITTEN; + ext4_es_cache_extent(inode, lblk, len, + ext4_ext_pblock(ex), status); + prev = lblk + len; + } + } return bh; errout: put_bh(bh); @@ -495,8 +522,9 @@ errout: } -#define read_extent_tree_block(inode, pblk, depth) \ - __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), (depth)) +#define read_extent_tree_block(inode, pblk, depth, flags) \ + __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \ + (depth), (flags)) #ifdef EXT_DEBUG static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) @@ -730,7 +758,7 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode) struct ext4_ext_path * ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, - struct ext4_ext_path *path) + struct ext4_ext_path *path, int flags) { struct ext4_extent_header *eh; struct buffer_head *bh; @@ -762,7 +790,8 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, path[ppos].p_depth = i; path[ppos].p_ext = NULL; - bh = read_extent_tree_block(inode, path[ppos].p_block, --i); + bh = read_extent_tree_block(inode, path[ppos].p_block, --i, + flags); if (IS_ERR(bh)) { ret = PTR_ERR(bh); goto err; @@ -1199,7 +1228,8 @@ out: * if no free index is found, then it requests in-depth growing. 
*/ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, - unsigned int flags, + unsigned int mb_flags, + unsigned int gb_flags, struct ext4_ext_path *path, struct ext4_extent *newext) { @@ -1221,7 +1251,7 @@ repeat: if (EXT_HAS_FREE_INDEX(curp)) { /* if we found index with free entry, then use that * entry: create all needed subtree and add new leaf */ - err = ext4_ext_split(handle, inode, flags, path, newext, i); + err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); if (err) goto out; @@ -1229,12 +1259,12 @@ repeat: ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), - path); + path, gb_flags); if (IS_ERR(path)) err = PTR_ERR(path); } else { /* tree is full, time to grow in depth */ - err = ext4_ext_grow_indepth(handle, inode, flags, newext); + err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext); if (err) goto out; @@ -1242,7 +1272,7 @@ repeat: ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), - path); + path, gb_flags); if (IS_ERR(path)) { err = PTR_ERR(path); goto out; @@ -1415,7 +1445,7 @@ got_index: while (++depth < path->p_depth) { /* subtract from p_depth to get proper eh_depth */ bh = read_extent_tree_block(inode, block, - path->p_depth - depth); + path->p_depth - depth, 0); if (IS_ERR(bh)) return PTR_ERR(bh); eh = ext_block_hdr(bh); @@ -1424,7 +1454,7 @@ got_index: put_bh(bh); } - bh = read_extent_tree_block(inode, block, path->p_depth - depth); + bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); if (IS_ERR(bh)) return PTR_ERR(bh); eh = ext_block_hdr(bh); @@ -1786,7 +1816,7 @@ out: */ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, - struct ext4_extent *newext, int flag) + struct ext4_extent *newext, int gb_flags) { struct ext4_extent_header *eh; struct ext4_extent *ex, *fex; @@ -1795,7 +1825,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, int depth, len, err; ext4_lblk_t next; unsigned uninitialized = 0; - int flags = 0; + int mb_flags = 0; if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); @@ -1810,7 +1840,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, } /* try to insert block into found extent and return */ - if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)) { + if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { /* * Try to see whether we should rather test the extent on @@ -1913,7 +1943,7 @@ prepend: if (next != EXT_MAX_BLOCKS) { ext_debug("next leaf block - %u\n", next); BUG_ON(npath != NULL); - npath = ext4_ext_find_extent(inode, next, NULL); + npath = ext4_ext_find_extent(inode, next, NULL, 0); if (IS_ERR(npath)) return PTR_ERR(npath); BUG_ON(npath->p_depth != path->p_depth); @@ -1932,9 +1962,10 @@ prepend: * There is no free space in the found leaf. * We're gonna add a new leaf in the tree. 
*/ - if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL) - flags = EXT4_MB_USE_RESERVED; - err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); + if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) + mb_flags = EXT4_MB_USE_RESERVED; + err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, + path, newext); if (err) goto cleanup; depth = ext_depth(inode); @@ -2000,7 +2031,7 @@ has_space: merge: /* try to merge extents */ - if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) + if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) ext4_ext_try_to_merge(handle, inode, path, nearex); @@ -2043,7 +2074,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode, path = NULL; } - path = ext4_ext_find_extent(inode, block, path); + path = ext4_ext_find_extent(inode, block, path, 0); if (IS_ERR(path)) { up_read(&EXT4_I(inode)->i_data_sem); err = PTR_ERR(path); @@ -2705,7 +2736,7 @@ again: ext4_lblk_t ee_block; /* find extent for this block */ - path = ext4_ext_find_extent(inode, end, NULL); + path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path)) { ext4_journal_stop(handle); return PTR_ERR(path); @@ -2747,6 +2778,7 @@ again: */ err = ext4_split_extent_at(handle, inode, path, end + 1, split_flag, + EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_METADATA_NOFAIL); @@ -2823,7 +2855,8 @@ again: i + 1, ext4_idx_pblock(path[i].p_idx)); memset(path + i + 1, 0, sizeof(*path)); bh = read_extent_tree_block(inode, - ext4_idx_pblock(path[i].p_idx), depth - i - 1); + ext4_idx_pblock(path[i].p_idx), depth - i - 1, + EXT4_EX_NOCACHE); if (IS_ERR(bh)) { /* should we reset i_size? */ err = PTR_ERR(bh); @@ -3170,7 +3203,7 @@ static int ext4_split_extent(handle_t *handle, * result in split of original leaf or extent zeroout. */ ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, map->m_lblk, path); + path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = ext_depth(inode); @@ -3554,7 +3587,7 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, if (err < 0) goto out; ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, map->m_lblk, path); + path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); if (IS_ERR(path)) { err = PTR_ERR(path); goto out; @@ -4041,7 +4074,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); /* find extent for this block */ - path = ext4_ext_find_extent(inode, map->m_lblk, NULL); + path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0); if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; @@ -4760,6 +4793,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, error = ext4_fill_fiemap_extents(inode, start_blk, len_blks, fieinfo); } - + ext4_es_lru_add(inode); return error; } diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index ded2615b63e0..1dc5df016e25 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -419,7 +419,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode, unsigned short ee_len; int depth, ee_status, es_status; - path = ext4_ext_find_extent(inode, es->es_lblk, NULL); + path = ext4_ext_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path)) return; @@ -683,6 +683,41 @@ error: return err; } +/* + * ext4_es_cache_extent() inserts information into the extent status + * tree if and only if there isn't information about the range in + * question already. 
+ */ +void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk, + ext4_lblk_t len, ext4_fsblk_t pblk, + unsigned int status) +{ + struct extent_status *es; + struct extent_status newes; + ext4_lblk_t end = lblk + len - 1; + + newes.es_lblk = lblk; + newes.es_len = len; + ext4_es_store_pblock(&newes, pblk); + ext4_es_store_status(&newes, status); + trace_ext4_es_cache_extent(inode, &newes); + + if (!len) + return; + + BUG_ON(end < lblk); + + write_lock(&EXT4_I(inode)->i_es_lock); + + es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk); + if (es && ((es->es_lblk <= lblk) || (es->es_lblk <= end))) + goto out; + + __es_insert_extent(inode, &newes); +out: + write_unlock(&EXT4_I(inode)->i_es_lock); +} + /* * ext4_es_lookup_extent() looks up an extent in extent status tree. * diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index d72af848f989..3e83aef3653a 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h @@ -71,6 +71,9 @@ extern void ext4_es_init_tree(struct ext4_es_tree *tree); extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk, unsigned int status); +extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk, + ext4_lblk_t len, ext4_fsblk_t pblk, + unsigned int status); extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len); extern void ext4_es_find_delayed_extent_range(struct inode *inode, diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 49e8bdff9163..f99bdb8548b2 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -39,7 +39,7 @@ static int finish_range(handle_t *handle, struct inode *inode, newext.ee_block = cpu_to_le32(lb->first_block); newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1); ext4_ext_store_pblock(&newext, lb->first_pblock); - path = ext4_ext_find_extent(inode, lb->first_block, NULL); + path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0); if (IS_ERR(path)) { retval = PTR_ERR(path); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index e86dddbd8296..7fa4d855dbd5 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -37,7 +37,7 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock, int ret = 0; struct ext4_ext_path *path; - path = ext4_ext_find_extent(inode, lblock, *orig_path); + path = ext4_ext_find_extent(inode, lblock, *orig_path, EXT4_EX_NOCACHE); if (IS_ERR(path)) ret = PTR_ERR(path); else if (path[ext_depth(inode)].p_ext == NULL) -- cgit v1.2.3 From 7869a4a6c5caa7b2e5c41ccaf46eb3371f88eea7 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 16 Aug 2013 22:05:14 -0400 Subject: ext4: add support for extent pre-caching Add a new fiemap flag which forces the all of the extents in an inode to be cached in the extent_status tree. This is critically important when using AIO to a preallocated file, since if we need to read in blocks from the extent tree, the io_submit(2) system call becomes synchronous, and the AIO is no longer "A", which is bad. In addition, for most files which have an external leaf tree block, the cost of caching the information in the extent status tree will be less than caching the entire 4k block in the buffer cache. So it is generally a win to keep the extent information cached. 
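For context, a minimal userspace sketch of how the new pre-caching interface can be driven (an illustration only, not part of the patch). The ioctl number mirrors the EXT4_IOC_PRECACHE_EXTENTS definition added below; the local fallback #define is only assumed for headers that predate this patch.

/* Ask ext4 to precache a file's extent tree before issuing AIO against it. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#ifndef EXT4_IOC_PRECACHE_EXTENTS
#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)  /* matches the definition below */
#endif

int main(int argc, char **argv)
{
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file-on-ext4>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Populate the extent status tree so a later io_submit() stays async */
        if (ioctl(fd, EXT4_IOC_PRECACHE_EXTENTS) < 0)
                perror("EXT4_IOC_PRECACHE_EXTENTS");
        close(fd);
        return 0;
}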
Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 17 ++++++----- fs/ext4/extents.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++- fs/ext4/extents_status.c | 72 +++++++++++++++++++++++++++++++++-------------- fs/ext4/ioctl.c | 3 ++ 4 files changed, 136 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index c74b1948feb0..635135e6148e 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -561,15 +561,16 @@ enum { #define EXT4_GET_BLOCKS_NO_PUT_HOLE 0x0200 /* - * The bit position of this flag must not overlap with any of the - * EXT4_GET_BLOCKS_*. It is used by ext4_ext_find_extent(), + * The bit position of these flags must not overlap with any of the + * EXT4_GET_BLOCKS_*. They are used by ext4_ext_find_extent(), * read_extent_tree_block(), ext4_split_extent_at(), - * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf() to - * indicate that the we shouldn't be caching the extents when reading - * from the extent tree while a truncate or punch hole operation - * is in progress. + * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf(). + * EXT4_EX_NOCACHE is used to indicate that the we shouldn't be + * caching the extents when reading from the extent tree while a + * truncate or punch hole operation is in progress. */ #define EXT4_EX_NOCACHE 0x0400 +#define EXT4_EX_FORCE_CACHE 0x0800 /* * Flags used by ext4_free_blocks @@ -601,6 +602,7 @@ enum { #define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent) #define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64) #define EXT4_IOC_SWAP_BOOT _IO('f', 17) +#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18) #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* @@ -1386,6 +1388,7 @@ enum { nolocking */ EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */ EXT4_STATE_ORDERED_MODE, /* data=ordered mode */ + EXT4_STATE_EXT_PRECACHED, /* extents have been precached */ }; #define EXT4_INODE_BIT_FNS(name, field, offset) \ @@ -2705,7 +2708,7 @@ extern int ext4_find_delalloc_range(struct inode *inode, extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk); extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len); - +extern int ext4_ext_precache(struct inode *inode); /* move_extent.c */ extern void ext4_double_down_write_data_sem(struct inode *first, diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 08c1ac976479..01838875fcaf 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -482,7 +482,7 @@ __read_extent_tree_block(const char *function, unsigned int line, if (err < 0) goto errout; } - if (buffer_verified(bh)) + if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) return bh; err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh), depth, pblk); @@ -526,6 +526,71 @@ errout: __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \ (depth), (flags)) +/* + * This function is called to cache a file's extent information in the + * extent status tree + */ +int ext4_ext_precache(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_ext_path *path = NULL; + struct buffer_head *bh; + int i = 0, depth, ret = 0; + + if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) + return 0; /* not an extent-mapped inode */ + + down_read(&ei->i_data_sem); + depth = ext_depth(inode); + + path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), + GFP_NOFS); + if (path == NULL) { + up_read(&ei->i_data_sem); + return -ENOMEM; + } + + /* Don't cache anything if there are no external extent blocks */ + 
if (depth == 0) + goto out; + path[0].p_hdr = ext_inode_hdr(inode); + ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0); + if (ret) + goto out; + path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr); + while (i >= 0) { + /* + * If this is a leaf block or we've reached the end of + * the index block, go up + */ + if ((i == depth) || + path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) { + brelse(path[i].p_bh); + path[i].p_bh = NULL; + i--; + continue; + } + bh = read_extent_tree_block(inode, + ext4_idx_pblock(path[i].p_idx++), + depth - i - 1, + EXT4_EX_FORCE_CACHE); + if (IS_ERR(bh)) { + ret = PTR_ERR(bh); + break; + } + i++; + path[i].p_bh = bh; + path[i].p_hdr = ext_block_hdr(bh); + path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr); + } + ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED); +out: + up_read(&ei->i_data_sem); + ext4_ext_drop_refs(path); + kfree(path); + return ret; +} + #ifdef EXT_DEBUG static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) { @@ -4766,6 +4831,12 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return error; } + if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { + error = ext4_ext_precache(inode); + if (error) + return error; + } + /* fallback to generic here if not in extents fmt */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return generic_block_fiemap(inode, fieinfo, start, len, diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 1dc5df016e25..0e88a367b535 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -710,11 +710,8 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk, write_lock(&EXT4_I(inode)->i_es_lock); es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk); - if (es && ((es->es_lblk <= lblk) || (es->es_lblk <= end))) - goto out; - - __es_insert_extent(inode, &newes); -out: + if (!es || es->es_lblk > end) + __es_insert_extent(inode, &newes); write_unlock(&EXT4_I(inode)->i_es_lock); } @@ -930,6 +927,12 @@ static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a, eia = list_entry(a, struct ext4_inode_info, i_es_lru); eib = list_entry(b, struct ext4_inode_info, i_es_lru); + if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) && + !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED)) + return 1; + if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) && + ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED)) + return -1; if (eia->i_touch_when == eib->i_touch_when) return 0; if (time_after(eia->i_touch_when, eib->i_touch_when)) @@ -943,21 +946,13 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, { struct ext4_inode_info *ei; struct list_head *cur, *tmp; - LIST_HEAD(skiped); + LIST_HEAD(skipped); int ret, nr_shrunk = 0; + int retried = 0, skip_precached = 1, nr_skipped = 0; spin_lock(&sbi->s_es_lru_lock); - /* - * If the inode that is at the head of LRU list is newer than - * last_sorted time, that means that we need to sort this list. 
- */ - ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru); - if (sbi->s_es_last_sorted < ei->i_touch_when) { - list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp); - sbi->s_es_last_sorted = jiffies; - } - +retry: list_for_each_safe(cur, tmp, &sbi->s_es_lru) { /* * If we have already reclaimed all extents from extent @@ -968,9 +963,16 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, ei = list_entry(cur, struct ext4_inode_info, i_es_lru); - /* Skip the inode that is newer than the last_sorted time */ - if (sbi->s_es_last_sorted < ei->i_touch_when) { - list_move_tail(cur, &skiped); + /* + * Skip the inode that is newer than the last_sorted + * time. Normally we try hard to avoid shrinking + * precached inodes, but we will as a last resort. + */ + if ((sbi->s_es_last_sorted < ei->i_touch_when) || + (skip_precached && ext4_test_inode_state(&ei->vfs_inode, + EXT4_STATE_EXT_PRECACHED))) { + nr_skipped++; + list_move_tail(cur, &skipped); continue; } @@ -990,11 +992,33 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, } /* Move the newer inodes into the tail of the LRU list. */ - list_splice_tail(&skiped, &sbi->s_es_lru); + list_splice_tail(&skipped, &sbi->s_es_lru); + INIT_LIST_HEAD(&skipped); + + /* + * If we skipped any inodes, and we weren't able to make any + * forward progress, sort the list and try again. + */ + if ((nr_shrunk == 0) && nr_skipped && !retried) { + retried++; + list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp); + sbi->s_es_last_sorted = jiffies; + ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, + i_es_lru); + /* + * If there are no non-precached inodes left on the + * list, start releasing precached extents. + */ + if (ext4_test_inode_state(&ei->vfs_inode, + EXT4_STATE_EXT_PRECACHED)) + skip_precached = 0; + goto retry; + } + spin_unlock(&sbi->s_es_lru_lock); if (locked_ei && nr_shrunk == 0) - nr_shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); + nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan); return nr_shrunk; } @@ -1069,10 +1093,16 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, struct rb_node *node; struct extent_status *es; int nr_shrunk = 0; + static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); if (ei->i_es_lru_nr == 0) return 0; + if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) && + __ratelimit(&_rs)) + ext4_warning(inode->i_sb, "forced shrink of precached extents"); + node = rb_first(&tree->root); while (node != NULL) { es = rb_entry(node, struct extent_status, rb_node); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index c0427e2f6648..5498f75a1648 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -624,6 +624,8 @@ resizefs_out: return 0; } + case EXT4_IOC_PRECACHE_EXTENTS: + return ext4_ext_precache(inode); default: return -ENOTTY; @@ -688,6 +690,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case EXT4_IOC_MOVE_EXT: case FITRIM: case EXT4_IOC_RESIZE_FS: + case EXT4_IOC_PRECACHE_EXTENTS: break; default: return -ENOIOCTLCMD; -- cgit v1.2.3 From 5b61de757535095c99212c1ed857c3a0e0bbe386 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 16 Aug 2013 22:06:14 -0400 Subject: ext4: start handle at least possible moment when renaming files In ext4_rename(), don't start the journal handle until the the directory entries have been successfully looked up. 
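The ordering change is easier to see in isolation. Below is a stand-alone sketch of the same idea (made-up names, not the ext4 code): do the lookups that can fail cheaply first, and only start the handle once they succeed, so the early error paths never hold a journal handle they must stop.

/* Illustration only: start the expensive handle after the fallible lookups. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct handle { int started; };

static struct handle *start_handle(void)        /* stands in for ext4_journal_start() */
{
        struct handle *h = calloc(1, sizeof(*h));

        if (h)
                h->started = 1;
        return h;
}

static void stop_handle(struct handle *h)       /* stands in for ext4_journal_stop() */
{
        free(h);
}

static int lookup_entry(const char *name)       /* stands in for ext4_find_entry() */
{
        return (name && *name) ? 0 : -ENOENT;
}

static int do_rename(const char *oldname, const char *newname)
{
        struct handle *handle;
        int err;

        /* 1) lookups first: failures return before any handle exists */
        err = lookup_entry(oldname);
        if (err)
                return err;
        err = lookup_entry(newname);
        if (err)
                return err;

        /* 2) only now start the handle and update the directory entries */
        handle = start_handle();
        if (!handle)
                return -ENOMEM;
        /* ... modify directory entries under the handle ... */
        stop_handle(handle);
        return 0;
}

int main(void)
{
        printf("rename foo.new -> foo: %d\n", do_rename("foo.new", "foo"));
        printf("rename \"\" -> foo: %d\n", do_rename("", "foo"));
        return 0;
}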
Signed-off-by: "Theodore Ts'o" --- fs/ext4/namei.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 35f55a0dbc4b..d9b721c172a6 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -3009,7 +3009,7 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - handle_t *handle; + handle_t *handle = NULL; struct inode *old_inode, *new_inode; struct buffer_head *old_bh, *new_bh, *dir_bh; struct ext4_dir_entry_2 *old_de, *new_de; @@ -3026,14 +3026,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, * in separate transaction */ if (new_dentry->d_inode) dquot_initialize(new_dentry->d_inode); - handle = ext4_journal_start(old_dir, EXT4_HT_DIR, - (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + - EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); - if (IS_ERR(handle)) - return PTR_ERR(handle); - - if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) - ext4_handle_sync(handle); old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL); /* @@ -3056,6 +3048,16 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, new_bh = NULL; } } + + handle = ext4_journal_start(old_dir, EXT4_HT_DIR, + (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) + ext4_handle_sync(handle); + if (S_ISDIR(old_inode->i_mode)) { if (new_inode) { retval = -ENOTEMPTY; @@ -3195,7 +3197,8 @@ end_rename: brelse(dir_bh); brelse(old_bh); brelse(new_bh); - ext4_journal_stop(handle); + if (handle) + ext4_journal_stop(handle); if (retval == 0 && force_da_alloc) ext4_alloc_da_blocks(old_inode); return retval; -- cgit v1.2.3 From 0e20270454e45ff54c9f8546159924038e31bfa0 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 16 Aug 2013 22:06:53 -0400 Subject: ext4: allocate delayed allocation blocks before rename When ext4_rename() overwrites an already existing file, call ext4_alloc_da_blocks() before starting the journal handle which actually does the rename, instead of doing this afterwards. This improves the likelihood that the contents will survive a crash if an application replaces a file using the sequence: 1) write replacement contents to foo.new 2) 3) rename foo.new to foo It is still not a guarantee, since ext4_alloc_da_blocks() is *not* doing a file integrity sync; this means if foo.new is a very large file, it may not be completely flushed out to disk. However, for files smaller than a megabyte or so, any dirty pages should be flushed out before we do the rename operation, and so at the next journal commit, the CACHE FLUSH command will make sure al of these pages are safely on the disk platter. Signed-off-by: "Theodore Ts'o" --- fs/ext4/namei.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index d9b721c172a6..1bec5a5c1e45 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -3005,6 +3005,10 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, /* * Anybody can rename anything with this: the permission checks are left to the * higher-level routines. + * + * n.b. 
old_{dentry,inode) refers to the source dentry/inode + * while new_{dentry,inode) refers to the destination dentry/inode + * This comes from rename(const char *oldpath, const char *newpath) */ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) @@ -3013,7 +3017,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *old_inode, *new_inode; struct buffer_head *old_bh, *new_bh, *dir_bh; struct ext4_dir_entry_2 *old_de, *new_de; - int retval, force_da_alloc = 0; + int retval; int inlined = 0, new_inlined = 0; struct ext4_dir_entry_2 *parent_de; @@ -3048,6 +3052,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, new_bh = NULL; } } + if (new_inode && !test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC)) + ext4_alloc_da_blocks(old_inode); handle = ext4_journal_start(old_dir, EXT4_HT_DIR, (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + @@ -3188,8 +3194,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, ext4_mark_inode_dirty(handle, new_inode); if (!new_inode->i_nlink) ext4_orphan_add(handle, new_inode); - if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC)) - force_da_alloc = 1; } retval = 0; @@ -3199,8 +3203,6 @@ end_rename: brelse(new_bh); if (handle) ext4_journal_stop(handle); - if (retval == 0 && force_da_alloc) - ext4_alloc_da_blocks(old_inode); return retval; } -- cgit v1.2.3 From 19883bd9658d0dc269fc228b1b39db3615f7c7b0 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 16 Aug 2013 22:06:55 -0400 Subject: ext4: avoid reusing recently deleted inodes in no journal mode In no journal mode, if an inode has recently been deleted, we shouldn't reuse it right away. Otherwise it's possible, after an unclean shutdown, to hit a situation where a recently deleted inode gets reused for some other purpose before the inode table block has been written to disk. However, if the directory entry has been updated, then the directory entry will be pointing at the old inode contents. E2fsck will make sure the file system is consistent after the unclean shutdown. However, if the recently deleted inode is a character mode device, or an inode with the immutable bit set, even after the file system has been fixed up by e2fsck, it can be possible for a *.pyc file to be pointing at a character mode device, and when python tries to open the *.pyc file, Hilarity Ensues. We could change all of userspace to be very suspicious about stat'ing files before opening them, and clearing the immutable flag if necessary --- or we can just avoid reusing an inode number if it has been recently deleted. Google-Bug-Id: 10017573 Signed-off-by: "Theodore Ts'o" --- fs/ext4/ialloc.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) (limited to 'fs') diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 8bf5999875ee..666a5ed48bcc 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -624,6 +624,51 @@ static int find_group_other(struct super_block *sb, struct inode *parent, return -1; } +/* + * In no journal mode, if an inode has recently been deleted, we want + * to avoid reusing it until we're reasonably sure the inode table + * block has been written back to disk. (Yes, these values are + * somewhat arbitrary...) 
+ */ +#define RECENTCY_MIN 5 +#define RECENTCY_DIRTY 30 + +static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino) +{ + struct ext4_group_desc *gdp; + struct ext4_inode *raw_inode; + struct buffer_head *bh; + unsigned long dtime, now; + int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; + int offset, ret = 0, recentcy = RECENTCY_MIN; + + gdp = ext4_get_group_desc(sb, group, NULL); + if (unlikely(!gdp)) + return 0; + + bh = sb_getblk(sb, ext4_inode_table(sb, gdp) + + (ino / inodes_per_block)); + if (unlikely(!bh) || !buffer_uptodate(bh)) + /* + * If the block is not in the buffer cache, then it + * must have been written out. + */ + goto out; + + offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb); + raw_inode = (struct ext4_inode *) (bh->b_data + offset); + dtime = le32_to_cpu(raw_inode->i_dtime); + now = get_seconds(); + if (buffer_dirty(bh)) + recentcy += RECENTCY_DIRTY; + + if (dtime && (dtime < now) && (now < dtime + recentcy)) + ret = 1; +out: + brelse(bh); + return ret; +} + /* * There are two policies for allocating an inode. If the new inode is * a directory, then a forward search is made for a block group with both @@ -741,6 +786,11 @@ repeat_in_this_group: "inode=%lu", ino + 1); continue; } + if ((EXT4_SB(sb)->s_journal == NULL) && + recently_deleted(sb, group, ino)) { + ino++; + goto next_inode; + } if (!handle) { BUG_ON(nblocks <= 0); handle = __ext4_journal_start_sb(dir->i_sb, line_no, @@ -764,6 +814,7 @@ repeat_in_this_group: ino++; /* the inode bitmap is zero-based */ if (!ret2) goto got; /* we grabbed the inode! */ +next_inode: if (ino < EXT4_INODES_PER_GROUP(sb)) goto repeat_in_this_group; next_group: -- cgit v1.2.3 From 1c8924eb106c1ac755d5d35ce9b3ff42e89e2511 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sat, 17 Aug 2013 09:32:32 -0400 Subject: quota: provide interface for readding allocated space into reserved space ext4 needs to convert allocated (metadata) blocks back into blocks reserved for delayed allocation. Add functions into quota code for supporting such operation. 
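The accounting move is simple at heart: the charged bytes stay with the user, they just migrate from the allocated counter back to the reserved one, leaving the total unchanged. A toy, standalone model follows (field names only echo struct dquot's dqb_curspace/dqb_rsvspace; this is not the kernel code):

/* Toy model of dquot_reclaim_reserved_space(): clamp, then move the charge. */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

struct toy_dquot {
        uint64_t curspace;      /* bytes charged as allocated */
        uint64_t rsvspace;      /* bytes charged as reserved  */
};

static void reclaim_reserved_space(struct toy_dquot *dq, uint64_t number)
{
        if (number > dq->curspace)      /* the WARN_ON_ONCE() case in the patch */
                number = dq->curspace;
        dq->rsvspace += number;
        dq->curspace -= number;
}

int main(void)
{
        struct toy_dquot dq = { .curspace = 4096, .rsvspace = 0 };
        uint64_t total = dq.curspace + dq.rsvspace;

        reclaim_reserved_space(&dq, 4096);
        assert(dq.curspace + dq.rsvspace == total);     /* total charge unchanged */
        printf("cur=%" PRIu64 " rsv=%" PRIu64 "\n", dq.curspace, dq.rsvspace);
        return 0;
}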
Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/quota/dquot.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ fs/stat.c | 11 +++++++++-- 2 files changed, 55 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index fbad622841f9..9a702e193538 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -1094,6 +1094,14 @@ static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number) dquot->dq_dqb.dqb_rsvspace -= number; } +static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number) +{ + if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number)) + number = dquot->dq_dqb.dqb_curspace; + dquot->dq_dqb.dqb_rsvspace += number; + dquot->dq_dqb.dqb_curspace -= number; +} + static inline void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) { @@ -1528,6 +1536,15 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number) } EXPORT_SYMBOL(inode_claim_rsv_space); +void inode_reclaim_rsv_space(struct inode *inode, qsize_t number) +{ + spin_lock(&inode->i_lock); + *inode_reserved_space(inode) += number; + __inode_sub_bytes(inode, number); + spin_unlock(&inode->i_lock); +} +EXPORT_SYMBOL(inode_reclaim_rsv_space); + void inode_sub_rsv_space(struct inode *inode, qsize_t number) { spin_lock(&inode->i_lock); @@ -1701,6 +1718,35 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) } EXPORT_SYMBOL(dquot_claim_space_nodirty); +/* + * Convert allocated space back to in-memory reserved quotas + */ +void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) +{ + int cnt; + + if (!dquot_active(inode)) { + inode_reclaim_rsv_space(inode, number); + return; + } + + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + spin_lock(&dq_data_lock); + /* Claim reserved quotas to allocated quotas */ + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (inode->i_dquot[cnt]) + dquot_reclaim_reserved_space(inode->i_dquot[cnt], + number); + } + /* Update inode bytes */ + inode_reclaim_rsv_space(inode, number); + spin_unlock(&dq_data_lock); + mark_all_dquot_dirty(inode->i_dquot); + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); + return; +} +EXPORT_SYMBOL(dquot_reclaim_space_nodirty); + /* * This operation can block, but only after everything is updated */ diff --git a/fs/stat.c b/fs/stat.c index 04ce1ac20d20..d0ea7ef75e26 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -447,9 +447,8 @@ void inode_add_bytes(struct inode *inode, loff_t bytes) EXPORT_SYMBOL(inode_add_bytes); -void inode_sub_bytes(struct inode *inode, loff_t bytes) +void __inode_sub_bytes(struct inode *inode, loff_t bytes) { - spin_lock(&inode->i_lock); inode->i_blocks -= bytes >> 9; bytes &= 511; if (inode->i_bytes < bytes) { @@ -457,6 +456,14 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes) inode->i_bytes += 512; } inode->i_bytes -= bytes; +} + +EXPORT_SYMBOL(__inode_sub_bytes); + +void inode_sub_bytes(struct inode *inode, loff_t bytes) +{ + spin_lock(&inode->i_lock); + __inode_sub_bytes(inode, bytes); spin_unlock(&inode->i_lock); } -- cgit v1.2.3 From 7d7345322d60edb0fa49a64a89b31360f01d09cb Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sat, 17 Aug 2013 09:36:54 -0400 Subject: ext4: fix warning in ext4_da_update_reserve_space() reaim workfile.dbase test easily triggers warning in ext4_da_update_reserve_space(): EXT4-fs warning (device ram0): ext4_da_update_reserve_space:365: ino 12, allocated 1 with only 0 reserved metadata blocks (releasing 1 blocks with reserved 9 data blocks) The problem is that (one of) tests creates file and then 
randomly writes to it with O_SYNC. That results in writing back pages of the file in random order so we create extents for written blocks say 0, 2, 4, 6, 8 - this last allocation also allocates new block for extents. Then we writeout block 1 so we have extents 0-2, 4, 6, 8 and we release indirect extent block because extents fit in the inode again. Then we writeout block 10 and we need to allocate indirect extent block again which triggers the warning because we don't have the reservation anymore. Fix the problem by giving back freed metadata blocks resulting from extent merging into inode's reservation pool. Signed-off-by: Jan Kara --- fs/ext4/ext4.h | 1 + fs/ext4/extents.c | 3 ++- fs/ext4/mballoc.c | 21 +++++++++++++++++---- 3 files changed, 20 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 635135e6148e..58dede76f75f 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -581,6 +581,7 @@ enum { #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020 +#define EXT4_FREE_BLOCKS_RESERVE 0x0040 /* * ioctl commands diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 01838875fcaf..62b21ccea882 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1793,7 +1793,8 @@ static void ext4_ext_try_to_merge_up(handle_t *handle, brelse(path[1].p_bh); ext4_free_blocks(handle, inode, NULL, blk, 1, - EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); + EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET | + EXT4_FREE_BLOCKS_RESERVE); } /* diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 4bbbf13bd743..aa7d058e9e48 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4585,6 +4585,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *gd_bh; ext4_group_t block_group; struct ext4_sb_info *sbi; + struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_buddy e4b; unsigned int count_clusters; int err = 0; @@ -4784,7 +4785,6 @@ do_more: ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); ext4_group_desc_csum_set(sb, block_group, gdp); ext4_unlock_group(sb, block_group); - percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); @@ -4792,10 +4792,23 @@ do_more: &sbi->s_flex_groups[flex_group].free_clusters); } - ext4_mb_unload_buddy(&e4b); - - if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) + if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) { + percpu_counter_add(&sbi->s_dirtyclusters_counter, + count_clusters); + spin_lock(&ei->i_block_reservation_lock); + if (flags & EXT4_FREE_BLOCKS_METADATA) + ei->i_reserved_meta_blocks += count_clusters; + else + ei->i_reserved_data_blocks += count_clusters; + spin_unlock(&ei->i_block_reservation_lock); + if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) + dquot_reclaim_block(inode, + EXT4_C2B(sbi, count_clusters)); + } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); + percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); + + ext4_mb_unload_buddy(&e4b); /* We dirtied the bitmap block */ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); -- cgit v1.2.3 From 09930042a2e94cf8ee79d22943915612c1e4ba51 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sat, 17 Aug 2013 09:57:56 -0400 Subject: ext4: move test whether extent to map can be extended to one place Currently the logic whether the 
current buffer can be added to an extent of buffers to map is split between mpage_add_bh_to_extent() and add_page_bufs_to_extent(). Move the whole logic to mpage_add_bh_to_extent() which makes things a bit more straightforward and make following i_size fixes easier. Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" Cc: stable@vger.kernel.org --- fs/ext4/inode.c | 49 +++++++++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 0569c745475c..787497d536b6 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1904,34 +1904,48 @@ static int ext4_writepage(struct page *page, * * @mpd - extent of blocks * @lblk - logical number of the block in the file - * @b_state - b_state of the buffer head added + * @bh - buffer head we want to add to the extent * - * the function is used to collect contig. blocks in same state + * The function is used to collect contig. blocks in the same state. If the + * buffer doesn't require mapping for writeback and we haven't started the + * extent of buffers to map yet, the function returns 'true' immediately - the + * caller can write the buffer right away. Otherwise the function returns true + * if the block has been added to the extent, false if the block couldn't be + * added. */ -static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, - unsigned long b_state) +static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, + struct buffer_head *bh) { struct ext4_map_blocks *map = &mpd->map; - /* Don't go larger than mballoc is willing to allocate */ - if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) - return 0; + /* Buffer that doesn't need mapping for writeback? */ + if (!buffer_dirty(bh) || !buffer_mapped(bh) || + (!buffer_delay(bh) && !buffer_unwritten(bh))) { + /* So far no extent to map => we write the buffer right away */ + if (map->m_len == 0) + return true; + return false; + } /* First block in the extent? */ if (map->m_len == 0) { map->m_lblk = lblk; map->m_len = 1; - map->m_flags = b_state & BH_FLAGS; - return 1; + map->m_flags = bh->b_state & BH_FLAGS; + return true; } + /* Don't go larger than mballoc is willing to allocate */ + if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) + return false; + /* Can we merge the block to our big extent? */ if (lblk == map->m_lblk + map->m_len && - (b_state & BH_FLAGS) == map->m_flags) { + (bh->b_state & BH_FLAGS) == map->m_flags) { map->m_len++; - return 1; + return true; } - return 0; + return false; } static bool add_page_bufs_to_extent(struct mpage_da_data *mpd, @@ -1946,18 +1960,13 @@ static bool add_page_bufs_to_extent(struct mpage_da_data *mpd, do { BUG_ON(buffer_locked(bh)); - if (!buffer_dirty(bh) || !buffer_mapped(bh) || - (!buffer_delay(bh) && !buffer_unwritten(bh)) || - lblk >= blocks) { + if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { /* Found extent to map? */ if (mpd->map.m_len) return false; - if (lblk >= blocks) - return true; - continue; + /* Everything mapped so far and we hit EOF */ + return true; } - if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state)) - return false; } while (lblk++, (bh = bh->b_this_page) != head); return true; } -- cgit v1.2.3 From 5f1132b2ba8c873f25982cf45917e8455fb6c962 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sat, 17 Aug 2013 10:02:33 -0400 Subject: ext4: fix ext4_writepages() in presence of truncate Inode size can arbitrarily change while writeback is in progress. 
When ext4_writepages() has prepared a long extent for mapping and truncate then reduces i_size, mpage_map_and_submit_buffers() will always map just one buffer in a page instead of all of them due to lblk < blocks check. So we end up not using all blocks we've allocated (thus leaking them) and also delalloc accounting goes wrong manifesting as a warning like: ext4_da_release_space:1333: ext4_da_release_space: ino 12, to_free 1 with only 0 reserved data blocks Note that the problem can happen only when blocksize < pagesize because otherwise we have only a single buffer in the page. Fix the problem by removing the size check from the mapping loop. We have an extent allocated so we have to use it all before checking for i_size. We also rename add_page_bufs_to_extent() to mpage_process_page_bufs() and make that function submit the page for IO if all buffers (upto EOF) in it are mapped. Reported-by: Dave Jones Reported-by: Zheng Liu Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" Cc: stable@vger.kernel.org --- fs/ext4/inode.c | 107 ++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 66 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 787497d536b6..19fa2e076275 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1890,6 +1890,26 @@ static int ext4_writepage(struct page *page, return ret; } +static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) +{ + int len; + loff_t size = i_size_read(mpd->inode); + int err; + + BUG_ON(page->index != mpd->first_page); + if (page->index == size >> PAGE_CACHE_SHIFT) + len = size & ~PAGE_CACHE_MASK; + else + len = PAGE_CACHE_SIZE; + clear_page_dirty_for_io(page); + err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc); + if (!err) + mpd->wbc->nr_to_write--; + mpd->first_page++; + + return err; +} + #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) /* @@ -1948,12 +1968,29 @@ static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, return false; } -static bool add_page_bufs_to_extent(struct mpage_da_data *mpd, - struct buffer_head *head, - struct buffer_head *bh, - ext4_lblk_t lblk) +/* + * mpage_process_page_bufs - submit page buffers for IO or add them to extent + * + * @mpd - extent of blocks for mapping + * @head - the first buffer in the page + * @bh - buffer we should start processing from + * @lblk - logical number of the block in the file corresponding to @bh + * + * Walk through page buffers from @bh upto @head (exclusive) and either submit + * the page for IO if all buffers in this page were mapped and there's no + * accumulated extent of buffers to map or add buffers in the page to the + * extent of buffers to map. The function returns 1 if the caller can continue + * by processing the next page, 0 if it should stop adding buffers to the + * extent to map because we cannot extend it anymore. It can also return value + * < 0 in case of error during IO submission. + */ +static int mpage_process_page_bufs(struct mpage_da_data *mpd, + struct buffer_head *head, + struct buffer_head *bh, + ext4_lblk_t lblk) { struct inode *inode = mpd->inode; + int err; ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) >> inode->i_blkbits; @@ -1963,32 +2000,18 @@ static bool add_page_bufs_to_extent(struct mpage_da_data *mpd, if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { /* Found extent to map? 
*/ if (mpd->map.m_len) - return false; + return 0; /* Everything mapped so far and we hit EOF */ - return true; + break; } } while (lblk++, (bh = bh->b_this_page) != head); - return true; -} - -static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) -{ - int len; - loff_t size = i_size_read(mpd->inode); - int err; - - BUG_ON(page->index != mpd->first_page); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; - else - len = PAGE_CACHE_SIZE; - clear_page_dirty_for_io(page); - err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc); - if (!err) - mpd->wbc->nr_to_write--; - mpd->first_page++; - - return err; + /* So far everything mapped? Submit the page for IO. */ + if (mpd->map.m_len == 0) { + err = mpage_submit_page(mpd, head->b_page); + if (err < 0) + return err; + } + return lblk < blocks; } /* @@ -2012,8 +2035,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) struct inode *inode = mpd->inode; struct buffer_head *head, *bh; int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; - ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) - >> inode->i_blkbits; pgoff_t start, end; ext4_lblk_t lblk; sector_t pblock; @@ -2048,18 +2069,26 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) */ mpd->map.m_len = 0; mpd->map.m_flags = 0; - add_page_bufs_to_extent(mpd, head, bh, - lblk); + /* + * FIXME: If dioread_nolock supports + * blocksize < pagesize, we need to make + * sure we add size mapped so far to + * io_end->size as the following call + * can submit the page for IO. + */ + err = mpage_process_page_bufs(mpd, head, + bh, lblk); pagevec_release(&pvec); - return 0; + if (err > 0) + err = 0; + return err; }