Diffstat (limited to 'fs/f2fs')
-rw-r--r--  fs/f2fs/acl.c        |   2
-rw-r--r--  fs/f2fs/checkpoint.c |  38
-rw-r--r--  fs/f2fs/compress.c   |  26
-rw-r--r--  fs/f2fs/compress.h   |   0
-rw-r--r--  fs/f2fs/data.c       | 211
-rw-r--r--  fs/f2fs/debug.c      |  11
-rw-r--r--  fs/f2fs/dir.c        | 115
-rw-r--r--  fs/f2fs/f2fs.h       | 183
-rw-r--r--  fs/f2fs/file.c       | 436
-rw-r--r--  fs/f2fs/gc.c         |   4
-rw-r--r--  fs/f2fs/hash.c       |  11
-rw-r--r--  fs/f2fs/inline.c     |  11
-rw-r--r--  fs/f2fs/inode.c      |   3
-rw-r--r--  fs/f2fs/namei.c      |   1
-rw-r--r--  fs/f2fs/node.c       |  41
-rw-r--r--  fs/f2fs/node.h       |   4
-rw-r--r--  fs/f2fs/recovery.c   |  16
-rw-r--r--  fs/f2fs/segment.c    |  51
-rw-r--r--  fs/f2fs/shrinker.c   |   4
-rw-r--r--  fs/f2fs/super.c      |  72
-rw-r--r--  fs/f2fs/sysfs.c      |  14
21 files changed, 861 insertions, 393 deletions
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 306413589827..1e5e9b1136ee 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -384,7 +384,7 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
struct page *dpage)
{
struct posix_acl *default_acl = NULL, *acl = NULL;
- int error = 0;
+ int error;
error = f2fs_acl_create(dir, &inode->i_mode, &default_acl, &acl, dpage);
if (error)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 023462e80e58..897edb7c951a 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -37,7 +37,7 @@ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
struct address_space *mapping = META_MAPPING(sbi);
- struct page *page = NULL;
+ struct page *page;
repeat:
page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
@@ -348,13 +348,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
goto skip_write;
/* if locked failed, cp will flush dirty pages instead */
- if (!mutex_trylock(&sbi->cp_mutex))
+ if (!down_write_trylock(&sbi->cp_global_sem))
goto skip_write;
trace_f2fs_writepages(mapping->host, wbc, META);
diff = nr_pages_to_write(sbi, META, wbc);
written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
- mutex_unlock(&sbi->cp_mutex);
+ up_write(&sbi->cp_global_sem);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0;
@@ -1385,6 +1385,26 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
f2fs_submit_merged_write(sbi, META_FLUSH);
}
+static inline u64 get_sectors_written(struct block_device *bdev)
+{
+ return (u64)part_stat_read(bdev, sectors[STAT_WRITE]);
+}
+
+u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
+{
+ if (f2fs_is_multi_device(sbi)) {
+ u64 sectors = 0;
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ sectors += get_sectors_written(FDEV(i).bdev);
+
+ return sectors;
+ }
+
+ return get_sectors_written(sbi->sb->s_bdev);
+}
+
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1395,7 +1415,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u32 crc32 = 0;
int i;
int cp_payload_blks = __cp_payload(sbi);
- struct super_block *sb = sbi->sb;
struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
u64 kbytes_written;
int err;
@@ -1490,9 +1509,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Record write statistics in the hot node summary */
kbytes_written = sbi->kbytes_written;
- if (sb->s_bdev->bd_part)
- kbytes_written += BD_PART_WRITTEN(sbi);
-
+ kbytes_written += (f2fs_get_sectors_written(sbi) -
+ sbi->sectors_written_start) >> 1;
seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
if (__remain_node_summaries(cpc->reason)) {
@@ -1572,7 +1590,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_warn(sbi, "Start checkpoint disabled!");
}
if (cpc->reason != CP_RESIZE)
- mutex_lock(&sbi->cp_mutex);
+ down_write(&sbi->cp_global_sem);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
@@ -1600,7 +1618,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
goto out;
}
- if (NM_I(sbi)->dirty_nat_cnt == 0 &&
+ if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
SIT_I(sbi)->dirty_sentries == 0 &&
prefree_segments(sbi) == 0) {
f2fs_flush_sit_entries(sbi, cpc);
@@ -1647,7 +1665,7 @@ stop:
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
if (cpc->reason != CP_RESIZE)
- mutex_unlock(&sbi->cp_mutex);
+ up_write(&sbi->cp_global_sem);
return err;
}
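
Note on the checkpoint.c change: the `>> 1` in do_checkpoint() converts 512-byte sectors to kilobytes (two sectors per KiB). A minimal sketch of the accounting, assuming part_stat_read() returns 512-byte sectors as it does elsewhere in the block layer:

    /*
     * Sketch: write-statistics delta recorded in the hot node summary.
     * sectors_written_start is sampled once at mount; the running total
     * comes from the new f2fs_get_sectors_written() helper above.
     * E.g. 2048 sectors written since mount -> 2048 >> 1 == 1024 KiB.
     */
    u64 delta_kb = (f2fs_get_sectors_written(sbi) -
                    sbi->sectors_written_start) >> 1;  /* sectors / 2 == KiB */
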
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 14262e0f1cd6..4bcbacfe3325 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -602,6 +602,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
f2fs_cops[fi->i_compress_algorithm];
unsigned int max_len, new_nr_cpages;
struct page **new_cpages;
+ u32 chksum = 0;
int i, ret;
trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
@@ -655,6 +656,11 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
cc->cbuf->clen = cpu_to_le32(cc->clen);
+ if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
+ chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
+ cc->cbuf->cdata, cc->clen);
+ cc->cbuf->chksum = cpu_to_le32(chksum);
+
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
cc->cbuf->reserved[i] = cpu_to_le32(0);
@@ -790,6 +796,22 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
ret = cops->decompress_pages(dic);
+ if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
+ u32 provided = le32_to_cpu(dic->cbuf->chksum);
+ u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
+
+ if (provided != calculated) {
+ if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
+ set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
+ printk_ratelimited(
+ "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
+ KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
+ provided, calculated);
+ }
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
+ }
+
out_vunmap_cbuf:
vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
@@ -798,8 +820,6 @@ destroy_decompress_ctx:
if (cops->destroy_decompress_ctx)
cops->destroy_decompress_ctx(dic);
out_free_dic:
- if (verity)
- atomic_set(&dic->pending_pages, dic->nr_cpages);
if (!verity)
f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
ret, false);
@@ -921,7 +941,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
static bool cluster_may_compress(struct compress_ctx *cc)
{
- if (!f2fs_compressed_file(cc->inode))
+ if (!f2fs_need_compress_data(cc->inode))
return false;
if (f2fs_is_atomic_file(cc->inode))
return false;
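
Note on the compress.c change: the checksum round trip added above can be condensed as follows (a sketch using only names from the diff; the real write path stores chksum unconditionally, zeroed when the flag is off):

    /* compress time: stash a CRC of the compressed payload */
    if (fi->i_compress_flag & (1 << COMPRESS_CHKSUM))
            cc->cbuf->chksum = cpu_to_le32(
                    f2fs_crc32(F2FS_I_SB(cc->inode), cc->cbuf->cdata, cc->clen));

    /* decompress time: recompute and compare; a mismatch flags fsck
     * rather than failing the read, since the data still decompressed */
    if (le32_to_cpu(dic->cbuf->chksum) !=
        f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen))
            set_sbi_flag(sbi, SBI_NEED_FSCK);
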
diff --git a/fs/f2fs/compress.h b/fs/f2fs/compress.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/fs/f2fs/compress.h
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index be4da52604ed..aa34d620bec9 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -202,7 +202,7 @@ static void f2fs_verify_bio(struct bio *bio)
dic = (struct decompress_io_ctx *)page_private(page);
if (dic) {
- if (atomic_dec_return(&dic->pending_pages))
+ if (atomic_dec_return(&dic->verity_pages))
continue;
f2fs_verify_pages(dic->rpages,
dic->cluster_size);
@@ -736,6 +736,9 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
block_t last_blkaddr, block_t cur_blkaddr)
{
+ if (unlikely(sbi->max_io_bytes &&
+ bio->bi_iter.bi_size >= sbi->max_io_bytes))
+ return false;
if (last_blkaddr + 1 != cur_blkaddr)
return false;
return __same_bdev(sbi, cur_blkaddr, bio);
@@ -1027,7 +1030,8 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages, unsigned op_flag,
- pgoff_t first_idx, bool for_write)
+ pgoff_t first_idx, bool for_write,
+ bool for_verity)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
@@ -1049,7 +1053,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
post_read_steps |= 1 << STEP_DECRYPT;
if (f2fs_compressed_file(inode))
post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
- if (f2fs_need_verity(inode, first_idx))
+ if (for_verity && f2fs_need_verity(inode, first_idx))
post_read_steps |= 1 << STEP_VERITY;
if (post_read_steps) {
@@ -1079,7 +1083,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
struct bio *bio;
bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
- page->index, for_write);
+ page->index, for_write, true);
if (IS_ERR(bio))
return PTR_ERR(bio);
@@ -1750,6 +1754,16 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
return true;
}
+static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
+{
+ return (bytes >> inode->i_blkbits);
+}
+
+static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
+{
+ return (blks << inode->i_blkbits);
+}
+
static int __get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create, int flag,
pgoff_t *next_pgofs, int seg_type, bool may_write)
@@ -1758,7 +1772,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
int err;
map.m_lblk = iblock;
- map.m_len = bh->b_size >> inode->i_blkbits;
+ map.m_len = bytes_to_blks(inode, bh->b_size);
map.m_next_pgofs = next_pgofs;
map.m_next_extent = NULL;
map.m_seg_type = seg_type;
@@ -1768,20 +1782,11 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
if (!err) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
- bh->b_size = (u64)map.m_len << inode->i_blkbits;
+ bh->b_size = blks_to_bytes(inode, map.m_len);
}
return err;
}
-static int get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create, int flag,
- pgoff_t *next_pgofs)
-{
- return __get_data_block(inode, iblock, bh_result, create,
- flag, next_pgofs,
- NO_CHECK_TYPE, create);
-}
-
static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -1800,24 +1805,6 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock,
false);
}
-static int get_data_block_bmap(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_BMAP, NULL,
- NO_CHECK_TYPE, create);
-}
-
-static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
-{
- return (offset >> inode->i_blkbits);
-}
-
-static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
-{
- return (blk << inode->i_blkbits);
-}
-
static int f2fs_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
@@ -1843,7 +1830,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
return err;
}
- phys = (__u64)blk_to_logical(inode, ni.blk_addr);
+ phys = blks_to_bytes(inode, ni.blk_addr);
offset = offsetof(struct f2fs_inode, i_addr) +
sizeof(__le32) * (DEF_ADDRS_PER_INODE -
get_inline_xattr_addrs(inode));
@@ -1875,7 +1862,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
return err;
}
- phys = (__u64)blk_to_logical(inode, ni.blk_addr);
+ phys = blks_to_bytes(inode, ni.blk_addr);
len = inode->i_sb->s_blocksize;
f2fs_put_page(page, 1);
@@ -1913,7 +1900,7 @@ static loff_t max_inode_blocks(struct inode *inode)
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
- struct buffer_head map_bh;
+ struct f2fs_map_blocks map;
sector_t start_blk, last_blk;
pgoff_t next_pgofs;
u64 logical = 0, phys = 0, size = 0;
@@ -1945,29 +1932,31 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
goto out;
}
- if (logical_to_blk(inode, len) == 0)
- len = blk_to_logical(inode, 1);
+ if (bytes_to_blks(inode, len) == 0)
+ len = blks_to_bytes(inode, 1);
- start_blk = logical_to_blk(inode, start);
- last_blk = logical_to_blk(inode, start + len - 1);
+ start_blk = bytes_to_blks(inode, start);
+ last_blk = bytes_to_blks(inode, start + len - 1);
next:
- memset(&map_bh, 0, sizeof(struct buffer_head));
- map_bh.b_size = len;
+ memset(&map, 0, sizeof(map));
+ map.m_lblk = start_blk;
+ map.m_len = bytes_to_blks(inode, len);
+ map.m_next_pgofs = &next_pgofs;
+ map.m_seg_type = NO_CHECK_TYPE;
if (compr_cluster)
- map_bh.b_size = blk_to_logical(inode, cluster_size - 1);
+ map.m_len = cluster_size - 1;
- ret = get_data_block(inode, start_blk, &map_bh, 0,
- F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
+ ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
if (ret)
goto out;
/* HOLE */
- if (!buffer_mapped(&map_bh)) {
+ if (!(map.m_flags & F2FS_MAP_FLAGS)) {
start_blk = next_pgofs;
- if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
+ if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
max_inode_blocks(inode)))
goto prep_next;
@@ -1993,9 +1982,9 @@ next:
compr_cluster = false;
- logical = blk_to_logical(inode, start_blk - 1);
- phys = blk_to_logical(inode, map_bh.b_blocknr);
- size = blk_to_logical(inode, cluster_size);
+ logical = blks_to_bytes(inode, start_blk - 1);
+ phys = blks_to_bytes(inode, map.m_pblk);
+ size = blks_to_bytes(inode, cluster_size);
flags |= FIEMAP_EXTENT_ENCODED;
@@ -2007,20 +1996,20 @@ next:
goto prep_next;
}
- if (map_bh.b_blocknr == COMPRESS_ADDR) {
+ if (map.m_pblk == COMPRESS_ADDR) {
compr_cluster = true;
start_blk++;
goto prep_next;
}
- logical = blk_to_logical(inode, start_blk);
- phys = blk_to_logical(inode, map_bh.b_blocknr);
- size = map_bh.b_size;
+ logical = blks_to_bytes(inode, start_blk);
+ phys = blks_to_bytes(inode, map.m_pblk);
+ size = blks_to_bytes(inode, map.m_len);
flags = 0;
- if (buffer_unwritten(&map_bh))
+ if (map.m_flags & F2FS_MAP_UNWRITTEN)
flags = FIEMAP_EXTENT_UNWRITTEN;
- start_blk += logical_to_blk(inode, size);
+ start_blk += bytes_to_blks(inode, size);
prep_next:
cond_resched();
@@ -2053,8 +2042,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
bool is_readahead)
{
struct bio *bio = *bio_ret;
- const unsigned blkbits = inode->i_blkbits;
- const unsigned blocksize = 1 << blkbits;
+ const unsigned blocksize = blks_to_bytes(inode, 1);
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
@@ -2063,8 +2051,8 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
block_in_file = (sector_t)page_index(page);
last_block = block_in_file + nr_pages;
- last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
- blkbits;
+ last_block_in_file = bytes_to_blks(inode,
+ f2fs_readpage_limit(inode) + blocksize - 1);
if (last_block > last_block_in_file)
last_block = last_block_in_file;
@@ -2133,7 +2121,7 @@ submit_and_realloc:
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
is_readahead ? REQ_RAHEAD : 0, page->index,
- false);
+ false, true);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
bio = NULL;
@@ -2177,16 +2165,17 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
struct bio *bio = *bio_ret;
unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
sector_t last_block_in_file;
- const unsigned blkbits = inode->i_blkbits;
- const unsigned blocksize = 1 << blkbits;
+ const unsigned blocksize = blks_to_bytes(inode, 1);
struct decompress_io_ctx *dic = NULL;
+ struct bio_post_read_ctx *ctx;
+ bool for_verity = false;
int i;
int ret = 0;
f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
- last_block_in_file = (f2fs_readpage_limit(inode) +
- blocksize - 1) >> blkbits;
+ last_block_in_file = bytes_to_blks(inode,
+ f2fs_readpage_limit(inode) + blocksize - 1);
/* get rid of pages beyond EOF */
for (i = 0; i < cc->cluster_size; i++) {
@@ -2245,10 +2234,29 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
goto out_put_dnode;
}
+ /*
+ * It's possible to enable fsverity on the fly when handling a cluster,
+ * which requires complicated error handling. Instead of adding more
+ * complexity, let's give a rule where end_io post-processes fsverity
+ * per cluster. In order to do that, we need to submit bio, if previous
+ * bio sets a different post-process policy.
+ */
+ if (fsverity_active(cc->inode)) {
+ atomic_set(&dic->verity_pages, cc->nr_cpages);
+ for_verity = true;
+
+ if (bio) {
+ ctx = bio->bi_private;
+ if (!(ctx->enabled_steps & (1 << STEP_VERITY))) {
+ __submit_bio(sbi, bio, DATA);
+ bio = NULL;
+ }
+ }
+ }
+
for (i = 0; i < dic->nr_cpages; i++) {
struct page *page = dic->cpages[i];
block_t blkaddr;
- struct bio_post_read_ctx *ctx;
blkaddr = data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i + 1);
@@ -2264,17 +2272,31 @@ submit_and_realloc:
if (!bio) {
bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
is_readahead ? REQ_RAHEAD : 0,
- page->index, for_write);
+ page->index, for_write, for_verity);
if (IS_ERR(bio)) {
+ unsigned int remained = dic->nr_cpages - i;
+ bool release = false;
+
ret = PTR_ERR(bio);
dic->failed = true;
- if (!atomic_sub_return(dic->nr_cpages - i,
- &dic->pending_pages)) {
+
+ if (for_verity) {
+ if (!atomic_sub_return(remained,
+ &dic->verity_pages))
+ release = true;
+ } else {
+ if (!atomic_sub_return(remained,
+ &dic->pending_pages))
+ release = true;
+ }
+
+ if (release) {
f2fs_decompress_end_io(dic->rpages,
- cc->cluster_size, true,
- false);
+ cc->cluster_size, true,
+ false);
f2fs_free_dic(dic);
}
+
f2fs_put_dnode(&dn);
*bio_ret = NULL;
return ret;
@@ -3164,7 +3186,7 @@ static inline bool __should_serialize_io(struct inode *inode,
if (IS_NOQUOTA(inode))
return false;
- if (f2fs_compressed_file(inode))
+ if (f2fs_need_compress_data(inode))
return true;
if (wbc->sync_mode != WB_SYNC_ALL)
return true;
@@ -3799,9 +3821,6 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
- struct buffer_head tmp = {
- .b_size = i_blocksize(inode),
- };
sector_t blknr = 0;
if (f2fs_has_inline_data(inode))
@@ -3818,8 +3837,16 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
if (f2fs_compressed_file(inode)) {
blknr = f2fs_bmap_compress(inode, block);
} else {
- if (!get_data_block_bmap(inode, block, &tmp, 0))
- blknr = tmp.b_blocknr;
+ struct f2fs_map_blocks map;
+
+ memset(&map, 0, sizeof(map));
+ map.m_lblk = block;
+ map.m_len = 1;
+ map.m_next_pgofs = NULL;
+ map.m_seg_type = NO_CHECK_TYPE;
+
+ if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
+ blknr = map.m_pblk;
}
out:
trace_f2fs_bmap(inode, block, blknr);
@@ -3895,7 +3922,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
sector_t highest_pblock = 0;
int nr_extents = 0;
unsigned long nr_pblocks;
- unsigned long len;
+ u64 len;
int ret;
/*
@@ -3903,29 +3930,31 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
* to be very smart.
*/
cur_lblock = 0;
- last_lblock = logical_to_blk(inode, i_size_read(inode));
+ last_lblock = bytes_to_blks(inode, i_size_read(inode));
len = i_size_read(inode);
while (cur_lblock <= last_lblock && cur_lblock < sis->max) {
- struct buffer_head map_bh;
+ struct f2fs_map_blocks map;
pgoff_t next_pgofs;
cond_resched();
- memset(&map_bh, 0, sizeof(struct buffer_head));
- map_bh.b_size = len - cur_lblock;
+ memset(&map, 0, sizeof(map));
+ map.m_lblk = cur_lblock;
+ map.m_len = bytes_to_blks(inode, len) - cur_lblock;
+ map.m_next_pgofs = &next_pgofs;
+ map.m_seg_type = NO_CHECK_TYPE;
- ret = get_data_block(inode, cur_lblock, &map_bh, 0,
- F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
+ ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
if (ret)
goto err_out;
/* hole */
- if (!buffer_mapped(&map_bh))
+ if (!(map.m_flags & F2FS_MAP_FLAGS))
goto err_out;
- pblock = map_bh.b_blocknr;
- nr_pblocks = logical_to_blk(inode, map_bh.b_size);
+ pblock = map.m_pblk;
+ nr_pblocks = map.m_len;
if (cur_lblock + nr_pblocks >= sis->max)
nr_pblocks = sis->max - cur_lblock;
@@ -3968,7 +3997,6 @@ static int check_swap_activate(struct swap_info_struct *sis,
struct inode *inode = mapping->host;
unsigned blocks_per_page;
unsigned long page_no;
- unsigned blkbits;
sector_t probe_block;
sector_t last_block;
sector_t lowest_block = -1;
@@ -3979,8 +4007,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
if (PAGE_SIZE == F2FS_BLKSIZE)
return check_swap_activate_fast(sis, swap_file, span);
- blkbits = inode->i_blkbits;
- blocks_per_page = PAGE_SIZE >> blkbits;
+ blocks_per_page = bytes_to_blks(inode, PAGE_SIZE);
/*
* Map all the blocks into the extent list. This code doesn't try
@@ -3988,7 +4015,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
*/
probe_block = 0;
page_no = 0;
- last_block = i_size_read(inode) >> blkbits;
+ last_block = bytes_to_blks(inode, i_size_read(inode));
while ((probe_block + blocks_per_page) <= last_block &&
page_no < sis->max) {
unsigned block_in_page;
@@ -4028,7 +4055,7 @@ static int check_swap_activate(struct swap_info_struct *sis,
}
}
- first_block >>= (PAGE_SHIFT - blkbits);
+ first_block >>= (PAGE_SHIFT - inode->i_blkbits);
if (page_no) { /* exclude the header page */
if (first_block < lowest_block)
lowest_block = first_block;
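
Note on the data.c conversions: the buffer_head plumbing is replaced by direct f2fs_map_blocks() calls, and the new helpers are plain shifts (with 4 KiB blocks, i_blkbits is 12, so bytes_to_blks(inode, 8192) == 2). A minimal caller sketch following the pattern this diff converts fiemap, bmap, and swap activation to:

    struct f2fs_map_blocks map;

    memset(&map, 0, sizeof(map));
    map.m_lblk = start_blk;                     /* first logical block */
    map.m_len = bytes_to_blks(inode, len);      /* run length in blocks */
    map.m_next_pgofs = &next_pgofs;             /* where to resume after a hole */
    map.m_seg_type = NO_CHECK_TYPE;

    err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
    if (!err && (map.m_flags & F2FS_MAP_FLAGS)) /* mapped, i.e. not a hole */
            pr_info("lblk %u -> pblk %u, len %u\n",
                    map.m_lblk, map.m_pblk, map.m_len);
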
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index a8357fd4f5fa..197c914119da 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -145,8 +145,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->node_pages = NODE_MAPPING(sbi)->nrpages;
if (sbi->meta_inode)
si->meta_pages = META_MAPPING(sbi)->nrpages;
- si->nats = NM_I(sbi)->nat_cnt;
- si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
+ si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
+ si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
si->sits = MAIN_SEGS(sbi);
si->dirty_sits = SIT_I(sbi)->dirty_sentries;
si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
@@ -278,9 +278,10 @@ get_cache:
si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] +
NM_I(sbi)->nid_cnt[PREALLOC_NID]) *
sizeof(struct free_nid);
- si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
- si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
- sizeof(struct nat_entry_set);
+ si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] *
+ sizeof(struct nat_entry);
+ si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
+ sizeof(struct nat_entry_set);
si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
for (i = 0; i < MAX_INO_ENTRY; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
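
Note on the debug.c change: the two scalar counters (nat_cnt, dirty_nat_cnt) become one array indexed by state, so the shrinker and debug code can read consistent snapshots. A plausible shape for the enum and field, sketched from the usage here (the exact definition lives in node.h):

    enum nat_state {
            TOTAL_NAT,          /* every cached nat entry */
            DIRTY_NAT,          /* entries awaiting checkpoint writeback */
            RECLAIMABLE_NAT,    /* clean entries the shrinker may free */
            MAX_NAT_STATE,
    };

    /* in struct f2fs_nm_info, replacing nat_cnt/dirty_nat_cnt: */
    unsigned int nat_cnt[MAX_NAT_STATE];
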
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 4b9ef8bbfa4a..e6270a867be1 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -5,6 +5,7 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*/
+#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched/signal.h>
@@ -206,30 +207,55 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
/*
* Test whether a case-insensitive directory entry matches the filename
* being searched for.
+ *
+ * Returns 1 for a match, 0 for no match, and -errno on an error.
*/
-static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name,
+static int f2fs_match_ci_name(const struct inode *dir, const struct qstr *name,
const u8 *de_name, u32 de_name_len)
{
const struct super_block *sb = dir->i_sb;
const struct unicode_map *um = sb->s_encoding;
+ struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
struct qstr entry = QSTR_INIT(de_name, de_name_len);
int res;
+ if (IS_ENCRYPTED(dir)) {
+ const struct fscrypt_str encrypted_name =
+ FSTR_INIT((u8 *)de_name, de_name_len);
+
+ if (WARN_ON_ONCE(!fscrypt_has_encryption_key(dir)))
+ return -EINVAL;
+
+ decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
+ if (!decrypted_name.name)
+ return -ENOMEM;
+ res = fscrypt_fname_disk_to_usr(dir, 0, 0, &encrypted_name,
+ &decrypted_name);
+ if (res < 0)
+ goto out;
+ entry.name = decrypted_name.name;
+ entry.len = decrypted_name.len;
+ }
+
res = utf8_strncasecmp_folded(um, name, &entry);
- if (res < 0) {
- /*
- * In strict mode, ignore invalid names. In non-strict mode,
- * fall back to treating them as opaque byte sequences.
- */
- if (sb_has_strict_encoding(sb) || name->len != entry.len)
- return false;
- return !memcmp(name->name, entry.name, name->len);
+ /*
+ * In strict mode, ignore invalid names. In non-strict mode,
+ * fall back to treating them as opaque byte sequences.
+ */
+ if (res < 0 && !sb_has_strict_encoding(sb)) {
+ res = name->len == entry.len &&
+ memcmp(name->name, entry.name, name->len) == 0;
+ } else {
+ /* utf8_strncasecmp_folded returns 0 on match */
+ res = (res == 0);
}
- return res == 0;
+out:
+ kfree(decrypted_name.name);
+ return res;
}
#endif /* CONFIG_UNICODE */
-static inline bool f2fs_match_name(const struct inode *dir,
+static inline int f2fs_match_name(const struct inode *dir,
const struct f2fs_filename *fname,
const u8 *de_name, u32 de_name_len)
{
@@ -256,6 +282,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
int max_len = 0;
+ int res = 0;
if (max_slots)
*max_slots = 0;
@@ -273,10 +300,15 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
continue;
}
- if (de->hash_code == fname->hash &&
- f2fs_match_name(d->inode, fname, d->filename[bit_pos],
- le16_to_cpu(de->name_len)))
- goto found;
+ if (de->hash_code == fname->hash) {
+ res = f2fs_match_name(d->inode, fname,
+ d->filename[bit_pos],
+ le16_to_cpu(de->name_len));
+ if (res < 0)
+ return ERR_PTR(res);
+ if (res)
+ goto found;
+ }
if (max_slots && max_len > *max_slots)
*max_slots = max_len;
@@ -326,7 +358,11 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
}
de = find_in_block(dir, dentry_page, fname, &max_slots);
- if (de) {
+ if (IS_ERR(de)) {
+ *res_page = ERR_CAST(de);
+ de = NULL;
+ break;
+ } else if (de) {
*res_page = dentry_page;
break;
}
@@ -448,17 +484,39 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
f2fs_put_page(page, 1);
}
-static void init_dent_inode(const struct f2fs_filename *fname,
+static void init_dent_inode(struct inode *dir, struct inode *inode,
+ const struct f2fs_filename *fname,
struct page *ipage)
{
struct f2fs_inode *ri;
+ if (!fname) /* tmpfile case? */
+ return;
+
f2fs_wait_on_page_writeback(ipage, NODE, true, true);
/* copy name info. to this inode page */
ri = F2FS_INODE(ipage);
ri->i_namelen = cpu_to_le32(fname->disk_name.len);
memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len);
+ if (IS_ENCRYPTED(dir)) {
+ file_set_enc_name(inode);
+ /*
+ * Roll-forward recovery doesn't have encryption keys available,
+ * so it can't compute the dirhash for encrypted+casefolded
+ * filenames. Append it to i_name if possible. Else, disable
+ * roll-forward recovery of the dentry (i.e., make fsync'ing the
+ * file force a checkpoint) by setting LOST_PINO.
+ */
+ if (IS_CASEFOLDED(dir)) {
+ if (fname->disk_name.len + sizeof(f2fs_hash_t) <=
+ F2FS_NAME_LEN)
+ put_unaligned(fname->hash, (f2fs_hash_t *)
+ &ri->i_name[fname->disk_name.len]);
+ else
+ file_lost_pino(inode);
+ }
+ }
set_page_dirty(ipage);
}
@@ -541,11 +599,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
return page;
}
- if (fname) {
- init_dent_inode(fname, page);
- if (IS_ENCRYPTED(dir))
- file_set_enc_name(inode);
- }
+ init_dent_inode(dir, inode, fname, page);
/*
* This file should be checkpointed during fsync.
@@ -1022,7 +1076,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
int err = 0;
if (IS_ENCRYPTED(inode)) {
- err = fscrypt_get_encryption_info(inode);
+ err = fscrypt_prepare_readdir(inode);
if (err)
goto out;
@@ -1081,28 +1135,13 @@ out:
return err < 0 ? err : 0;
}
-static int f2fs_dir_open(struct inode *inode, struct file *filp)
-{
- if (IS_ENCRYPTED(inode))
- return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
- return 0;
-}
-
const struct file_operations f2fs_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = f2fs_readdir,
.fsync = f2fs_sync_file,
- .open = f2fs_dir_open,
.unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = f2fs_compat_ioctl,
#endif
};
-
-#ifdef CONFIG_UNICODE
-const struct dentry_operations f2fs_dentry_ops = {
- .d_hash = generic_ci_d_hash,
- .d_compare = generic_ci_d_compare,
-};
-#endif
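
Note on the dir.c change: roll-forward recovery runs without encryption keys, so it cannot recompute the dirhash of an encrypted+casefolded name; init_dent_inode() therefore appends the precomputed hash right after the name bytes when it fits. A sketch of how the recovery side could read it back (hypothetical helper; the real logic sits in the recovery/hash paths):

    /*
     * Sketch: recover the dirhash appended after an encrypted+casefolded
     * name. i_namelen still counts only the name bytes, so the hash sits
     * immediately after them, stored unaligned via put_unaligned() above.
     */
    static f2fs_hash_t recovered_dirhash(struct f2fs_inode *ri)
    {
            u32 len = le32_to_cpu(ri->i_namelen);

            return get_unaligned((f2fs_hash_t *)&ri->i_name[len]);
    }
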
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index cb700d797296..bb11759191dc 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -33,10 +33,8 @@
#else
#define f2fs_bug_on(sbi, condition) \
do { \
- if (unlikely(condition)) { \
- WARN_ON(1); \
+ if (WARN_ON(condition)) \
set_sbi_flag(sbi, SBI_NEED_FSCK); \
- } \
} while (0)
#endif
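
Note on the f2fs_bug_on() rewrite: it leans on WARN_ON() evaluating to the truth value of its condition, so the warning and the follow-up action share one test:

    /* before: test, warn, then act */
    if (unlikely(cond)) {
            WARN_ON(1);
            set_sbi_flag(sbi, SBI_NEED_FSCK);
    }

    /* after: WARN_ON(cond) returns !!cond, so it can gate the action */
    if (WARN_ON(cond))
            set_sbi_flag(sbi, SBI_NEED_FSCK);
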
@@ -147,8 +145,10 @@ struct f2fs_mount_info {
/* For compression */
unsigned char compress_algorithm; /* algorithm type */
- unsigned compress_log_size; /* cluster log size */
+ unsigned char compress_log_size; /* cluster log size */
+ bool compress_chksum; /* compressed data chksum */
unsigned char compress_ext_cnt; /* extension count */
+ int compress_mode; /* compression mode */
unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};
@@ -402,85 +402,6 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
return size <= MAX_SIT_JENTRIES(journal);
}
-/*
- * f2fs-specific ioctl commands
- */
-#define F2FS_IOCTL_MAGIC 0xf5
-#define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1)
-#define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2)
-#define