From 0e2b7385cb59e566520cfd0a04b4b53bc9461e98 Mon Sep 17 00:00:00 2001
From: Chao Yu
Date: Wed, 2 Sep 2020 15:01:52 +0800
Subject: f2fs: allocate proper size memory for zstd decompress

As 5kft <5kft@5kft.org> reported:

kworker/u9:3: page allocation failure: order:9, mode:0x40c40(GFP_NOFS|__GFP_COMP), nodemask=(null),cpuset=/,mems_allowed=0
CPU: 3 PID: 8168 Comm: kworker/u9:3 Tainted: G C 5.8.3-sunxi #trunk
Hardware name: Allwinner sun8i Family
Workqueue: f2fs_post_read_wq f2fs_post_read_work
[] (unwind_backtrace) from [] (show_stack+0x11/0x14)
[] (show_stack) from [] (dump_stack+0x75/0x84)
[] (dump_stack) from [] (warn_alloc+0xa3/0x104)
[] (warn_alloc) from [] (__alloc_pages_nodemask+0xb87/0xc40)
[] (__alloc_pages_nodemask) from [] (kmalloc_order+0x19/0x38)
[] (kmalloc_order) from [] (kmalloc_order_trace+0x19/0x90)
[] (kmalloc_order_trace) from [] (zstd_init_decompress_ctx+0x21/0x88)
[] (zstd_init_decompress_ctx) from [] (f2fs_decompress_pages+0x97/0x228)
[] (f2fs_decompress_pages) from [] (__read_end_io+0xfb/0x130)
[] (__read_end_io) from [] (f2fs_post_read_work+0x61/0x84)
[] (f2fs_post_read_work) from [] (process_one_work+0x15f/0x3b0)
[] (process_one_work) from [] (worker_thread+0xfb/0x3e0)
[] (worker_thread) from [] (kthread+0xeb/0x10c)
[] (kthread) from []

zstd may allocate a large amount of memory for {,de}compression, which can
cause file copy failures on low-end devices that have very little memory.
For decompression, let's allocate memory sized to the current file's
cluster size instead of the max cluster size.

Reported-by: 5kft <5kft@5kft.org>
Signed-off-by: Chao Yu
Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/compress.c | 7 ++++---
 fs/f2fs/f2fs.h     | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index b3d1a8701988..5949372700ee 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -382,16 +382,17 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
 	ZSTD_DStream *stream;
 	void *workspace;
 	unsigned int workspace_size;
+	unsigned int max_window_size =
+			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
 
-	workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);
+	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
 
 	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
 					workspace_size, GFP_NOFS);
 	if (!workspace)
 		return -ENOMEM;
 
-	stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
-					workspace, workspace_size);
+	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
 	if (!stream) {
 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 27f4a839f906..53fe2853579c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1419,7 +1419,7 @@ struct decompress_io_ctx {
 #define NULL_CLUSTER			((unsigned int)(~0))
 #define MIN_COMPRESS_LOG_SIZE		2
 #define MAX_COMPRESS_LOG_SIZE		8
-#define MAX_COMPRESS_WINDOW_SIZE	((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE)
+#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
 
 struct f2fs_sb_info {
 	struct super_block *sb;			/* pointer to VFS super block */
-- 
cgit v1.2.3
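
Editor's note, not part of the patch: a minimal userspace sketch of the arithmetic behind the fix, assuming a 4 KiB PAGE_SIZE and the f2fs defaults shown in f2fs.h above. The constants and the log_cluster_size value below are stand-ins chosen for illustration; the old code always sized the zstd window for the largest possible cluster (PAGE_SIZE << MAX_COMPRESS_LOG_SIZE), while the patched code sizes it for the current file's cluster (PAGE_SIZE << log_cluster_size).

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel values involved; PAGE_SIZE is
	 * assumed to be 4 KiB, MAX_COMPRESS_LOG_SIZE is 8 as in f2fs.h. */
	#define PAGE_SIZE		4096u
	#define MAX_COMPRESS_LOG_SIZE	8

	int main(void)
	{
		/* Example: a file compressed with 4-page clusters (log size 2). */
		unsigned int log_cluster_size = 2;

		/* Before the patch: window sized for the largest possible cluster. */
		unsigned int old_window = PAGE_SIZE << MAX_COMPRESS_LOG_SIZE;

		/* After the patch: window sized for this file's cluster. */
		unsigned int new_window = PAGE_SIZE << log_cluster_size;

		printf("old window: %u KiB\n", old_window / 1024);	/* 1024 KiB */
		printf("new window: %u KiB\n", new_window / 1024);	/* 16 KiB */
		return 0;
	}

The actual allocation is ZSTD_DStreamWorkspaceBound() applied to that window, which is somewhat larger than the window itself; with the old fixed 1 MiB window that is presumably what produced the order:9 allocation failure in the report above, while a window matched to a small cluster keeps the workspace to a few pages.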