From 370a11b8114bcca3738fe6a5d7ed8babcc212f39 Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Wed, 23 Jan 2019 15:15:16 +0800
Subject: btrfs: qgroup: Introduce per-root swapped blocks infrastructure

To allow delayed subtree swap rescan, btrfs needs to record per-root
information about which tree blocks get swapped.  This patch introduces
the required infrastructure.

The designed workflow will be:

1) Record the subtree root block that gets swapped.

   During subtree swap:
   O = Old tree blocks
   N = New tree blocks

          reloc tree                  subvolume tree X
             Root                            Root
            /    \                          /    \
          NA      OB                      OA      OB
        /  |      |  \                  /  |      |  \
      NC   ND     OE  OF              OC   OD     OE  OF

   In this case, NA and OA are going to be swapped, so record (NA, OA)
   into subvolume tree X.

2) After subtree swap.

          reloc tree                  subvolume tree X
             Root                            Root
            /    \                          /    \
          OA      OB                      NA      OB
        /  |      |  \                  /  |      |  \
      OC   OD     OE  OF              NC   ND     OE  OF

3a) COW happens for OB
    If we are going to COW tree block OB, we check OB's bytenr against
    tree X's swapped_blocks structure.
    If it doesn't match any record, nothing will happen.

3b) COW happens for NA
    Check NA's bytenr against tree X's swapped_blocks, and get a hit.
    Then we do a subtree scan on both subtrees OA and NA.
    This results in 6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
    After that, no matter what we do to subvolume tree X, qgroup numbers
    will still be correct.
    Then NA's record gets removed from X's swapped_blocks.

4) Transaction commit
   Any record left in X's swapped_blocks gets removed.  Since there was
   no modification to those swapped subtrees, there is no need to
   trigger a heavy qgroup subtree rescan for them.

This will introduce 128 bytes of overhead for each btrfs_root even if
qgroup is not enabled.  This is to reduce memory allocations and
potential failures.

Signed-off-by: Qu Wenruo
Signed-off-by: David Sterba
---
 fs/btrfs/qgroup.h | 92 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 92 insertions(+)

(limited to 'fs/btrfs/qgroup.h')
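
As a concrete illustration of step 1, the sketch below shows how recording a
swapped pair into a per-root set of rb-trees could look.  This is a simplified
standalone sketch, not the kernel implementation: the container layout and all
sketch_* / SKETCH_* names are assumptions, since the real per-root structure is
added to struct btrfs_root outside of this qgroup.h-only diff.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define SKETCH_MAX_LEVEL 8	/* assumption, mirrors BTRFS_MAX_LEVEL */

/* One record per swapped subtree root, keyed by the subvolume-side bytenr. */
struct sketch_swapped_block {
	struct rb_node node;
	u64 subvol_bytenr;
	u64 reloc_bytenr;
};

/* Per-root bookkeeping: one rb-tree per tree level, protected by a spinlock. */
struct sketch_swapped_blocks {
	spinlock_t lock;
	bool swapped;			/* quick "anything recorded?" check */
	struct rb_root blocks[SKETCH_MAX_LEVEL];
};

static void sketch_init_swapped_blocks(struct sketch_swapped_blocks *sb)
{
	int i;

	spin_lock_init(&sb->lock);
	for (i = 0; i < SKETCH_MAX_LEVEL; i++)
		sb->blocks[i] = RB_ROOT;
	sb->swapped = false;
}

/* Step 1: remember that the subtree roots (reloc, subvol) were swapped. */
static int sketch_record_swap(struct sketch_swapped_blocks *sb, int level,
			      u64 subvol_bytenr, u64 reloc_bytenr)
{
	struct sketch_swapped_block *blk;
	struct rb_node **p, *parent = NULL;

	blk = kmalloc(sizeof(*blk), GFP_NOFS);
	if (!blk)
		return -ENOMEM;
	blk->subvol_bytenr = subvol_bytenr;
	blk->reloc_bytenr = reloc_bytenr;

	spin_lock(&sb->lock);
	p = &sb->blocks[level].rb_node;
	while (*p) {
		struct sketch_swapped_block *cur;

		parent = *p;
		cur = rb_entry(parent, struct sketch_swapped_block, node);
		/* duplicate bytenrs are not handled in this sketch */
		if (subvol_bytenr < cur->subvol_bytenr)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&blk->node, parent, p);
	rb_insert_color(&blk->node, &sb->blocks[level]);
	sb->swapped = true;
	spin_unlock(&sb->lock);
	return 0;
}

Keying the rb-tree by the subvolume-side bytenr is what turns the later
COW-time check into a cheap lookup instead of an unconditional subtree trace.
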
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 20c6bd5fa701..8dc17020e5be 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -6,6 +6,8 @@
 #ifndef BTRFS_QGROUP_H
 #define BTRFS_QGROUP_H
 
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
 #include "ulist.h"
 #include "delayed-ref.h"
 
@@ -37,6 +39,66 @@
  *    Normally at qgroup rescan and transaction commit time.
  */
 
+/*
+ * Special performance optimization for balance.
+ *
+ * For balance, we need to swap subtree of subvolume and reloc trees.
+ * In theory, we need to trace all subtree blocks of both subvolume and reloc
+ * trees, since their owner has changed during such swap.
+ *
+ * However since balance has ensured that both subtrees are containing the
+ * same contents and have the same tree structures, such swap won't cause
+ * qgroup number change.
+ *
+ * But there is a race window between subtree swap and transaction commit,
+ * during that window, if we increase/decrease tree level or merge/split tree
+ * blocks, we still need to trace the original subtrees.
+ *
+ * So for balance, we use a delayed subtree tracing, whose workflow is:
+ *
+ * 1) Record the subtree root block get swapped.
+ *
+ *    During subtree swap:
+ *    O = Old tree blocks
+ *    N = New tree blocks
+ *          reloc tree                  subvolume tree X
+ *             Root                            Root
+ *            /    \                          /    \
+ *          NA      OB                      OA      OB
+ *        /  |      |  \                  /  |      |  \
+ *      NC   ND     OE  OF              OC   OD     OE  OF
+ *
+ *    In this case, NA and OA are going to be swapped, record (NA, OA) into
+ *    subvolume tree X.
+ *
+ * 2) After subtree swap.
+ *          reloc tree                  subvolume tree X
+ *             Root                            Root
+ *            /    \                          /    \
+ *          OA      OB                      NA      OB
+ *        /  |      |  \                  /  |      |  \
+ *      OC   OD     OE  OF              NC   ND     OE  OF
+ *
+ * 3a) COW happens for OB
+ *     If we are going to COW tree block OB, we check OB's bytenr against
+ *     tree X's swapped_blocks structure.
+ *     If it doesn't fit any, nothing will happen.
+ *
+ * 3b) COW happens for NA
+ *     Check NA's bytenr against tree X's swapped_blocks, and get a hit.
+ *     Then we do subtree scan on both subtrees OA and NA.
+ *     Resulting 6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
+ *
+ *     Then no matter what we do to subvolume tree X, qgroup numbers will
+ *     still be correct.
+ *     Then NA's record gets removed from X's swapped_blocks.
+ *
+ * 4) Transaction commit
+ *    Any record in X's swapped_blocks gets removed, since there is no
+ *    modification to the swapped subtrees, no need to trigger heavy qgroup
+ *    subtree rescan for them.
+ */
+
 /*
  * Record a dirty extent, and info qgroup to update quota on it
  * TODO: Use kmem cache to alloc it.
@@ -48,6 +110,24 @@ struct btrfs_qgroup_extent_record {
 	struct ulist *old_roots;
 };
 
+struct btrfs_qgroup_swapped_block {
+	struct rb_node node;
+
+	int level;
+	bool trace_leaf;
+
+	/* bytenr/generation of the tree block in subvolume tree after swap */
+	u64 subvol_bytenr;
+	u64 subvol_generation;
+
+	/* bytenr/generation of the tree block in reloc tree after swap */
+	u64 reloc_bytenr;
+	u64 reloc_generation;
+
+	u64 last_snapshot;
+	struct btrfs_key first_key;
+};
+
 /*
  * Qgroup reservation types:
  *
@@ -325,4 +405,16 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
 
 void btrfs_qgroup_check_reserved_leak(struct inode *inode);
 
+/* btrfs_qgroup_swapped_blocks related functions */
+void btrfs_qgroup_init_swapped_blocks(
+	struct btrfs_qgroup_swapped_blocks *swapped_blocks);
+
+void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
+int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
+		struct btrfs_root *subvol_root,
+		struct btrfs_block_group_cache *bg,
+		struct extent_buffer *subvol_parent, int subvol_slot,
+		struct extent_buffer *reloc_parent, int reloc_slot,
+		u64 last_snapshot);
+
 #endif
-- 
cgit v1.2.3

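
The next patch wires the check described in steps 3a/3b into the COW path.
As a rough sketch of that check, again using illustrative sketch_* types
rather than the real btrfs structures: on COW of a tree block, look its bytenr
up in the owning root's swapped-blocks tree for that level; a miss (3a) costs
only an rb-tree lookup, a hit (3b) detaches the record so that both recorded
subtrees can be traced exactly once.

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define SKETCH_MAX_LEVEL 8	/* assumption, mirrors BTRFS_MAX_LEVEL */

struct sketch_swapped_block {
	struct rb_node node;
	u64 subvol_bytenr;
	u64 reloc_bytenr;
};

struct sketch_swapped_blocks {
	spinlock_t lock;
	bool swapped;
	struct rb_root blocks[SKETCH_MAX_LEVEL];
};

/*
 * Steps 3a/3b: called when a tree block of @level at @bytenr is about to be
 * COWed.  Returns the matching record (detached from the tree) if the block
 * is a recorded swapped subtree root, NULL otherwise.
 */
static struct sketch_swapped_block *
sketch_lookup_and_remove(struct sketch_swapped_blocks *sb, int level, u64 bytenr)
{
	struct sketch_swapped_block *found = NULL;
	struct rb_node *n;

	spin_lock(&sb->lock);
	if (!sb->swapped)
		goto out;		/* 3a: fast path, nothing recorded */
	n = sb->blocks[level].rb_node;
	while (n) {
		struct sketch_swapped_block *cur;

		cur = rb_entry(n, struct sketch_swapped_block, node);
		if (bytenr < cur->subvol_bytenr) {
			n = n->rb_left;
		} else if (bytenr > cur->subvol_bytenr) {
			n = n->rb_right;
		} else {
			/* 3b: hit -- drop the record, caller traces subtrees */
			rb_erase(&cur->node, &sb->blocks[level]);
			found = cur;
			break;
		}
	}
out:
	spin_unlock(&sb->lock);
	return found;
}

A caller in the COW path would then trace both subtrees rooted at
found->subvol_bytenr and found->reloc_bytenr (the job the
btrfs_qgroup_trace_subtree_after_cow() declaration added below is meant for)
and free the record.
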
From f616f5cd9da7fceb7d884812da380b26040cd083 Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Wed, 23 Jan 2019 15:15:17 +0800
Subject: btrfs: qgroup: Use delayed subtree rescan for balance

Before this patch, the qgroup code traces the whole subtree of the
subvolume and reloc trees unconditionally.  This makes qgroup numbers
consistent, but it can cause tons of unnecessary extent tracing, which
causes a lot of overhead.

However, for the subtree swap done by balance, we can just swap both
subtrees: they contain the same contents and the same tree structure,
so qgroup numbers won't change.  It is only the race window between
subtree swap and transaction commit that could cause a qgroup number
change.

This patch delays the qgroup subtree scan until COW happens for the
subtree root.  So if there are no other operations on the fs, balance
won't cause extra qgroup overhead (best case scenario).  Depending on
the workload, most of the subtree scan can still be avoided.  Only in
the worst case scenario will it fall back to the old subtree swap
overhead (scan all swapped subtrees).

[[Benchmark]]
Hardware:
	VM with 4G vRAM, 8 vCPUs, disk using 'unsafe' cache mode,
	backing device is a SAMSUNG 850 EVO SSD.
	Host has 16G RAM.

Mkfs parameter:
	--nodesize 4K	(to bump up tree size)

Initial subvolume contents:
	4G data copied from /usr and /lib.
	(with enough regular small files)

Snapshots:
	16 snapshots of the original subvolume.
	Each snapshot has 3 random files modified.

Balance parameter:
	-m

So the content should be pretty similar to a real world root fs layout.
And after the file system population there is no other activity, so it
should be the best case scenario.

                     | v4.20-rc1 | w/ patchset | diff
------------------------------------------------------
relocated extents    | 22615     | 22457       | -0.1%
qgroup dirty extents | 163457    | 121606      | -25.6%
time (sys)           | 22.884s   | 18.842s     | -17.6%
time (real)          | 27.724s   | 22.884s     | -17.5%

Signed-off-by: Qu Wenruo
Signed-off-by: David Sterba
---
 fs/btrfs/qgroup.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'fs/btrfs/qgroup.h')

diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 8dc17020e5be..3f5e0b413312 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -416,5 +416,7 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
 		struct extent_buffer *subvol_parent, int subvol_slot,
 		struct extent_buffer *reloc_parent, int reloc_slot,
 		u64 last_snapshot);
+int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
+		struct btrfs_root *root, struct extent_buffer *eb);
 
 #endif
-- 
cgit v1.2.3


From 9627736b75f612e05cef122b215a68113af9cd4d Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Wed, 23 Jan 2019 15:15:18 +0800
Subject: btrfs: qgroup: Cleanup old subtree swap code

Since it has been replaced by the new delayed subtree swap code, remove
the original code.

The cleanup is small since most of its core functionality is still used
by the delayed subtree swap tracing.

Signed-off-by: Qu Wenruo
Signed-off-by: David Sterba
---
 fs/btrfs/qgroup.h | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'fs/btrfs/qgroup.h')

diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 3f5e0b413312..5e93733b78c8 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -316,12 +316,6 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
 int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
 			       struct extent_buffer *root_eb,
 			       u64 root_gen, int root_level);
-
-int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
-		struct btrfs_block_group_cache *bg_cache,
-		struct extent_buffer *src_parent, int src_slot,
-		struct extent_buffer *dst_parent, int dst_slot,
-		u64 last_snapshot);
 int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
 				u64 num_bytes, struct ulist *old_roots,
 				struct ulist *new_roots);
-- 
cgit v1.2.3

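
To complete the picture of the delayed-rescan lifecycle before the final,
independent fix in this series: step 4 (transaction commit) only has to throw
the remaining records away, since subtrees that were never COWed again need no
extra qgroup tracing.  A minimal sketch with the same illustrative sketch_*
types as above (the real cleanup is what btrfs_qgroup_clean_swapped_blocks()
is declared for; the names and layout here remain assumptions):

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define SKETCH_MAX_LEVEL 8	/* assumption, mirrors BTRFS_MAX_LEVEL */

struct sketch_swapped_block {
	struct rb_node node;
	u64 subvol_bytenr;
	u64 reloc_bytenr;
};

struct sketch_swapped_blocks {
	spinlock_t lock;
	bool swapped;
	struct rb_root blocks[SKETCH_MAX_LEVEL];
};

/* Step 4: at transaction commit, simply drop every remaining record. */
static void sketch_clean_swapped_blocks(struct sketch_swapped_blocks *sb)
{
	int i;

	spin_lock(&sb->lock);
	if (!sb->swapped)
		goto out;
	for (i = 0; i < ARRAY_SIZE(sb->blocks); i++) {
		struct sketch_swapped_block *entry, *next;

		/* postorder walk so freeing a node never touches freed children */
		rbtree_postorder_for_each_entry_safe(entry, next,
						     &sb->blocks[i], node)
			kfree(entry);
		sb->blocks[i] = RB_ROOT;
	}
	sb->swapped = false;
out:
	spin_unlock(&sb->lock);
}
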
From 1418bae1c22951aad9883bc8f8f4dccb272cce1e Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Wed, 23 Jan 2019 15:15:12 +0800
Subject: btrfs: qgroup: Move reserved data accounting from btrfs_delayed_ref_head to btrfs_qgroup_extent_record

[BUG]
Btrfs/139 will fail with a high probability if the testing machine (VM)
has only 2G RAM.  The final write succeeds while it should fail due to
EDQUOT, and the fs ends up exceeding the quota limit by 16K.

The simplified reproducer (needs a 2G RAM VM):

  $ mkfs.btrfs -f $dev
  $ mount $dev $mnt

  $ btrfs subv create $mnt/subv
  $ btrfs quota enable $mnt
  $ btrfs quota rescan -w $mnt
  $ btrfs qgroup limit -e 1G $mnt/subv

  $ for i in $(seq -w 1 8); do
  	xfs_io -f -c "pwrite 0 128M" $mnt/subv/file_$i > /dev/null
  	echo "file $i written" > /dev/kmsg
    done
  $ sync
  $ btrfs qgroup show -pcre --raw $mnt

The last pwrite will not trigger EDQUOT and the final 'qgroup show' will
show something like:

  qgroupid         rfer         excl     max_rfer     max_excl parent  child
  --------         ----         ----     --------     -------- ------  -----
  0/5             16384        16384         none         none ---     ---
  0/256      1073758208   1073758208         none   1073741824 ---     ---

And 1073758208 is larger than the 1073741824 limit.

[CAUSE]
It's a bug in btrfs qgroup data reserved space management.

For the quota limit, we must ensure that:

  reserved (data + metadata) + rfer/excl <= limit

Since rfer/excl is only updated at transaction commit time, reserved
space needs to be taken special care of.

One important part of reserved space is data, and for a new data extent
written to disk, we still need to keep the reserved space until the
rfer/excl numbers get updated.

Originally, when an ordered extent finishes, we migrate the reserved
qgroup data space from the extent_io tree to the delayed ref head of the
data extent, expecting the delayed ref to only be cleaned up at
transaction commit time.

However on a machine with little RAM, dirty pages can be flushed back to
disk under memory pressure without committing a transaction.  The
related events will be something like:

  file 1 written
  btrfs_finish_ordered_io: ino=258 ordered offset=0 len=54947840
  btrfs_finish_ordered_io: ino=258 ordered offset=54947840 len=5636096
  btrfs_finish_ordered_io: ino=258 ordered offset=61153280 len=57344
  btrfs_finish_ordered_io: ino=258 ordered offset=61210624 len=8192
  btrfs_finish_ordered_io: ino=258 ordered offset=60583936 len=569344
  cleanup_ref_head: num_bytes=54947840
  cleanup_ref_head: num_bytes=5636096
  cleanup_ref_head: num_bytes=569344
  cleanup_ref_head: num_bytes=57344
  cleanup_ref_head: num_bytes=8192
  ^^^^^^^^^^^^^^^^ This will free qgroup data reserved space
  file 2 written
  ...
  file 8 written
  cleanup_ref_head: num_bytes=8192
  ...
  btrfs_commit_transaction  <<< the only transaction committed during the test

When file 2 is written, we have already freed 128M of reserved qgroup
data space for ino 258.  Thus later writes won't trigger EDQUOT, which
allows us to write more data beyond the qgroup limit.
In my 2G RAM VM, it could reach about 1.2G before hitting EDQUOT.

[FIX]
By moving the reserved qgroup data space from btrfs_delayed_ref_head to
btrfs_qgroup_extent_record, we can ensure that reserved qgroup data
space won't be freed half way before transaction commit, thus fixing the
problem.

Fixes: f64d5ca86821 ("btrfs: delayed_ref: Add new function to record reserved space into delayed ref")
Signed-off-by: Qu Wenruo
Signed-off-by: David Sterba
---
 fs/btrfs/qgroup.h | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

(limited to 'fs/btrfs/qgroup.h')
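
The idea of the fix can be sketched as follows: the reserved byte count and
its owning root travel with the qgroup extent record itself and are only
released when that record is processed at transaction commit.  This is an
illustrative sketch with made-up sketch_* types, not the actual delayed-ref
integration; the real code releases the reservation through
btrfs_qgroup_free_refroot(..., BTRFS_QGROUP_RSV_DATA), as the inline helper
removed below shows.

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Simplified stand-in for btrfs_qgroup_extent_record with the new fields. */
struct sketch_extent_record {
	struct rb_node node;
	u64 bytenr;
	u64 num_bytes;
	/* new: carried until the record is accounted at commit time */
	u32 data_rsv;		/* reserved data bytes still to be freed */
	u64 data_rsv_refroot;	/* root that owns the reservation */
};

/* Called when the new data extent is recorded (delayed-ref insertion time). */
static void sketch_record_data_rsv(struct sketch_extent_record *rec,
				   u64 refroot, u32 reserved)
{
	rec->data_rsv_refroot = refroot;
	rec->data_rsv = reserved;
}

/*
 * Called once per record while qgroup accounting runs at transaction commit.
 * Only here is the reservation returned, so an early delayed-ref cleanup
 * under memory pressure can no longer free it prematurely.
 */
static void sketch_account_and_free(struct sketch_extent_record *rec,
				    void (*free_refroot)(u64 refroot, u64 bytes))
{
	if (rec->data_rsv)
		free_refroot(rec->data_rsv_refroot, rec->data_rsv);
	kfree(rec);
}
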
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 5e93733b78c8..46ba7bd2961c 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -107,6 +107,17 @@ struct btrfs_qgroup_extent_record {
 	struct rb_node node;
 	u64 bytenr;
 	u64 num_bytes;
+
+	/*
+	 * For qgroup reserved data space freeing.
+	 *
+	 * @data_rsv_refroot and @data_rsv will be recorded after
+	 * BTRFS_ADD_DELAYED_EXTENT is called.
+	 * And will be used to free reserved qgroup space at
+	 * transaction commit time.
+	 */
+	u32 data_rsv;		/* reserved data space needs to be freed */
+	u64 data_rsv_refroot;	/* which root the reserved data belongs to */
 	struct ulist *old_roots;
 };
 
@@ -326,15 +337,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
 			       u64 ref_root, u64 num_bytes,
 			       enum btrfs_qgroup_rsv_type type);
-static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
-						 u64 ref_root, u64 num_bytes)
-{
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
-		return;
-	trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
-	btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes,
-				  BTRFS_QGROUP_RSV_DATA);
-}
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
-- 
cgit v1.2.3