author    Qu Wenruo <wqu@suse.com>    2020-02-25 14:20:13 +0800
committer David Sterba <dsterba@suse.com>    2020-05-25 11:25:18 +0200
commit    1f872924663f9a15924cc7169932608c1d697ee1
tree      15047e703f9a39ef78b593814c965527187fc58c /fs/btrfs/relocation.c
parent    e7d571c7b004dc20f385d53d0c89e99d078e0415
btrfs: reloc: refactor finishing part of upper linkage into finish_upper_links()
After handle_one_tree_backref(), all newly added (not cached) edges and
nodes have the following features:

- Only backref_edge::list[LOWER] is linked.
  This means we can only iterate from bottom to top, not the other
  direction.

- Newly added nodes are not yet added to the cache rb_tree.

So to finish the backref cache, we still need to finish the links and
add all nodes into the backref cache rb_tree.

This patch will refactor the existing code into finish_upper_links(),
add more comments for each branch, and explain why we need to do all
the work.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
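To make the two-stage linkage concrete, here is a minimal, self-contained
userspace sketch (illustration only, not kernel code): the struct names
model_node/model_edge and the helpers link_lower()/finish_upper() are
invented for this sketch, and the kernel's list_head machinery is replaced
with simple singly linked lists. Stage 1 mirrors the state that
handle_one_tree_backref() leaves behind; stage 2 is the missing upper
linkage this patch factors out.

/*
 * Standalone model of the two-stage edge linkage (illustration only,
 * not kernel code).  node[]/next[] mirror node[UPPER]/node[LOWER] and
 * list[UPPER]/list[LOWER] from the patch, but kernel list_heads are
 * replaced by singly linked lists.
 */
#include <stdio.h>
#include <stdlib.h>

enum { LOWER = 0, UPPER = 1 };

struct model_edge;

struct model_node {
	const char        *name;
	struct model_edge *upper;  /* edges where this node is the LOWER end */
	struct model_edge *lower;  /* edges where this node is the UPPER end;
				    * stays empty until stage 2 runs */
};

struct model_edge {
	struct model_node *node[2];
	struct model_edge *next[2]; /* next[LOWER] chains node->upper,
				     * next[UPPER] chains node->lower */
};

/* Stage 1: link only the LOWER side, as after handle_one_tree_backref() */
static struct model_edge *link_lower(struct model_node *lo,
				     struct model_node *up)
{
	struct model_edge *e = calloc(1, sizeof(*e));

	e->node[LOWER] = lo;
	e->node[UPPER] = up;
	e->next[LOWER] = lo->upper;
	lo->upper = e;             /* bottom-to-top iteration works now */
	return e;
}

/* Stage 2: finish the UPPER side so top-to-bottom iteration works too */
static void finish_upper(struct model_edge *e)
{
	e->next[UPPER] = e->node[UPPER]->lower;
	e->node[UPPER]->lower = e;
}

int main(void)
{
	struct model_node leaf = { .name = "leaf" };
	struct model_node root = { .name = "root" };
	struct model_edge *e = link_lower(&leaf, &root);

	printf("root->lower before stage 2: %p\n", (void *)root.lower);
	finish_upper(e);
	printf("root->lower after stage 2:  %s\n",
	       root.lower == e ? "linked" : "?");
	free(e);
	return 0;
}

Until finish_upper() runs, only bottom-to-top walks via node->upper are
possible, which is exactly the state the new finish_upper_links() starts
from.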
Diffstat (limited to 'fs/btrfs/relocation.c')
-rw-r--r--  fs/btrfs/relocation.c | 186
1 file changed, 117 insertions(+), 69 deletions(-)
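For the traversal itself, the following sketch models the breadth-first
finishing pass with a plain array queue in place of the kernel's
pending-edge list and rb_tree. The names mnode/medge and
finish_upper_links_model() are hypothetical, the boolean `cached` flag
stands in for the RB_EMPTY_NODE(&upper->rb_node) cache-hit test, and the
detached-node and COW-only handling of the real function is omitted.

/*
 * Hypothetical userspace model of the breadth-first finishing pass in
 * finish_upper_links(): pop pending edges, attach each edge to its upper
 * node, and enqueue the parent edges of any node seen for the first time.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_EDGES 16

struct medge;

struct mnode {
	const char   *name;
	bool          cached;                 /* already in the cache? */
	struct medge *parents[MAX_EDGES];     /* edges where this node is LOWER */
	int           nr_parents;
	struct medge *children[MAX_EDGES];    /* filled by the finishing pass */
	int           nr_children;
};

struct medge {
	struct mnode *lower;
	struct mnode *upper;
};

static void finish_upper_links_model(struct mnode *start)
{
	struct medge *pending[MAX_EDGES];
	int head = 0, tail = 0;

	/* Seed the queue with all parent edges of the starting node */
	for (int i = 0; i < start->nr_parents; i++)
		pending[tail++] = start->parents[i];

	while (head < tail) {
		struct medge *edge = pending[head++];
		struct mnode *upper = edge->upper;

		/* Attach the edge on the upper side (the missing linkage) */
		upper->children[upper->nr_children++] = edge;

		/* Cache hit: upper and all its parents are already linked */
		if (upper->cached)
			continue;
		upper->cached = true;

		/* First visit: queue the parent edges of @upper as well */
		for (int i = 0; i < upper->nr_parents; i++)
			pending[tail++] = upper->parents[i];
	}
}

int main(void)
{
	struct mnode root = { .name = "root" };
	struct mnode mid  = { .name = "mid" };
	struct mnode leaf = { .name = "leaf" };
	struct medge e1 = { .lower = &leaf, .upper = &mid };
	struct medge e2 = { .lower = &mid,  .upper = &root };

	leaf.parents[leaf.nr_parents++] = &e1;
	mid.parents[mid.nr_parents++]  = &e2;

	finish_upper_links_model(&leaf);
	printf("mid has %d child edge(s), root has %d\n",
	       mid.nr_children, root.nr_children);  /* prints 1 and 1 */
	return 0;
}

Note how a cache hit terminates the walk on that branch: the edge is still
attached, but the parents of an already-cached node are not re-queued,
matching the `continue` in the real function below.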
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ad94482a2e65..29d53400c64c 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1081,6 +1081,118 @@ out:
}
/*
+ * In handle_one_tree_backref(), we have only linked the lower node to the
+ * edge, but the upper node hasn't been linked to the edge.
+ * This means we can only iterate through backref_node::upper to reach parent
+ * edges, but not through backref_node::lower to reach child edges.
+ *
+ * This function will finish the backref_node::lower linkage to the related
+ * edges, so that the backref cache can be iterated in both directions.
+ *
+ * Also, this will add the nodes to the backref cache rb_tree for the next run.
+ */
+static int finish_upper_links(struct backref_cache *cache,
+ struct backref_node *start)
+{
+ struct list_head *useless_node = &cache->useless_node;
+ struct backref_edge *edge;
+ struct rb_node *rb_node;
+ LIST_HEAD(pending_edge);
+
+ ASSERT(start->checked);
+
+ /* Insert this node into the cache if it's not COW-only */
+ if (!start->cowonly) {
+ rb_node = tree_insert(&cache->rb_root, start->bytenr,
+ &start->rb_node);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST, start->bytenr);
+ list_add_tail(&start->lower, &cache->leaves);
+ }
+
+ /*
+ * Use breadth-first search to iterate all related edges.
+ *
+ * The starting points are all the edges of this node.
+ */
+ list_for_each_entry(edge, &start->upper, list[LOWER])
+ list_add_tail(&edge->list[UPPER], &pending_edge);
+
+ while (!list_empty(&pending_edge)) {
+ struct backref_node *upper;
+ struct backref_node *lower;
+ struct rb_node *rb_node;
+
+ edge = list_first_entry(&pending_edge, struct backref_edge,
+ list[UPPER]);
+ list_del_init(&edge->list[UPPER]);
+ upper = edge->node[UPPER];
+ lower = edge->node[LOWER];
+
+ /* Parent is detached, no need to keep any edges */
+ if (upper->detached) {
+ list_del(&edge->list[LOWER]);
+ free_backref_edge(cache, edge);
+
+ /* Lower node is an orphan, queue it for cleanup */
+ if (list_empty(&lower->upper))
+ list_add(&lower->list, useless_node);
+ continue;
+ }
+
+ /*
+ * All new nodes added in the current build_backref_tree() call haven't
+ * been linked to the cache rb tree.
+ * So if we have upper->rb_node populated, this means a cache
+ * hit. We only need to link the edge, as @upper and all its
+ * parents have already been linked.
+ */
+ if (!RB_EMPTY_NODE(&upper->rb_node)) {
+ if (upper->lowest) {
+ list_del_init(&upper->lower);
+ upper->lowest = 0;
+ }
+
+ list_add_tail(&edge->list[UPPER], &upper->lower);
+ continue;
+ }
+
+ /* Sanity check: we shouldn't have any unchecked nodes */
+ if (!upper->checked) {
+ ASSERT(0);
+ return -EUCLEAN;
+ }
+
+ /* Sanity check: a COW-only node must not have a non-COW-only parent */
+ if (start->cowonly != upper->cowonly) {
+ ASSERT(0);
+ return -EUCLEAN;
+ }
+
+ /* Only cache non-COW-only tree blocks (blocks of subvolume trees) */
+ if (!upper->cowonly) {
+ rb_node = tree_insert(&cache->rb_root, upper->bytenr,
+ &upper->rb_node);
+ if (rb_node) {
+ backref_tree_panic(rb_node, -EEXIST,
+ upper->bytenr);
+ return -EUCLEAN;
+ }
+ }
+
+ list_add_tail(&edge->list[UPPER], &upper->lower);
+
+ /*
+ * Also queue all the parent edges of this uncached node to
+ * finish the upper linkage
+ */
+ list_for_each_entry(edge, &upper->upper, list[LOWER])
+ list_add_tail(&edge->list[UPPER], &pending_edge);
+ }
+ return 0;
+}
+
+/*
* Build backref tree for a given tree block. Root of the backref tree
* corresponds the tree block, leaves of the backref tree correspond roots of
* b-trees that reference the tree block.
@@ -1107,8 +1219,6 @@ static noinline_for_stack struct backref_node *build_backref_tree(
struct backref_node *lower;
struct backref_node *node = NULL;
struct backref_edge *edge;
- struct rb_node *rb_node;
- int cowonly;
int ret;
int err = 0;
@@ -1149,75 +1259,13 @@ static noinline_for_stack struct backref_node *build_backref_tree(
}
} while (edge);
- /*
- * everything goes well, connect backref nodes and insert backref nodes
- * into the cache.
- */
- ASSERT(node->checked);
- cowonly = node->cowonly;
- if (!cowonly) {
- rb_node = tree_insert(&cache->rb_root, node->bytenr,
- &node->rb_node);
- if (rb_node)
- backref_tree_panic(rb_node, -EEXIST, node->bytenr);
- list_add_tail(&node->lower, &cache->leaves);
+ /* Finish the upper linkage of newly added edges/nodes */
+ ret = finish_upper_links(cache, node);
+ if (ret < 0) {
+ err = ret;
+ goto out;
}
- list_for_each_entry(edge, &node->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &cache->pending_edge);
-
- while (!list_empty(&cache->pending_edge)) {
- edge = list_first_entry(&cache->pending_edge,
- struct backref_edge, list[UPPER]);
- list_del_init(&edge->list[UPPER]);
- upper = edge->node[UPPER];
- if (upper->detached) {
- list_del(&edge->list[LOWER]);
- lower = edge->node[LOWER];
- free_backref_edge(cache, edge);
- if (list_empty(&lower->upper))
- list_add(&lower->list, &cache->useless_node);
- continue;
- }
-
- if (!RB_EMPTY_NODE(&upper->rb_node)) {
- if (upper->lowest) {
- list_del_init(&upper->lower);
- upper->lowest = 0;
- }
-
- list_add_tail(&edge->list[UPPER], &upper->lower);
- continue;
- }
-
- if (!upper->checked) {
- /*
- * Still want to blow up for developers since this is a
- * logic bug.
- */
- ASSERT(0);
- err = -EINVAL;
- goto out;
- }
- if (cowonly != upper->cowonly) {
- ASSERT(0);
- err = -EINVAL;
- goto out;
- }
-
- if (!cowonly) {
- rb_node = tree_insert(&cache->rb_root, upper->bytenr,
- &upper->rb_node);
- if (rb_node)
- backref_tree_panic(rb_node, -EEXIST,
- upper->bytenr);
- }
-
- list_add_tail(&edge->list[UPPER], &upper->lower);
-
- list_for_each_entry(edge, &upper->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &cache->pending_edge);
- }
/*
* process useless backref nodes. backref nodes for tree leaves
* are deleted from the cache. backref nodes for upper level