From 2335efafa63f0c675ebb4f8908fff9e972fb8a58 Mon Sep 17 00:00:00 2001
From: Nikolay Borisov <nborisov@suse.com>
Date: Tue, 24 Apr 2018 17:18:24 +0300
Git-commit: 2335efafa63f0c675ebb4f8908fff9e972fb8a58
Patch-mainline: v4.18
References: bsc#1134813
Subject: [PATCH 8/8] btrfs: split delayed ref head initialization and addition

add_delayed_ref_head really performed 2 independent operations -
initialising the ref head and adding it to a list. Now that the init
part is in a separate function let's complete the separation between
both operations. This results in a much simpler interface for
add_delayed_ref_head since the function now deals solely with either
adding the newly initialised delayed ref head or merging it into an
existing delayed ref head. It also results in a vastly simplified
function signature since 5 arguments are dropped. The only other thing
worth mentioning is that due to this split the WARN_ON catching
re-initialisation of an existing head has to be adjusted. In this
patch the condition is extended such that:

  qrecord && head_ref->qgroup_ref_root && head_ref->qgroup_reserved

is added. This is done because the two qgroup_* prefixed members are
set only if both ref_root and reserved are passed. So functionally
it's equivalent to the old WARN_ON and allows removing the two
arguments from add_delayed_ref_head.
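
To illustrate, after this split a caller such as
btrfs_add_delayed_tree_ref roughly follows the pattern below
(simplified sketch of the hunks in this patch, error handling and
qgroup record setup omitted):

  /* Fill in the head before taking the delayed refs lock. */
  init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
                        ref_root, 0, action, false, is_system);
  head_ref->extent_op = extent_op;

  spin_lock(&delayed_refs->lock);
  /*
   * Under the lock the head is only inserted or merged into an
   * existing one; the 5 init arguments are gone from the call.
   */
  head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
                                  action, &qrecord_inserted,
                                  old_ref_mod, new_ref_mod);
  spin_unlock(&delayed_refs->lock);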

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/delayed-ref.c |   42 ++++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 22 deletions(-)

--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -617,9 +617,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
 		     struct btrfs_trans_handle *trans,
 		     struct btrfs_delayed_ref_head *head_ref,
 		     struct btrfs_qgroup_extent_record *qrecord,
-		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
-		     int action, int is_data, int is_system,
-		     int *qrecord_inserted_ret,
+		     int action, int *qrecord_inserted_ret,
 		     int *old_ref_mod, int *new_ref_mod)
 
 {
@@ -629,9 +627,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
 
 	delayed_refs = &trans->transaction->delayed_refs;
 
-	init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
-			      reserved, action, is_data, is_system);
-
 	/* Record qgroup extent info if provided */
 	if (qrecord) {
 		if (btrfs_qgroup_trace_extent_nolock(fs_info,
@@ -646,7 +641,9 @@ add_delayed_ref_head(struct btrfs_fs_inf
 	existing = htree_insert(&delayed_refs->href_root,
 				&head_ref->href_node);
 	if (existing) {
-		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
+		WARN_ON(qrecord && head_ref->qgroup_ref_root
+			&& head_ref->qgroup_reserved
+			&& existing->qgroup_ref_root
 			&& existing->qgroup_reserved);
 		update_existing_head_ref(delayed_refs, existing, head_ref,
 					 old_ref_mod);
@@ -659,8 +656,8 @@ add_delayed_ref_head(struct btrfs_fs_inf
 	} else {
 		if (old_ref_mod)
 			*old_ref_mod = 0;
-		if (is_data && head_ref->ref_mod < 0)
-			delayed_refs->pending_csums += num_bytes;
+		if (head_ref->is_data && head_ref->ref_mod < 0)
+			delayed_refs->pending_csums += head_ref->num_bytes;
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
 		atomic_inc(&delayed_refs->num_entries);
@@ -670,6 +667,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
 		*qrecord_inserted_ret = qrecord_inserted;
 	if (new_ref_mod)
 		*new_ref_mod = head_ref->total_ref_mod;
+
 	return head_ref;
 }
 
@@ -741,7 +739,7 @@ int btrfs_add_delayed_tree_ref(struct bt
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_qgroup_extent_record *record = NULL;
 	int qrecord_inserted;
-	int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+	bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
 	int ret;
 	u8 ref_type;
 
@@ -771,6 +769,8 @@ int btrfs_add_delayed_tree_ref(struct bt
 			goto free_head_ref;
 	}
 
+	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
+			      ref_root, 0, action, false, is_system);
 	head_ref->extent_op = extent_op;
 
 	delayed_refs = &trans->transaction->delayed_refs;
@@ -781,8 +781,7 @@ int btrfs_add_delayed_tree_ref(struct bt
 	 * the spin lock
 	 */
 	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
-					bytenr, num_bytes, 0, 0, action, 0,
-					is_system, &qrecord_inserted,
+					action, &qrecord_inserted,
 					old_ref_mod, new_ref_mod);
 
 
@@ -858,6 +857,8 @@ int btrfs_add_delayed_data_ref(struct bt
 		}
 	}
 
+	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
+			      reserved, action, true, false);
 	head_ref->extent_op = NULL;
 
 	delayed_refs = &trans->transaction->delayed_refs;
@@ -868,8 +869,7 @@ int btrfs_add_delayed_data_ref(struct bt
 	 * the spin lock
 	 */
 	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
-					bytenr, num_bytes, ref_root, reserved,
-					action, 1, 0, &qrecord_inserted,
+					action, &qrecord_inserted,
 					old_ref_mod, new_ref_mod);
 
 	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
@@ -898,19 +898,17 @@ int btrfs_add_delayed_extent_op(struct b
 	if (!head_ref)
 		return -ENOMEM;
 
+	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
+			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
+			      false);
 	head_ref->extent_op = extent_op;
 
 	delayed_refs = &trans->transaction->delayed_refs;
 	spin_lock(&delayed_refs->lock);
 
-	/*
-	 * extent_ops just modify the flags of an extent and they don't result
-	 * in ref count changes, hence it's safe to pass false/0 for is_system
-	 * argument
-	 */
-	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
-			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
-			     extent_op->is_data, 0, NULL, NULL, NULL);
+	add_delayed_ref_head(fs_info, trans, head_ref, NULL,
+			     BTRFS_UPDATE_DELAYED_HEAD,
+			     NULL, NULL, NULL);
 
 	spin_unlock(&delayed_refs->lock);
 	return 0;