author		Darrick J. Wong <djwong@kernel.org>	2023-05-30 11:04:51 +0200
committer	Carlos Maiolino <cem@kernel.org>	2023-06-09 10:27:50 +0200
commit		7cb26322f749ab93cd20588c58aab22a379d76e0 (patch)
tree		af02b3aa0353d600bb27ad6e4ab655d11012b9b6
parent		11e716f40c71da49b073eb287c14a908c1fbeb01 (diff)
download	xfsprogs-dev-7cb26322f749ab93cd20588c58aab22a379d76e0.tar.gz
xfs: allow queued AG intents to drain before scrubbing
Source kernel commit: d5c88131dbf01a30a222ad82d58e0c21a15f0d8e

When a writer thread executes a chain of log intent items, the AG header buffer locks will cycle during a transaction roll to get from one intent item to the next in a chain. Although scrub takes all AG header buffer locks, this isn't sufficient to guard against scrub checking an AG while that writer thread is in the middle of finishing a chain because there's no higher level locking primitive guarding allocation groups.

When there's a collision, cross-referencing between data structures (e.g. rmapbt and refcountbt) yields false corruption events; if repair is running, this results in incorrect repairs, which is catastrophic.

Fix this by adding to the perag structure the count of active intents and make scrub wait until it has both AG header buffer locks and the intent counter reaches zero.

One quirk of the drain code is that deferred bmap updates also bump and drop the intent counter. A fundamental decision made during the design phase of the reverse mapping feature is that updates to the rmapbt records are always made by the same code that updates the primary metadata. In other words, callers of bmapi functions expect that the bmapi functions will queue deferred rmap updates.

Some parts of the reflink code queue deferred refcount (CUI) and bmap (BUI) updates in the same head transaction, but the deferred work manager completely finishes the CUI before the BUI work is started. As a result, the CUI drops the intent count long before the deferred rmap (RUI) update even has a chance to bump the intent count. The only way to keep the intent count elevated between the CUI and RUI is for the BUI to bump the counter until the RUI has been created.

A second quirk of the intent drain code is that deferred work items must increment the intent counter as soon as the work item is added to the transaction. When a BUI completes and queues an RUI, the RUI must increment the counter before the BUI decrements it. The only way to accomplish this is to require that the counter be bumped as soon as the deferred work item is created in memory.

In the next patches we'll improve on this facility, but this patch provides the basic functionality.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
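As background for the userspace stubs in the diff below: on the kernel side the drain is an atomic count of queued intents paired with a waitqueue that scrub sleeps on. A minimal sketch of that shape, assuming standard kernel atomic and waitqueue semantics; only the type name matches the stubs below, the field and helper names are illustrative rather than copied from the kernel patch:

struct xfs_defer_drain {
	atomic_t		dr_count;	/* queued intent items */
	struct wait_queue_head	dr_waiters;	/* scrub sleeps here */
};

static inline void xfs_defer_drain_grab(struct xfs_defer_drain *dr)
{
	atomic_inc(&dr->dr_count);
}

static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr)
{
	/* The last intent out wakes any waiting scrubbers. */
	if (atomic_dec_and_test(&dr->dr_count))
		wake_up(&dr->dr_waiters);
}

static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr)
{
	/* Interruptible so a signal can abort a waiting scrub. */
	return wait_event_interruptible(dr->dr_waiters,
			atomic_read(&dr->dr_count) == 0);
}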
-rw-r--r--	include/xfs_mount.h	11
-rw-r--r--	include/xfs_trace.h	3
-rw-r--r--	libxfs/xfs_ag.c		4
-rw-r--r--	libxfs/xfs_ag.h		8
-rw-r--r--	libxfs/xfs_defer.c	6
5 files changed, 30 insertions, 2 deletions
diff --git a/include/xfs_mount.h b/include/xfs_mount.h
index 892a2efdd1..99d1d9ab13 100644
--- a/include/xfs_mount.h
+++ b/include/xfs_mount.h
@@ -287,4 +287,15 @@ typedef struct wait_queue_head {
static inline void wake_up(wait_queue_head_t *wq) {}
+struct xfs_defer_drain { /* empty */ };
+
+#define xfs_defer_drain_init(dr) ((void)0)
+#define xfs_defer_drain_free(dr) ((void)0)
+
+#define xfs_perag_intent_get(mp, agno) xfs_perag_get((mp), (agno))
+#define xfs_perag_intent_put(pag) xfs_perag_put(pag)
+
+static inline void xfs_perag_intent_hold(struct xfs_perag *pag) {}
+static inline void xfs_perag_intent_rele(struct xfs_perag *pag) {}
+
#endif /* __XFS_MOUNT_H__ */
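These userspace stubs compile away, but they preserve a calling convention that, under the assumptions of the sketch above, would pair up roughly as follows. The function and its body are hypothetical; the real kernel call sites land in later patches:

STATIC void
example_queue_intent(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	/* Take a perag reference and bump the AG's intent count. */
	pag = xfs_perag_intent_get(mp, agno);

	/* ... create the intent item, attach it to the transaction ... */

	/*
	 * Drop the count when the work item completes or is cancelled;
	 * the last drop wakes any scrubber waiting on the drain.
	 */
	xfs_perag_intent_put(pag);
}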
diff --git a/include/xfs_trace.h b/include/xfs_trace.h
index b3e5ff1f53..7fd446ad42 100644
--- a/include/xfs_trace.h
+++ b/include/xfs_trace.h
@@ -81,6 +81,9 @@
#define trace_xfs_iext_insert(a,b,c,d) ((void) 0)
#define trace_xfs_iext_remove(a,b,c,d) ((void) 0)
+#define trace_xfs_defer_add_item(a,b,c) ((void) 0)
+#define trace_xfs_defer_cancel_item(a,b,c) ((void) 0)
+#define trace_xfs_defer_finish_item(a,b,c) ((void) 0)
#define trace_xfs_defer_relog_intent(a,b) ((void) 0)
#define trace_xfs_dir2_grow_inode(a,b) ((void) 0)
diff --git a/libxfs/xfs_ag.c b/libxfs/xfs_ag.c
index b181c3e25f..e3465e0661 100644
--- a/libxfs/xfs_ag.c
+++ b/libxfs/xfs_ag.c
@@ -258,6 +258,7 @@ xfs_free_perag(
spin_unlock(&mp->m_perag_lock);
ASSERT(pag);
XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
+ xfs_defer_drain_free(&pag->pag_intents_drain);
cancel_delayed_work_sync(&pag->pag_blockgc_work);
xfs_buf_hash_destroy(pag);
@@ -383,6 +384,7 @@ xfs_initialize_perag(
spin_lock_init(&pag->pag_state_lock);
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
+ xfs_defer_drain_init(&pag->pag_intents_drain);
init_waitqueue_head(&pag->pagb_wait);
init_waitqueue_head(&pag->pag_active_wq);
pag->pagb_count = 0;
@@ -419,6 +421,7 @@ xfs_initialize_perag(
return 0;
out_remove_pag:
+ xfs_defer_drain_free(&pag->pag_intents_drain);
radix_tree_delete(&mp->m_perag_tree, index);
out_free_pag:
kmem_free(pag);
@@ -429,6 +432,7 @@ out_unwind_new_pags:
if (!pag)
break;
xfs_buf_hash_destroy(pag);
+ xfs_defer_drain_free(&pag->pag_intents_drain);
kmem_free(pag);
}
return error;
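For the init/free call sites added above, the kernel-side counterparts of the no-op macros in xfs_mount.h would plausibly look like this; a sketch built on the drain structure outlined earlier, not copied from the kernel patch:

static inline void
xfs_defer_drain_init(
	struct xfs_defer_drain	*dr)
{
	atomic_set(&dr->dr_count, 0);
	init_waitqueue_head(&dr->dr_waiters);
}

static inline void
xfs_defer_drain_free(
	struct xfs_defer_drain	*dr)
{
	/* All intents must have drained before perag teardown. */
	ASSERT(atomic_read(&dr->dr_count) == 0);
}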
diff --git a/libxfs/xfs_ag.h b/libxfs/xfs_ag.h
index 8092eaba97..2e0aef87d6 100644
--- a/libxfs/xfs_ag.h
+++ b/libxfs/xfs_ag.h
@@ -101,6 +101,14 @@ struct xfs_perag {
/* background prealloc block trimming */
struct delayed_work pag_blockgc_work;
+ /*
+ * We use xfs_drain to track the number of deferred log intent items
+ * that have been queued (but not yet processed) so that waiters (e.g.
+ * scrub) will not lock resources when other threads are in the middle
+ * of processing a chain of intent items only to find momentary
+ * inconsistencies.
+ */
+ struct xfs_defer_drain pag_intents_drain;
#endif /* __KERNEL__ */
};
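On the waiter side described by the comment above, a scrub loop under the same assumptions might read as follows. xfs_scrub_ag_lock_headers()/xfs_scrub_ag_unlock_headers() are invented names for the existing take/drop-AG-header-locks steps, and dr_count refers to the illustrative field from the earlier sketch:

STATIC int
example_scrub_ag_quiesce(
	struct xfs_scrub	*sc)
{
	struct xfs_perag	*pag = sc->sa.pag;
	int			error;

	while ((error = xfs_scrub_ag_lock_headers(sc)) == 0) {
		/* No queued intents: the AG is quiesced, safe to scrub. */
		if (atomic_read(&pag->pag_intents_drain.dr_count) == 0)
			return 0;

		/* Unlock, wait for in-flight chains to finish, retry. */
		xfs_scrub_ag_unlock_headers(sc);
		error = xfs_defer_drain_wait(&pag->pag_intents_drain);
		if (error)
			break;
	}
	return error;
}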
diff --git a/libxfs/xfs_defer.c b/libxfs/xfs_defer.c
index c4f0269d61..77a94f58f4 100644
--- a/libxfs/xfs_defer.c
+++ b/libxfs/xfs_defer.c
@@ -392,6 +392,7 @@ xfs_defer_cancel_list(
list_for_each_safe(pwi, n, &dfp->dfp_work) {
list_del(pwi);
dfp->dfp_count--;
+ trace_xfs_defer_cancel_item(mp, dfp, pwi);
ops->cancel_item(pwi);
}
ASSERT(dfp->dfp_count == 0);
@@ -471,6 +472,7 @@ xfs_defer_finish_one(
list_for_each_safe(li, n, &dfp->dfp_work) {
list_del(li);
dfp->dfp_count--;
+ trace_xfs_defer_finish_item(tp->t_mountp, dfp, li);
error = ops->finish_item(tp, dfp->dfp_done, li, &state);
if (error == -EAGAIN) {
int ret;
@@ -618,7 +620,7 @@ xfs_defer_add(
struct list_head *li)
{
struct xfs_defer_pending *dfp = NULL;
- const struct xfs_defer_op_type *ops;
+ const struct xfs_defer_op_type *ops = defer_op_types[type];
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);
@@ -631,7 +633,6 @@ xfs_defer_add(
if (!list_empty(&tp->t_dfops)) {
dfp = list_last_entry(&tp->t_dfops,
struct xfs_defer_pending, dfp_list);
- ops = defer_op_types[dfp->dfp_type];
if (dfp->dfp_type != type ||
(ops->max_items && dfp->dfp_count >= ops->max_items))
dfp = NULL;
@@ -648,6 +649,7 @@ xfs_defer_add(
}
list_add_tail(li, &dfp->dfp_work);
+ trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
dfp->dfp_count++;
}
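The ordering rule spelled out in the commit message, that a follow-up work item must bump the intent counter before its creator drops it, would look roughly like this for the BUI-to-RUI handoff. This fragment is illustrative only; ri and pag are assumed to be the pending rmap intent and the AG both items target:

/* Inside a hypothetical BUI finish path, queueing the follow-up RUI. */
xfs_perag_intent_hold(pag);	/* the RUI's reference, taken first */
xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);

/*
 * Only now may the BUI drop the reference it has held since it was
 * created, so the AG's intent count never reads zero mid-chain.
 */
xfs_perag_intent_rele(pag);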