author     Dave Chinner <dchinner@redhat.com>      2023-05-09 11:29:41 +0200
committer  Carlos Maiolino <cem@kernel.org>        2023-05-09 20:57:48 +0200
commit     7ab297988a0b0011688d81835bf8e9bd7b95bdb3 (patch)
tree       ae831d0271a17cda38b7a4f07b9329a989a3383c
parent     9cdecd7e7ab388c811073946795a48ed329f2312 (diff)
download   xfsprogs-dev-7ab297988a0b0011688d81835bf8e9bd7b95bdb3.tar.gz
xfs: active perag reference counting
Source kernel commit: c4d5660afbdcd3f0fa3bbf563e059511fba8445f

We need to be able to dynamically remove instantiated AGs from memory safely, either for shrinking the filesystem or paging AG state in and out of memory (e.g. supporting millions of AGs). This means we need to be able to safely exclude operations from accessing perags while dynamic removal is in progress.

To do this, introduce the concept of active and passive references. Active references are required for high level operations that make use of an AG for a given operation (e.g. allocation) and pin the perag in memory for the duration of the operation that is operating on the perag (e.g. transaction scope). This means we can fail to get an active reference to an AG, hence callers of the new active reference API must be able to handle lookup failure gracefully.

Passive references are used in low level code, where we might need to access the perag structure for the purposes of completing high level operations. For example, buffers need to use passive references because:

- we need to be able to do metadata IO during operations like grow and shrink transactions where high level active references to the AG have already been blocked
- buffers need to pin the perag until they are reclaimed from memory, something that high level code has no direct control over
- unused cached buffers should not prevent a shrink from being started

Hence we have active references that will form exclusion barriers for operations to be performed on an AG, and passive references that will prevent reclaim of the perag until all objects with passive references have been reclaimed themselves.

This patch introduces xfs_perag_grab()/xfs_perag_rele() as the API for active AG reference functionality. We also need to convert the for_each_perag*() iterators to use active references, which will start the process of converting high level code over to using active references. Conversion of non-iterator based code to active references will be done in followup patches.

Note that the implementation using reference counting is really just a development vehicle for the API to ensure we don't have any leaks in the callers. Once we need to remove perag structures from memory dynamically, we will need a much more robust per-ag state transition mechanism for preventing new references from being taken while we wait for existing references to drain before removal from memory can occur.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
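As a minimal sketch of the calling convention described above (not taken from the patch; xfs_ag_do_work() is a hypothetical stand-in for whatever per-AG operation the caller runs), a transaction-scope caller grabs an active reference, handles the NULL case when the AG is offline or mid-shrink, and releases the reference when the work is done:

/*
 * Illustrative only: xfs_ag_do_work() is a hypothetical per-AG operation,
 * not something added by this patch.
 */
static int
xfs_ag_operate(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			error;

	/* Can fail: the AG may be offline or in the middle of a shrink. */
	pag = xfs_perag_grab(mp, agno);
	if (!pag)
		return -ENOENT;

	error = xfs_ag_do_work(pag);

	/* Drop the active reference once the AG-scoped work is complete. */
	xfs_perag_rele(pag);
	return error;
}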
-rw-r--r--  include/atomic.h      5
-rw-r--r--  include/xfs_mount.h   5
-rw-r--r--  include/xfs_trace.h   3
-rw-r--r--  libxfs/xfs_ag.c      70
-rw-r--r--  libxfs/xfs_ag.h      31
5 files changed, 105 insertions(+), 9 deletions(-)
diff --git a/include/atomic.h b/include/atomic.h
index dc6438a3a4..9c4aa5849a 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -48,6 +48,11 @@ static inline bool atomic_add_unless(atomic_t *a, int v, int u)
return o != u;
}
+static inline bool atomic_inc_not_zero(atomic_t *a)
+{
+ return atomic_add_unless(a, 1, 0);
+}
+
static inline bool atomic_dec_and_lock(atomic_t *a, spinlock_t *lock)
{
if (atomic_add_unless(a, -1, 1))
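For reference, the new atomic_inc_not_zero() wrapper only succeeds when the counter is currently non-zero, which is what lets a failed grab signal that the AG is going away. A userspace-only sketch of those semantics (not part of the patch, written against C11 <stdatomic.h> rather than the xfsprogs atomic shims, with illustrative values):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Increment *a only if it is non-zero; report whether we incremented. */
static bool inc_not_zero(atomic_int *a)
{
	int old = atomic_load(a);

	while (old != 0) {
		/* On failure, 'old' is reloaded with the current value. */
		if (atomic_compare_exchange_weak(a, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int ref = 1;

	assert(inc_not_zero(&ref));	/* 1 -> 2: live object, reference taken */
	atomic_store(&ref, 0);
	assert(!inc_not_zero(&ref));	/* stays 0: object is being torn down */
	return 0;
}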
diff --git a/include/xfs_mount.h b/include/xfs_mount.h
index 59a66eb71a..892a2efdd1 100644
--- a/include/xfs_mount.h
+++ b/include/xfs_mount.h
@@ -282,4 +282,9 @@ struct xfs_dquot {
int q_type;
};
+typedef struct wait_queue_head {
+} wait_queue_head_t;
+
+static inline void wake_up(wait_queue_head_t *wq) {}
+
#endif /* __XFS_MOUNT_H__ */
diff --git a/include/xfs_trace.h b/include/xfs_trace.h
index db01af3981..65c2a225ec 100644
--- a/include/xfs_trace.h
+++ b/include/xfs_trace.h
@@ -183,7 +183,10 @@
#define trace_xfs_write_extent(a,b,c,d) ((c) = (c))
#define trace_xfs_perag_get(a,b,c,d) ((c) = (c))
#define trace_xfs_perag_get_tag(a,b,c,d) ((c) = (c))
+#define trace_xfs_perag_grab(...) ((void) 0)
+#define trace_xfs_perag_grab_tag(...) ((void) 0)
#define trace_xfs_perag_put(a,b,c,d) ((c) = (c))
+#define trace_xfs_perag_rele(...) ((void) 0)
#define trace_xfs_trans_alloc(a,b) ((void) 0)
#define trace_xfs_trans_cancel(a,b) ((void) 0)
diff --git a/libxfs/xfs_ag.c b/libxfs/xfs_ag.c
index 70ae1da764..19a5e5dc84 100644
--- a/libxfs/xfs_ag.c
+++ b/libxfs/xfs_ag.c
@@ -93,6 +93,68 @@ xfs_perag_put(
}
/*
+ * Active references for perag structures. This is for short term access to the
+ * per ag structures for walking trees or accessing state. If an AG is being
+ * shrunk or is offline, then this will fail to find that AG and return NULL
+ * instead.
+ */
+struct xfs_perag *
+xfs_perag_grab(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_perag *pag;
+
+ rcu_read_lock();
+ pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+ if (pag) {
+ trace_xfs_perag_grab(mp, pag->pag_agno,
+ atomic_read(&pag->pag_active_ref), _RET_IP_);
+ if (!atomic_inc_not_zero(&pag->pag_active_ref))
+ pag = NULL;
+ }
+ rcu_read_unlock();
+ return pag;
+}
+
+/*
+ * search from @first to find the next perag with the given tag set.
+ */
+struct xfs_perag *
+xfs_perag_grab_tag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t first,
+ int tag)
+{
+ struct xfs_perag *pag;
+ int found;
+
+ rcu_read_lock();
+ found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+ (void **)&pag, first, 1, tag);
+ if (found <= 0) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ trace_xfs_perag_grab_tag(mp, pag->pag_agno,
+ atomic_read(&pag->pag_active_ref), _RET_IP_);
+ if (!atomic_inc_not_zero(&pag->pag_active_ref))
+ pag = NULL;
+ rcu_read_unlock();
+ return pag;
+}
+
+void
+xfs_perag_rele(
+ struct xfs_perag *pag)
+{
+ trace_xfs_perag_rele(pag->pag_mount, pag->pag_agno,
+ atomic_read(&pag->pag_active_ref), _RET_IP_);
+ if (atomic_dec_and_test(&pag->pag_active_ref))
+ wake_up(&pag->pag_active_wq);
+}
+
+/*
* xfs_initialize_perag_data
*
* Read in each per-ag structure so we can count up the number of
@@ -194,6 +256,10 @@ xfs_free_perag(
cancel_delayed_work_sync(&pag->pag_blockgc_work);
xfs_buf_hash_destroy(pag);
+ /* drop the mount's active reference */
+ xfs_perag_rele(pag);
+ XFS_IS_CORRUPT(pag->pag_mount,
+ atomic_read(&pag->pag_active_ref) != 0);
call_rcu(&pag->rcu_head, __xfs_free_perag);
}
}
@@ -312,6 +378,7 @@ xfs_initialize_perag(
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
init_waitqueue_head(&pag->pagb_wait);
+ init_waitqueue_head(&pag->pag_active_wq);
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
#endif /* __KERNEL__ */
@@ -320,6 +387,9 @@ xfs_initialize_perag(
if (error)
goto out_remove_pag;
+ /* Active ref owned by mount indicates AG is online. */
+ atomic_set(&pag->pag_active_ref, 1);
+
/* first new pag is fully initialized */
if (first_initialised == NULLAGNUMBER)
first_initialised = index;
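The hunks above also add pag_active_wq and wake it when the active count drops to zero, even though nothing waits on it yet. Purely as an illustration of what a future kernel-side removal path might do with that hook (this helper is hypothetical, not part of the patch, and the commit message notes that the real drain/exclusion mechanism is left to later work):

/*
 * Hypothetical sketch only: block until the active reference count of this
 * AG drains to zero.  Relies on xfs_perag_rele() waking pag_active_wq when
 * pag_active_ref reaches zero; would only make sense in kernel context where
 * wait_event() is real rather than the userspace stub above.
 */
static void
xfs_perag_wait_for_rele(
	struct xfs_perag	*pag)
{
	wait_event(pag->pag_active_wq,
			atomic_read(&pag->pag_active_ref) == 0);
}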
diff --git a/libxfs/xfs_ag.h b/libxfs/xfs_ag.h
index 191b22b9a3..aeb21c8df2 100644
--- a/libxfs/xfs_ag.h
+++ b/libxfs/xfs_ag.h
@@ -32,7 +32,9 @@ struct xfs_ag_resv {
struct xfs_perag {
struct xfs_mount *pag_mount; /* owner filesystem */
xfs_agnumber_t pag_agno; /* AG this structure belongs to */
- atomic_t pag_ref; /* perag reference count */
+ atomic_t pag_ref; /* passive reference count */
+ atomic_t pag_active_ref; /* active reference count */
+ wait_queue_head_t pag_active_wq;/* woken active_ref falls to zero */
char pagf_init; /* this agf's entry is initialized */
char pagi_init; /* this agi's entry is initialized */
char pagf_metadata; /* the agf is preferred to be metadata */
@@ -111,11 +113,18 @@ int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
void xfs_free_perag(struct xfs_mount *mp);
+/* Passive AG references */
struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
unsigned int tag);
void xfs_perag_put(struct xfs_perag *pag);
+/* Active AG references */
+struct xfs_perag *xfs_perag_grab(struct xfs_mount *, xfs_agnumber_t);
+struct xfs_perag *xfs_perag_grab_tag(struct xfs_mount *, xfs_agnumber_t,
+ int tag);
+void xfs_perag_rele(struct xfs_perag *pag);
+
/*
* Per-ag geometry infomation and validation
*/
@@ -193,14 +202,18 @@ xfs_perag_next(
struct xfs_mount *mp = pag->pag_mount;
*agno = pag->pag_agno + 1;
- xfs_perag_put(pag);
- if (*agno > end_agno)
- return NULL;
- return xfs_perag_get(mp, *agno);
+ xfs_perag_rele(pag);
+ while (*agno <= end_agno) {
+ pag = xfs_perag_grab(mp, *agno);
+ if (pag)
+ return pag;
+ (*agno)++;
+ }
+ return NULL;
}
#define for_each_perag_range(mp, agno, end_agno, pag) \
- for ((pag) = xfs_perag_get((mp), (agno)); \
+ for ((pag) = xfs_perag_grab((mp), (agno)); \
(pag) != NULL; \
(pag) = xfs_perag_next((pag), &(agno), (end_agno)))
@@ -213,11 +226,11 @@ xfs_perag_next(
for_each_perag_from((mp), (agno), (pag))
#define for_each_perag_tag(mp, agno, pag, tag) \
- for ((agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
+ for ((agno) = 0, (pag) = xfs_perag_grab_tag((mp), 0, (tag)); \
(pag) != NULL; \
(agno) = (pag)->pag_agno + 1, \
- xfs_perag_put(pag), \
- (pag) = xfs_perag_get_tag((mp), (agno), (tag)))
+ xfs_perag_rele(pag), \
+ (pag) = xfs_perag_grab_tag((mp), (agno), (tag)))
struct aghdr_init_data {
/* per ag data */
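With the iterators converted, existing for_each_perag_range() users pick up active-reference semantics without source changes: xfs_perag_next() skips any AG it cannot grab, the loop body only ever runs while holding an active reference, and a walk that runs to completion has dropped every reference it took. A hypothetical walk (xfs_count_grabbable_ags() is made up for illustration) might look like:

/*
 * Hypothetical helper, for illustration only: count the AGs that will hand
 * out an active reference right now.  AGs that refuse a grab (offline or
 * mid-shrink) are passed over by xfs_perag_next().
 */
static unsigned int
xfs_count_grabbable_ags(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = 0;
	unsigned int		count = 0;

	for_each_perag_range(mp, agno, mp->m_sb.sb_agcount - 1, pag)
		count++;

	return count;
}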