author    Dave Chinner <dchinner@redhat.com>  2010-09-22 10:47:20 +1000
committer Alex Elder <aelder@sgi.com>  2010-10-18 15:07:44 -0500
commit    e176579e70118ed7cfdb60f963628fe0ca771f3d
tree      e2d252fcd692aa4e3907300521e2af81c974f875  /fs/xfs/xfs_mount.c
parent    bd32d25a7cf7242512e77e70bab63df4402ab91c
xfs: lockless per-ag lookups
When we start taking a reference to the per-ag for every cached buffer in the system, kernel lockstat profiling on an 8-way create workload shows the mp->m_perag_lock has higher acquisition rates than the inode lock and has significantly more contention. That is, it becomes the highest contended lock in the system.

The perag lookup is trivial to convert to lock-less RCU lookups because perag structures never go away. Hence the only thing we need to protect against is tree structure changes during a grow. This can be done simply by replacing the locking in xfs_perag_get() with RCU read locking. This removes the mp->m_perag_lock completely from this path.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
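For context, a minimal sketch of the update side this relies on: tree modifications during a grow still serialise on mp->m_perag_lock, and the Linux radix tree publishes new slots in an RCU-safe way, so a reader needs nothing more than rcu_read_lock(). The helper name example_grow_insert_perag() is hypothetical and not part of this patch; the real grow path differs in detail.

static int
example_grow_insert_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct xfs_perag	*pag)
{
	int	error;

	/* preallocate radix tree nodes outside the spinlock */
	error = radix_tree_preload(GFP_NOFS);
	if (error)
		return error;

	/* writers still serialise on m_perag_lock; readers never take it */
	spin_lock(&mp->m_perag_lock);
	error = radix_tree_insert(&mp->m_perag_tree, agno, pag);
	spin_unlock(&mp->m_perag_lock);
	radix_tree_preload_end();

	return error;
}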
Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r--  fs/xfs/xfs_mount.c | 25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 00c7a87..14fc6e9 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -199,6 +199,8 @@ xfs_uuid_unmount(
 
 /*
  * Reference counting access wrappers to the perag structures.
+ * Because we never free per-ag structures, the only thing we
+ * have to protect against changes is the tree structure itself.
  */
 struct xfs_perag *
 xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
@@ -206,13 +208,13 @@ xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
 	struct xfs_perag	*pag;
 	int			ref = 0;
 
-	spin_lock(&mp->m_perag_lock);
+	rcu_read_lock();
 	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
 	if (pag) {
 		ASSERT(atomic_read(&pag->pag_ref) >= 0);
 		ref = atomic_inc_return(&pag->pag_ref);
 	}
-	spin_unlock(&mp->m_perag_lock);
+	rcu_read_unlock();
 	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
 	return pag;
 }
@@ -227,10 +229,18 @@ xfs_perag_put(struct xfs_perag *pag)
 	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
 }
 
+STATIC void
+__xfs_free_perag(
+	struct rcu_head	*head)
+{
+	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+
+	ASSERT(atomic_read(&pag->pag_ref) == 0);
+	kmem_free(pag);
+}
+
 /*
- * Free up the resources associated with a mount structure.  Assume that
- * the structure was initially zeroed, so we can tell which fields got
- * initialized.
+ * Free up the per-ag resources associated with the mount structure.
  */
 STATIC void
 xfs_free_perag(
@@ -242,10 +252,9 @@ xfs_free_perag(
 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 		spin_lock(&mp->m_perag_lock);
 		pag = radix_tree_delete(&mp->m_perag_tree, agno);
-		ASSERT(pag);
-		ASSERT(atomic_read(&pag->pag_ref) == 0);
 		spin_unlock(&mp->m_perag_lock);
-		kmem_free(pag);
+		ASSERT(pag);
+		call_rcu(&pag->rcu_head, __xfs_free_perag);
 	}
 }
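After the change, the read side is completely lock-free: rcu_read_lock() covers only the radix tree walk inside xfs_perag_get(), and the elevated pag_ref keeps the perag alive once the RCU read section ends. A minimal caller sketch follows; the helper example_use_perag() is hypothetical, not from the patch.

static void
example_use_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, agno);	/* lockless lookup, takes a reference */
	if (!pag)
		return;
	/* pag is pinned by pag_ref here; safe to dereference its fields */
	xfs_perag_put(pag);		/* drop the reference */
}

This is also why the final kmem_free() moves behind call_rcu(): at unmount a concurrent lookup may have already found pag in the tree, and the deferred free guarantees the structure stays valid until every such RCU read-side critical section has completed.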