author     Steven Whitehouse <swhiteho@redhat.com>    2011-01-19 09:30:01 +0000
committer  Steven Whitehouse <swhiteho@redhat.com>    2011-01-21 09:39:08 +0000
commit     bc015cb84129eb1451913cfebece270bf7a39e0f (patch)
tree       4f116a61b802d87ae80051e9ae05d8fcb73d9ae7 /fs/gfs2/lock_dlm.c
parent     2b1caf6ed7b888c95a1909d343799672731651a5 (diff)
GFS2: Use RCU for glock hash table
This has a number of advantages:

 - Reduces contention on the hash table lock
 - Makes the code smaller and simpler
 - Should speed up glock dumps when under load
 - Removes ref count changing in examine_bucket
 - No longer need hash chain lock in glock_put() in common case

There are some further changes which this enables and which we may do
in the future. One is to look at using SLAB_RCU, and another is to look
at using a per-cpu counter for the per-sb glock counter, since that is
touched twice in the lifetime of each glock (but only used at umount
time).

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
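For orientation: the call_rcu() lines in the diff below hand the glock to
an RCU callback instead of freeing it inline. A plausible sketch of that
callback, reconstructed from the two free paths this patch removes (the
real gfs2_glock_free() is defined in fs/gfs2/glock.c and may differ in
detail):

/* Sketch only: reconstructed from the logic removed below, not the
 * verbatim fs/gfs2/glock.c definition. Needs <linux/rcupdate.h> and
 * <linux/slab.h>. */
void gfs2_glock_free(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* Glocks backing an address space come from a different cache */
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);

	/* Let umount proceed once the last disposed glock is gone */
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

call_rcu() defers the actual kmem_cache_free() until every CPU has left
its current RCU read-side critical section, so lockless readers walking
the glock hash table can never see the memory freed out from under them.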
Diffstat (limited to 'fs/gfs2/lock_dlm.c')
-rw-r--r--  fs/gfs2/lock_dlm.c | 14 +++-----------
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 6e493ae..c80485c 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -22,7 +22,6 @@ static void gdlm_ast(void *arg)
{
struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state;
- struct gfs2_sbd *sdp = gl->gl_sbd;
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -31,12 +30,7 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- if (gl->gl_ops->go_flags & GLOF_ASPACE)
- kmem_cache_free(gfs2_glock_aspace_cachep, gl);
- else
- kmem_cache_free(gfs2_glock_cachep, gl);
- if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ call_rcu(&gl->gl_rcu, gfs2_glock_free);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
ret |= LM_OUT_CANCELED;
@@ -164,16 +158,14 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}
-static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+static void gdlm_put_lock(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
if (gl->gl_lksb.sb_lkid == 0) {
- kmem_cache_free(cachep, gl);
- if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ call_rcu(&gl->gl_rcu, gfs2_glock_free);
return;
}
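The read side that this deferred freeing protects lives in
fs/gfs2/glock.c and is outside this file. A minimal sketch of the
pattern, with illustrative names (find_glock; the real code uses its
own bucket search helper) rather than the actual glock.c code:

/* Hypothetical sketch of an RCU-protected hash chain lookup, under the
 * assumption of a plain hlist bucket and the era's four-argument
 * hlist_for_each_entry_rcu(). Needs <linux/rcupdate.h>, <linux/list.h>. */
static struct gfs2_glock *find_glock(struct hlist_head *head, u64 number)
{
	struct gfs2_glock *gl;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gl, pos, head, gl_list) {
		/* Take a reference only if the glock is not already dying */
		if (gl->gl_name.ln_number == number &&
		    atomic_inc_not_zero(&gl->gl_ref)) {
			rcu_read_unlock();
			return gl;
		}
	}
	rcu_read_unlock();
	return NULL;
}

atomic_inc_not_zero() is what makes the common case of glock_put() safe
without the hash chain lock: a reader either gains a reference before
the count reaches zero, or sees the glock as already being disposed of
and skips it, relying on call_rcu() to keep the memory valid until the
read-side critical section ends.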