From d9ef75221a6247b758e1d7e18edb661996e4b7cf Mon Sep 17 00:00:00 2001
From: Wengang Wang
Date: Mon, 17 May 2010 20:20:44 +0800
Subject: ocfs2:dlm: avoid dlm->ast_lock lockres->spinlock dependency break

Currently we process a dirty lockres with lockres->spinlock held. During
that processing we may need to take dlm->ast_lock, which violates the
established lock ordering: dlm->ast_lock must be taken first,
lockres->spinlock second.

This patch fixes the problem. Since we cannot drop lockres->spinlock in
the middle of processing, we instead take dlm->ast_lock just before
taking lockres->spinlock and release it after lockres->spinlock is
released, and dlm_shuffle_lists() now uses __dlm_queue_bast() and
__dlm_queue_ast(), the no-lock variants. A lockres does not carry many
locks, so holding dlm->ast_lock across the shuffle causes no real
performance harm.

Signed-off-by: Wengang Wang
Signed-off-by: Joel Becker
---
 fs/ocfs2/dlm/dlmthread.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
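
A condensed view of the ordering this establishes in dlm_thread() (a
sketch assembled from the hunks below, with the dirty-list walk and
error paths elided, not a verbatim excerpt):

	spin_lock(&dlm->ast_lock);	/* outer lock, always taken first */
	spin_lock(&res->spinlock);	/* inner lock, taken second */
	dlm_shuffle_lists(dlm, res);	/* may queue via __dlm_queue_ast/__dlm_queue_bast */
	res->state &= ~DLM_LOCK_RES_DIRTY;
	spin_unlock(&res->spinlock);	/* release in reverse order */
	spin_unlock(&dlm->ast_lock);
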
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 52ec020..0bdd28e 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -310,6 +310,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 	 * spinlock, and because we know that it is not migrating/
 	 * recovering/in-progress, it is fine to reserve asts and
 	 * basts right before queueing them all throughout */
+	assert_spin_locked(&dlm->ast_lock);
 	assert_spin_locked(&res->spinlock);
 	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
 			      DLM_LOCK_RES_RECOVERING|
@@ -338,7 +339,7 @@ converting:
 			/* queue the BAST if not already */
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			/* update the highest_blocked if needed */
 			if (lock->ml.highest_blocked < target->ml.convert_type)
@@ -356,7 +357,7 @@ converting:
 			can_grant = 0;
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			if (lock->ml.highest_blocked < target->ml.convert_type)
 				lock->ml.highest_blocked =
@@ -384,7 +385,7 @@ converting:
 		spin_unlock(&target->spinlock);
 
 		__dlm_lockres_reserve_ast(res);
-		dlm_queue_ast(dlm, target);
+		__dlm_queue_ast(dlm, target);
 		/* go back and check for more */
 		goto converting;
 	}
@@ -403,7 +404,7 @@ blocked:
 			can_grant = 0;
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			if (lock->ml.highest_blocked < target->ml.type)
 				lock->ml.highest_blocked = target->ml.type;
@@ -419,7 +420,7 @@ blocked:
 			can_grant = 0;
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			if (lock->ml.highest_blocked < target->ml.type)
 				lock->ml.highest_blocked = target->ml.type;
@@ -445,7 +446,7 @@ blocked:
 		spin_unlock(&target->spinlock);
 
 		__dlm_lockres_reserve_ast(res);
-		dlm_queue_ast(dlm, target);
+		__dlm_queue_ast(dlm, target);
 		/* go back and check for more */
 		goto converting;
 	}
@@ -675,6 +676,7 @@ static int dlm_thread(void *data)
 			/* lockres can be re-dirtied/re-added to the
 			 * dirty_list in this gap, but that is ok */
 
+			spin_lock(&dlm->ast_lock);
 			spin_lock(&res->spinlock);
 			if (res->owner != dlm->node_num) {
 				__dlm_print_one_lock_resource(res);
@@ -695,6 +697,7 @@ static int dlm_thread(void *data)
 			/* move it to the tail and keep going */
 			res->state &= ~DLM_LOCK_RES_DIRTY;
 			spin_unlock(&res->spinlock);
+			spin_unlock(&dlm->ast_lock);
 			mlog(0, "delaying list shuffling for in-"
 			     "progress lockres %.*s, state=%d\n",
 			     res->lockname.len, res->lockname.name,
@@ -716,6 +719,7 @@ static int dlm_thread(void *data)
 			dlm_shuffle_lists(dlm, res);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
 			spin_unlock(&res->spinlock);
+			spin_unlock(&dlm->ast_lock);
 
 			dlm_lockres_calc_usage(dlm, res);
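
For context, the plain and double-underscore queueing calls follow the
usual kernel pairing: dlm_queue_ast()/dlm_queue_bast() are locking
wrappers around the __-prefixed variants, which assume the caller
already holds dlm->ast_lock. Roughly (a sketch of the wrapper shape,
not a verbatim dlmast.c excerpt):

	void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
	{
		spin_lock(&dlm->ast_lock);
		__dlm_queue_ast(dlm, lock);	/* queue onto the pending list, ast_lock held */
		spin_unlock(&dlm->ast_lock);
	}

Since dlm_shuffle_lists() now runs with dlm->ast_lock already held,
calling the wrappers from there would try to take ast_lock a second
time and deadlock (kernel spinlocks are not recursive), hence the
switch to the __-variants above.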