author     Dave Chinner <dchinner@redhat.com>    2010-12-21 12:28:39 +1100
committer  Dave Chinner <david@fromorbit.com>    2010-12-21 12:28:39 +1100
commit     1c3cb9ec07fabf0c0970adc46fd2a1f09c1186dd (patch)
tree       b8b6bf968f663723b06aa68ec499e49f3cdfbad0 /fs/xfs/xfs_log.c
parent     84f3c683c4d3f36d3c3ed320babd960a332ac458 (diff)
xfs: convert l_tail_lsn to an atomic variable.
log->l_tail_lsn is currently protected by the log grant lock. The lock is
only needed for serialising readers against writers, so we don't really
need the lock if we make the l_tail_lsn variable an atomic. Converting
the l_tail_lsn variable to an atomic64_t means we can start to peel back
the grant lock from various operations.

Also, provide functions to safely crack an atomic LSN variable into its
component pieces and to recombine the components into an atomic variable.
Use them where appropriate. This also removes the need for explicitly
holding a spinlock to read the l_tail_lsn on 32 bit platforms.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
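[Editor's note: the crack/recombine helpers named above are added to
fs/xfs/xfs_log_priv.h, which is outside this file's diff. A minimal sketch
of what they plausibly look like, assuming the usual XFS LSN packing where
CYCLE_LSN() extracts the high 32 bits and BLOCK_LSN() the low 32 bits:

static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	/* a single atomic64_t read is tear-free even on 32 bit CPUs */
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);	/* high 32 bits: cycle number */
	*block = BLOCK_LSN(val);	/* low 32 bits: block number */
}

static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	/* pack the components and publish them in one atomic store */
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

Because both the store and the load are single 64 bit atomic operations,
readers and the writer no longer need a lock to agree on a consistent
(cycle, block) pair.]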
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--  fs/xfs/xfs_log.c  56
1 file changed, 25 insertions(+), 31 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 70790eb..d118bf8 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -678,15 +678,11 @@ xfs_log_move_tail(xfs_mount_t *mp,
if (tail_lsn == 0)
tail_lsn = atomic64_read(&log->l_last_sync_lsn);
- spin_lock(&log->l_grant_lock);
-
- /* Also an invalid lsn. 1 implies that we aren't passing in a valid
- * tail_lsn.
- */
- if (tail_lsn != 1) {
- log->l_tail_lsn = tail_lsn;
- }
+ /* tail_lsn == 1 implies that we weren't passed a valid value. */
+ if (tail_lsn != 1)
+ atomic64_set(&log->l_tail_lsn, tail_lsn);
+ spin_lock(&log->l_grant_lock);
if (!list_empty(&log->l_writeq)) {
#ifdef DEBUG
if (log->l_flags & XLOG_ACTIVE_RECOVERY)
@@ -789,21 +785,19 @@ xfs_log_need_covered(xfs_mount_t *mp)
* We may be holding the log iclog lock upon entering this routine.
*/
xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp)
+xlog_assign_tail_lsn(
+ struct xfs_mount *mp)
{
- xfs_lsn_t tail_lsn;
- xlog_t *log = mp->m_log;
+ xfs_lsn_t tail_lsn;
+ struct log *log = mp->m_log;
tail_lsn = xfs_trans_ail_tail(mp->m_ail);
- spin_lock(&log->l_grant_lock);
if (!tail_lsn)
tail_lsn = atomic64_read(&log->l_last_sync_lsn);
- log->l_tail_lsn = tail_lsn;
- spin_unlock(&log->l_grant_lock);
+ atomic64_set(&log->l_tail_lsn, tail_lsn);
return tail_lsn;
-} /* xlog_assign_tail_lsn */
-
+}
/*
* Return the space in the log between the tail and the head. The head
@@ -831,8 +825,8 @@ xlog_space_left(
int head_bytes;
xlog_crack_grant_head(head, &head_cycle, &head_bytes);
- tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
- tail_cycle = CYCLE_LSN(log->l_tail_lsn);
+ xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
+ tail_bytes = BBTOB(tail_bytes);
if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
free_bytes = log->l_logsize - (head_bytes - tail_bytes);
else if (tail_cycle + 1 < head_cycle)
@@ -1009,8 +1003,8 @@ xlog_alloc_log(xfs_mount_t *mp,
log->l_prev_block = -1;
/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
- log->l_tail_lsn = xlog_assign_lsn(1, 0);
- atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(1, 0));
+ xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
+ xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
@@ -1189,7 +1183,6 @@ xlog_grant_push_ail(
{
xfs_lsn_t threshold_lsn = 0;
xfs_lsn_t last_sync_lsn;
- xfs_lsn_t tail_lsn;
int free_blocks;
int free_bytes;
int threshold_block;
@@ -1198,7 +1191,6 @@ xlog_grant_push_ail(
ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
- tail_lsn = log->l_tail_lsn;
free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
free_blocks = BTOBBT(free_bytes);
@@ -1213,8 +1205,9 @@ xlog_grant_push_ail(
if (free_blocks >= free_threshold)
return;
- threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
- threshold_cycle = CYCLE_LSN(tail_lsn);
+ xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
+ &threshold_block);
+ threshold_block += free_threshold;
if (threshold_block >= log->l_logBBsize) {
threshold_block -= log->l_logBBsize;
threshold_cycle += 1;
@@ -2828,11 +2821,11 @@ xlog_state_release_iclog(
if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
/* update tail before writing to iclog */
- xlog_assign_tail_lsn(log->l_mp);
+ xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
sync++;
iclog->ic_state = XLOG_STATE_SYNCING;
- iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
- xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ xlog_verify_tail_lsn(log, iclog, tail_lsn);
/* cycle incremented when incrementing curr_block */
}
spin_unlock(&log->l_icloglock);
@@ -3435,7 +3428,7 @@ STATIC void
xlog_verify_grant_tail(
struct log *log)
{
- xfs_lsn_t tail_lsn = log->l_tail_lsn;
+ int tail_cycle, tail_blocks;
int cycle, space;
/*
@@ -3445,9 +3438,10 @@ xlog_verify_grant_tail(
* check the byte count.
*/
xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
- if (CYCLE_LSN(tail_lsn) != cycle) {
- ASSERT(cycle - 1 == CYCLE_LSN(tail_lsn));
- ASSERT(space <= BBTOB(BLOCK_LSN(tail_lsn)));
+ xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
+ if (tail_cycle != cycle) {
+ ASSERT(cycle - 1 == tail_cycle);
+ ASSERT(space <= BBTOB(tail_blocks));
}
}
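[Editor's note: taken together, the hunks above remove every locked read
of l_tail_lsn in this file, which is what lets the commit message claim a
lock-free read path on 32 bit platforms. A simplified before/after of the
read side, illustrative rather than taken from this diff:

	/* before: a plain 64 bit load can tear on a 32 bit CPU, so
	 * readers took the grant lock to see a consistent value */
	spin_lock(&log->l_grant_lock);
	tail_lsn = log->l_tail_lsn;
	spin_unlock(&log->l_grant_lock);

	/* after: the atomic64 implementation guarantees a consistent
	 * 64 bit load without holding any log lock */
	tail_lsn = atomic64_read(&log->l_tail_lsn);

This is the sense in which the conversion "peels back" the grant lock:
each former l_tail_lsn access becomes independent of it, shrinking the
lock's remaining critical sections.]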