author     David Chinner <dgc@sgi.com>                          2008-03-06 13:44:14 +1100
committer  Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2008-04-18 11:38:10 +1000
commit     155cc6b784a959ed456fe46dca522e1d28b3b718 (patch)
tree       e88b9041570f299497a6f9aac7e01797affed205 /fs/xfs/xfs_log.c
parent     b589334c7a1fff85d2f009d5db4c34fad48925e9 (diff)
[XFS] Use atomics for iclog reference counting
Now that we update the log tail LSN less frequently on transaction completion, we pass the contention straight to the global log state lock (l_icloglock) during transaction completion.

We currently have to take this lock to decrement the iclog reference count. There is a reference count on each iclog, so we need to take the global lock for all refcount changes.

When large numbers of processes are all doing small transactions, the iclog reference counts will be quite high, and the state change that absolutely requires the l_icloglock is the exception rather than the norm.

Change the reference counting on the iclogs to use atomic_inc/dec so that we can use atomic_dec_and_lock during transaction completion and avoid the need to grab the l_icloglock for every reference count decrement except the one that matters - the last.

SGI-PV: 975671
SGI-Modid: xfs-linux-melb:xfs-kern:30505a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
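The core of the change is the atomic_dec_and_lock() pattern: every reference drop except the last stays lock-free, and the state lock is taken only when the decrement may reach zero. Below is a minimal userspace sketch of the same idea, using GCC atomic builtins and a pthread mutex as stand-ins for the kernel's atomic_t and spinlock_t; the names here are illustrative, not the kernel API.

#include <pthread.h>
#include <stdbool.h>

/* Userspace stand-in for the iclog refcount. */
struct iclog {
	int refcnt;			/* manipulated with atomic builtins */
};

static pthread_mutex_t icloglock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Decrement refcnt; take icloglock only if the count reached zero.
 * Mirrors atomic_dec_and_lock(): returns true with the lock held
 * when the caller dropped the last reference, false otherwise.
 */
static bool iclog_dec_and_lock(struct iclog *iclog)
{
	int old = __atomic_load_n(&iclog->refcnt, __ATOMIC_RELAXED);

	/* Fast path: not the last reference, so just decrement. */
	while (old > 1) {
		if (__atomic_compare_exchange_n(&iclog->refcnt, &old,
						old - 1, false,
						__ATOMIC_RELEASE,
						__ATOMIC_RELAXED))
			return false;	/* lock never taken */
	}

	/*
	 * Slow path: we may be the last reference. Take the lock
	 * first so the zero-count state change is serialised, then
	 * re-check, since another thread may have re-referenced the
	 * iclog in the meantime.
	 */
	pthread_mutex_lock(&icloglock);
	if (__atomic_sub_fetch(&iclog->refcnt, 1, __ATOMIC_ACQ_REL) == 0)
		return true;		/* caller must unlock icloglock */
	pthread_mutex_unlock(&icloglock);
	return false;
}

In the patch below, xlog_state_release_iclog() follows this shape: when atomic_dec_and_lock() returns false the function returns immediately, and only the final release takes l_icloglock to perform the WANT_SYNC state transition and tail-LSN update.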
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r--  fs/xfs/xfs_log.c | 36
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 2e35077..1fa9809 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -675,7 +675,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
- iclog->ic_refcnt++;
+ atomic_inc(&iclog->ic_refcnt);
spin_unlock(&log->l_icloglock);
xlog_state_want_sync(log, iclog);
(void) xlog_state_release_iclog(log, iclog);
@@ -713,7 +713,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
*/
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
- iclog->ic_refcnt++;
+ atomic_inc(&iclog->ic_refcnt);
spin_unlock(&log->l_icloglock);
xlog_state_want_sync(log, iclog);
@@ -1405,7 +1405,7 @@ xlog_sync(xlog_t *log,
int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
XFS_STATS_INC(xs_log_writes);
- ASSERT(iclog->ic_refcnt == 0);
+ ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
/* Add for LR header */
count_init = log->l_iclog_hsize + iclog->ic_offset;
@@ -2309,7 +2309,7 @@ xlog_state_done_syncing(
ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
iclog->ic_state == XLOG_STATE_IOERROR);
- ASSERT(iclog->ic_refcnt == 0);
+ ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
@@ -2391,7 +2391,7 @@ restart:
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
head = &iclog->ic_header;
- iclog->ic_refcnt++; /* prevents sync */
+ atomic_inc(&iclog->ic_refcnt); /* prevents sync */
log_offset = iclog->ic_offset;
/* On the 1st write to an iclog, figure out lsn. This works
@@ -2423,12 +2423,12 @@ restart:
xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
/* If I'm the only one writing to this iclog, sync it to disk */
- if (iclog->ic_refcnt == 1) {
+ if (atomic_read(&iclog->ic_refcnt) == 1) {
spin_unlock(&log->l_icloglock);
if ((error = xlog_state_release_iclog(log, iclog)))
return error;
} else {
- iclog->ic_refcnt--;
+ atomic_dec(&iclog->ic_refcnt);
spin_unlock(&log->l_icloglock);
}
goto restart;
@@ -2819,18 +2819,21 @@ xlog_state_release_iclog(
{
int sync = 0; /* do we sync? */
- spin_lock(&log->l_icloglock);
+ if (iclog->ic_state & XLOG_STATE_IOERROR)
+ return XFS_ERROR(EIO);
+
+ ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
+ if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
+ return 0;
+
if (iclog->ic_state & XLOG_STATE_IOERROR) {
spin_unlock(&log->l_icloglock);
return XFS_ERROR(EIO);
}
-
- ASSERT(iclog->ic_refcnt > 0);
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
iclog->ic_state == XLOG_STATE_WANT_SYNC);
- if (--iclog->ic_refcnt == 0 &&
- iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+ if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
/* update tail before writing to iclog */
xlog_assign_tail_lsn(log->l_mp);
sync++;
@@ -2950,7 +2953,8 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
* previous iclog and go to sleep.
*/
if (iclog->ic_state == XLOG_STATE_DIRTY ||
- (iclog->ic_refcnt == 0 && iclog->ic_offset == 0)) {
+ (atomic_read(&iclog->ic_refcnt) == 0
+ && iclog->ic_offset == 0)) {
iclog = iclog->ic_prev;
if (iclog->ic_state == XLOG_STATE_ACTIVE ||
iclog->ic_state == XLOG_STATE_DIRTY)
@@ -2958,14 +2962,14 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
else
goto maybe_sleep;
} else {
- if (iclog->ic_refcnt == 0) {
+ if (atomic_read(&iclog->ic_refcnt) == 0) {
/* We are the only one with access to this
* iclog. Flush it out now. There should
* be a roundoff of zero to show that someone
* has already taken care of the roundoff from
* the previous sync.
*/
- iclog->ic_refcnt++;
+ atomic_inc(&iclog->ic_refcnt);
lsn = be64_to_cpu(iclog->ic_header.h_lsn);
xlog_state_switch_iclogs(log, iclog, 0);
spin_unlock(&log->l_icloglock);
@@ -3097,7 +3101,7 @@ try_again:
already_slept = 1;
goto try_again;
} else {
- iclog->ic_refcnt++;
+ atomic_inc(&iclog->ic_refcnt);
xlog_state_switch_iclogs(log, iclog, 0);
spin_unlock(&log->l_icloglock);
if (xlog_state_release_iclog(log, iclog))