author		Christoph Hellwig <hch@infradead.org>	2009-12-14 23:14:59 +0000
committer	Alex Elder <aelder@sgi.com>		2009-12-14 23:08:16 -0600
commit		0b1b213fcf3a8486ada99a2bab84ab8c6f51b264
tree		661fd8da7487231224c77b95c33986cb7d7b41ef /fs/xfs/support
parent		6ef3554422e2c7e7aa424ba63737da498881dff4
xfs: event tracing support
Convert the old xfs tracing support that could only be used with the
out-of-tree kdb and xfsidbg patches to use the generic event tracer.
To use it, make sure CONFIG_EVENT_TRACING is enabled and then enable
all xfs trace channels with:
echo 1 > /sys/kernel/debug/tracing/events/xfs/enable
or alternatively enable single events by just doing the same in one
event subdirectory, e.g.
echo 1 > /sys/kernel/debug/tracing/events/xfs/xfs_ihold/enable
or set more complex filters, etc. All this is described in more detail
in Documentation/trace/events.txt. To read the events do a
cat /sys/kernel/debug/tracing/trace
Compared to the last posting this patch converts the tracing mostly to
the one tracepoint per callsite model that other users of the new
tracing facility also employ. This allows very fine-grained control
of the tracing, cleaner output of the traces and also enables the
perf tool to use each tracepoint as a virtual performance counter,
allowing us to e.g. count how often certain workloads hit various
spots in XFS. Take a look at
http://lwn.net/Articles/346470/
for some examples.
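To illustrate the model, here is a minimal sketch of how an event class
plus per-callsite events are declared with the generic event tracer.
This is not the actual content of fs/xfs/linux-2.6/xfs_trace.h; the
class name, fields and format string below are chosen for illustration
only, and the usual multi-include/define_trace.h boilerplate of a trace
header is left out:
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM xfs
	#include <linux/tracepoint.h>
	/* One event class describes the record layout and formatting once. */
	DECLARE_EVENT_CLASS(xfs_inode_class,
		TP_PROTO(struct xfs_inode *ip),
		TP_ARGS(ip),
		TP_STRUCT__entry(
			__field(xfs_ino_t, ino)	/* captured when the event fires */
		),
		TP_fast_assign(
			__entry->ino = ip->i_ino;
		),
		TP_printk("ino 0x%llx", (unsigned long long)__entry->ino)
	);
	/* One DEFINE_EVENT per call site that should be individually traceable. */
	DEFINE_EVENT(xfs_inode_class, xfs_ihold,
		TP_PROTO(struct xfs_inode *ip),
		TP_ARGS(ip));
	/* The call site itself then simply does: trace_xfs_ihold(ip); */
Each DEFINE_EVENT shows up as its own directory under
/sys/kernel/debug/tracing/events/xfs/, which is what provides the
per-event enable and filter files and the perf counters mentioned above.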
Also the btree tracing isn't included at all yet, as it will require
additional core tracing features that are not in mainline yet; I plan
to deliver it later.
And the really nice thing about this patch is that it actually removes
many lines of code while adding this functionality:
fs/xfs/Makefile | 8
fs/xfs/linux-2.6/xfs_acl.c | 1
fs/xfs/linux-2.6/xfs_aops.c | 52 -
fs/xfs/linux-2.6/xfs_aops.h | 2
fs/xfs/linux-2.6/xfs_buf.c | 117 +--
fs/xfs/linux-2.6/xfs_buf.h | 33
fs/xfs/linux-2.6/xfs_fs_subr.c | 3
fs/xfs/linux-2.6/xfs_ioctl.c | 1
fs/xfs/linux-2.6/xfs_ioctl32.c | 1
fs/xfs/linux-2.6/xfs_iops.c | 1
fs/xfs/linux-2.6/xfs_linux.h | 1
fs/xfs/linux-2.6/xfs_lrw.c | 87 --
fs/xfs/linux-2.6/xfs_lrw.h | 45 -
fs/xfs/linux-2.6/xfs_super.c | 104 ---
fs/xfs/linux-2.6/xfs_super.h | 7
fs/xfs/linux-2.6/xfs_sync.c | 1
fs/xfs/linux-2.6/xfs_trace.c | 75 ++
fs/xfs/linux-2.6/xfs_trace.h | 1369 +++++++++++++++++++++++++++++++++++++++++
fs/xfs/linux-2.6/xfs_vnode.h | 4
fs/xfs/quota/xfs_dquot.c | 110 ---
fs/xfs/quota/xfs_dquot.h | 21
fs/xfs/quota/xfs_qm.c | 40 -
fs/xfs/quota/xfs_qm_syscalls.c | 4
fs/xfs/support/ktrace.c | 323 ---------
fs/xfs/support/ktrace.h | 85 --
fs/xfs/xfs.h | 16
fs/xfs/xfs_ag.h | 14
fs/xfs/xfs_alloc.c | 230 +-----
fs/xfs/xfs_alloc.h | 27
fs/xfs/xfs_alloc_btree.c | 1
fs/xfs/xfs_attr.c | 107 ---
fs/xfs/xfs_attr.h | 10
fs/xfs/xfs_attr_leaf.c | 14
fs/xfs/xfs_attr_sf.h | 40 -
fs/xfs/xfs_bmap.c | 507 +++------------
fs/xfs/xfs_bmap.h | 49 -
fs/xfs/xfs_bmap_btree.c | 6
fs/xfs/xfs_btree.c | 5
fs/xfs/xfs_btree_trace.h | 17
fs/xfs/xfs_buf_item.c | 87 --
fs/xfs/xfs_buf_item.h | 20
fs/xfs/xfs_da_btree.c | 3
fs/xfs/xfs_da_btree.h | 7
fs/xfs/xfs_dfrag.c | 2
fs/xfs/xfs_dir2.c | 8
fs/xfs/xfs_dir2_block.c | 20
fs/xfs/xfs_dir2_leaf.c | 21
fs/xfs/xfs_dir2_node.c | 27
fs/xfs/xfs_dir2_sf.c | 26
fs/xfs/xfs_dir2_trace.c | 216 ------
fs/xfs/xfs_dir2_trace.h | 72 --
fs/xfs/xfs_filestream.c | 8
fs/xfs/xfs_fsops.c | 2
fs/xfs/xfs_iget.c | 111 ---
fs/xfs/xfs_inode.c | 67 --
fs/xfs/xfs_inode.h | 76 --
fs/xfs/xfs_inode_item.c | 5
fs/xfs/xfs_iomap.c | 85 --
fs/xfs/xfs_iomap.h | 8
fs/xfs/xfs_log.c | 181 +----
fs/xfs/xfs_log_priv.h | 20
fs/xfs/xfs_log_recover.c | 1
fs/xfs/xfs_mount.c | 2
fs/xfs/xfs_quota.h | 8
fs/xfs/xfs_rename.c | 1
fs/xfs/xfs_rtalloc.c | 1
fs/xfs/xfs_rw.c | 3
fs/xfs/xfs_trans.h | 47 +
fs/xfs/xfs_trans_buf.c | 62 -
fs/xfs/xfs_vnodeops.c | 8
70 files changed, 2151 insertions(+), 2592 deletions(-)
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/support')
-rw-r--r--	fs/xfs/support/ktrace.c	323
-rw-r--r--	fs/xfs/support/ktrace.h	85
2 files changed, 0 insertions(+), 408 deletions(-)
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
deleted file mode 100644
index 2d494c2..0000000
--- a/fs/xfs/support/ktrace.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <xfs.h>
-
-static kmem_zone_t *ktrace_hdr_zone;
-static kmem_zone_t *ktrace_ent_zone;
-static int ktrace_zentries;
-
-void __init
-ktrace_init(int zentries)
-{
-        ktrace_zentries = roundup_pow_of_two(zentries);
-
-        ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
-                                        "ktrace_hdr");
-        ASSERT(ktrace_hdr_zone);
-
-        ktrace_ent_zone = kmem_zone_init(ktrace_zentries
-                                        * sizeof(ktrace_entry_t),
-                                        "ktrace_ent");
-        ASSERT(ktrace_ent_zone);
-}
-
-void __exit
-ktrace_uninit(void)
-{
-        kmem_zone_destroy(ktrace_hdr_zone);
-        kmem_zone_destroy(ktrace_ent_zone);
-}
-
-/*
- * ktrace_alloc()
- *
- * Allocate a ktrace header and enough buffering for the given
- * number of entries. Round the number of entries up to a
- * power of 2 so we can do fast masking to get the index from
- * the atomic index counter.
- */
-ktrace_t *
-ktrace_alloc(int nentries, unsigned int __nocast sleep)
-{
-        ktrace_t *ktp;
-        ktrace_entry_t *ktep;
-        int entries;
-
-        ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
-
-        if (ktp == (ktrace_t*)NULL) {
-                /*
-                 * KM_SLEEP callers don't expect failure.
-                 */
-                if (sleep & KM_SLEEP)
-                        panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
-                return NULL;
-        }
-
-        /*
-         * Special treatment for buffers with the ktrace_zentries entries
-         */
-        entries = roundup_pow_of_two(nentries);
-        if (entries == ktrace_zentries) {
-                ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
-                                                        sleep);
-        } else {
-                ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
-                                                        sleep | KM_LARGE);
-        }
-
-        if (ktep == NULL) {
-                /*
-                 * KM_SLEEP callers don't expect failure.
-                 */
-                if (sleep & KM_SLEEP)
-                        panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
-                kmem_free(ktp);
-
-                return NULL;
-        }
-
-        ktp->kt_entries = ktep;
-        ktp->kt_nentries = entries;
-        ASSERT(is_power_of_2(entries));
-        ktp->kt_index_mask = entries - 1;
-        atomic_set(&ktp->kt_index, 0);
-        ktp->kt_rollover = 0;
-        return ktp;
-}
-
-
-/*
- * ktrace_free()
- *
- * Free up the ktrace header and buffer. It is up to the caller
- * to ensure that no-one is referencing it.
- */
-void
-ktrace_free(ktrace_t *ktp)
-{
-        if (ktp == (ktrace_t *)NULL)
-                return;
-
-        /*
-         * Special treatment for the Vnode trace buffer.
-         */
-        if (ktp->kt_nentries == ktrace_zentries)
-                kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
-        else
-                kmem_free(ktp->kt_entries);
-
-        kmem_zone_free(ktrace_hdr_zone, ktp);
-}
-
-
-/*
- * Enter the given values into the "next" entry in the trace buffer.
- * kt_index is always the index of the next entry to be filled.
- */
-void
-ktrace_enter(
-        ktrace_t *ktp,
-        void *val0,
-        void *val1,
-        void *val2,
-        void *val3,
-        void *val4,
-        void *val5,
-        void *val6,
-        void *val7,
-        void *val8,
-        void *val9,
-        void *val10,
-        void *val11,
-        void *val12,
-        void *val13,
-        void *val14,
-        void *val15)
-{
-        int index;
-        ktrace_entry_t *ktep;
-
-        ASSERT(ktp != NULL);
-
-        /*
-         * Grab an entry by pushing the index up to the next one.
-         */
-        index = atomic_add_return(1, &ktp->kt_index);
-        index = (index - 1) & ktp->kt_index_mask;
-        if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
-                ktp->kt_rollover = 1;
-
-        ASSERT((index >= 0) && (index < ktp->kt_nentries));
-
-        ktep = &(ktp->kt_entries[index]);
-
-        ktep->val[0] = val0;
-        ktep->val[1] = val1;
-        ktep->val[2] = val2;
-        ktep->val[3] = val3;
-        ktep->val[4] = val4;
-        ktep->val[5] = val5;
-        ktep->val[6] = val6;
-        ktep->val[7] = val7;
-        ktep->val[8] = val8;
-        ktep->val[9] = val9;
-        ktep->val[10] = val10;
-        ktep->val[11] = val11;
-        ktep->val[12] = val12;
-        ktep->val[13] = val13;
-        ktep->val[14] = val14;
-        ktep->val[15] = val15;
-}
-
-/*
- * Return the number of entries in the trace buffer.
- */
-int
-ktrace_nentries(
-        ktrace_t *ktp)
-{
-        int index;
-        if (ktp == NULL)
-                return 0;
-
-        index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
-        return (ktp->kt_rollover ? ktp->kt_nentries : index);
-}
-
-/*
- * ktrace_first()
- *
- * This is used to find the start of the trace buffer.
- * In conjunction with ktrace_next() it can be used to
- * iterate through the entire trace buffer. This code does
- * not do any locking because it is assumed that it is called
- * from the debugger.
- *
- * The caller must pass in a pointer to a ktrace_snap
- * structure in which we will keep some state used to
- * iterate through the buffer. This state must not touched
- * by any code outside of this module.
- */
-ktrace_entry_t *
-ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
-{
-        ktrace_entry_t *ktep;
-        int index;
-        int nentries;
-
-        if (ktp->kt_rollover)
-                index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
-        else
-                index = 0;
-
-        ktsp->ks_start = index;
-        ktep = &(ktp->kt_entries[index]);
-
-        nentries = ktrace_nentries(ktp);
-        index++;
-        if (index < nentries) {
-                ktsp->ks_index = index;
-        } else {
-                ktsp->ks_index = 0;
-                if (index > nentries)
-                        ktep = NULL;
-        }
-        return ktep;
-}
-
-/*
- * ktrace_next()
- *
- * This is used to iterate through the entries of the given
- * trace buffer. The caller must pass in the ktrace_snap_t
- * structure initialized by ktrace_first(). The return value
- * will be either a pointer to the next ktrace_entry or NULL
- * if all of the entries have been traversed.
- */
-ktrace_entry_t *
-ktrace_next(
-        ktrace_t *ktp,
-        ktrace_snap_t *ktsp)
-{
-        int index;
-        ktrace_entry_t *ktep;
-
-        index = ktsp->ks_index;
-        if (index == ktsp->ks_start) {
-                ktep = NULL;
-        } else {
-                ktep = &ktp->kt_entries[index];
-        }
-
-        index++;
-        if (index == ktrace_nentries(ktp)) {
-                ktsp->ks_index = 0;
-        } else {
-                ktsp->ks_index = index;
-        }
-
-        return ktep;
-}
-
-/*
- * ktrace_skip()
- *
- * Skip the next "count" entries and return the entry after that.
- * Return NULL if this causes us to iterate past the beginning again.
- */
-ktrace_entry_t *
-ktrace_skip(
-        ktrace_t *ktp,
-        int count,
-        ktrace_snap_t *ktsp)
-{
-        int index;
-        int new_index;
-        ktrace_entry_t *ktep;
-        int nentries = ktrace_nentries(ktp);
-
-        index = ktsp->ks_index;
-        new_index = index + count;
-        while (new_index >= nentries) {
-                new_index -= nentries;
-        }
-        if (index == ktsp->ks_start) {
-                /*
-                 * We've iterated around to the start, so we're done.
-                 */
-                ktep = NULL;
-        } else if ((new_index < index) && (index < ktsp->ks_index)) {
-                /*
-                 * We've skipped past the start again, so we're done.
-                 */
-                ktep = NULL;
-                ktsp->ks_index = ktsp->ks_start;
-        } else {
-                ktep = &(ktp->kt_entries[new_index]);
-                new_index++;
-                if (new_index == nentries) {
-                        ktsp->ks_index = 0;
-                } else {
-                        ktsp->ks_index = new_index;
-                }
-        }
-        return ktep;
-}
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
deleted file mode 100644
index 741d694..0000000
--- a/fs/xfs/support/ktrace.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_KTRACE_H__
-#define __XFS_SUPPORT_KTRACE_H__
-
-/*
- * Trace buffer entry structure.
- */
-typedef struct ktrace_entry {
-        void *val[16];
-} ktrace_entry_t;
-
-/*
- * Trace buffer header structure.
- */
-typedef struct ktrace {
-        int kt_nentries;                /* number of entries in trace buf */
-        atomic_t kt_index;              /* current index in entries */
-        unsigned int kt_index_mask;
-        int kt_rollover;
-        ktrace_entry_t *kt_entries;     /* buffer of entries */
-} ktrace_t;
-
-/*
- * Trace buffer snapshot structure.
- */
-typedef struct ktrace_snap {
-        int ks_start;                   /* kt_index at time of snap */
-        int ks_index;                   /* current index */
-} ktrace_snap_t;
-
-
-#ifdef CONFIG_XFS_TRACE
-
-extern void ktrace_init(int zentries);
-extern void ktrace_uninit(void);
-
-extern ktrace_t *ktrace_alloc(int, unsigned int __nocast);
-extern void ktrace_free(ktrace_t *);
-
-extern void ktrace_enter(
-        ktrace_t *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *,
-        void *);
-
-extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *);
-extern int ktrace_nentries(ktrace_t *);
-extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *);
-extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
-
-#else
-#define ktrace_init(x)  do { } while (0)
-#define ktrace_uninit() do { } while (0)
-#endif  /* CONFIG_XFS_TRACE */
-
-#endif  /* __XFS_SUPPORT_KTRACE_H__ */