author     jrg@chromium.org <jrg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-04-03 03:26:46 +0000
committer  jrg@chromium.org <jrg@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-04-03 03:26:46 +0000
commit     1e1f6af9207b71845aa2f360e5d231825f317a09 (patch)
tree       c369805fe40cdaa9ee646cf98e44854be1538414 /third_party/lcov/contrib
parent     4acc19a6f31abef9608546d10f107240603ca57e (diff)
lcov-1.7 into third_party for code coverage on POSIX systems.
Non-lcov-1.7 files are lcov/LICENCE lcov/README.chromium lcov/bin/mcov

Review URL: http://codereview.chromium.org/57083

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@13066 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'third_party/lcov/contrib')
-rw-r--r--  third_party/lcov/contrib/galaxy/CHANGES                     |    1
-rw-r--r--  third_party/lcov/contrib/galaxy/README                      |   48
-rwxr-xr-x  third_party/lcov/contrib/galaxy/conglomerate_functions.pl   |  195
-rwxr-xr-x  third_party/lcov/contrib/galaxy/gen_makefile.sh             |  129
-rwxr-xr-x  third_party/lcov/contrib/galaxy/genflat.pl                  | 1238
-rwxr-xr-x  third_party/lcov/contrib/galaxy/posterize.pl                |  312
6 files changed, 1923 insertions, 0 deletions
diff --git a/third_party/lcov/contrib/galaxy/CHANGES b/third_party/lcov/contrib/galaxy/CHANGES
new file mode 100644
index 0000000..b09883b
--- /dev/null
+++ b/third_party/lcov/contrib/galaxy/CHANGES
@@ -0,0 +1 @@
+09-04-2003 Initial checkin
diff --git a/third_party/lcov/contrib/galaxy/README b/third_party/lcov/contrib/galaxy/README
new file mode 100644
index 0000000..e21c509
--- /dev/null
+++ b/third_party/lcov/contrib/galaxy/README
@@ -0,0 +1,48 @@
+-------------------------------------------------
+- README file for the LCOV galaxy mapping tool -
+- Last changes: 2003-09-04 -
+-------------------------------------------------
+
+Description
+-----------
+ This is a set of replacement scripts for the fcgp
+(http://sourceforge.net/projects/fcgp/) used to render a PostScript "galaxy
+map" poster of Linux kernel code coverage from LCOV .info files.
+
+Further README contents
+-----------------------
+ 1. Important files
+ 2. Installing
+ 3. Notes and Comments
+
+
+
+1. Important files
+------------------
+ README - This README file
+ CHANGES - List of changes between releases
+ conglomerate_functions.pl - Replacement file - Generates shading
+ genflat.pl - Generates info for shading from .info files
+ gen_makefile.sh - Replacement file - Generates the Makefile that builds the PostScript image
+ posterize.pl - Replacement file - generates a final ps file
+
+2. Installing
+-------------
+ This install requires the fcgp, which means the target kernel source must
+be on the system creating the map.
+
+ Download and copy the replacement files into the fcgp directory (note: it's
+always a good idea to keep backups of the originals).
+
+ Run genflat.pl against your kernel info files
+ ./genflat.pl kernel.info kernel2.info > coverage.dat
+
+ Run the make command for the fcgp (Note: this can take a while)
+ make KERNEL_DIR=/usr/src/linux
+
+ Update posterize.pl as needed (normally page size, margins, and titles).
+Most of these settings will be broken out as command line options in the future.
+
+ Run posterize.pl; this will generate the file poster.ps.
+
+3. Notes and Comments
+---------------------
+ This is a quick and dirty implementation suited for my needs. It does not
+perform any of the tiling the original did.
diff --git a/third_party/lcov/contrib/galaxy/conglomerate_functions.pl b/third_party/lcov/contrib/galaxy/conglomerate_functions.pl
new file mode 100755
index 0000000..4e259fe
--- /dev/null
+++ b/third_party/lcov/contrib/galaxy/conglomerate_functions.pl
@@ -0,0 +1,195 @@
+#! /usr/bin/perl -w
+
+# Takes a set of ps images (belonging to one file) and produces a
+# conglomerate picture of that file: static functions in the middle,
+# others around it. Each one gets a box about its area.
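+#
+# Typical invocation (a sketch, not a fixed interface; the per-function .ps
+# filenames follow the "<file>.c.<function>.+TYPE+.ps" pattern assumed by
+# DecorateFuncs below, with TYPE one of STATIC, INDIRECT, EXPORTED or NORMAL,
+# and are normally supplied by the Makefile from gen_makefile.sh):
+#
+#   ./conglomerate_functions.pl 0.7 0.9 \
+#       foo.c.foo_init.+STATIC+.ps foo.c.foo_read.+NORMAL+.ps > foo-all.ps
+#
+# where 0.7 and 0.9 are example SCRUNCH and BOXSCRUNCH factors.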
+
+use strict;
+
+my $SCRUNCH = $ARGV [0];
+my $BOXSCRUNCH = $ARGV [1];
+my $Tmp;
+my $DEBUG = 1;
+
+shift @ARGV; # skip SCRUNCH and BOXSCRUNCH
+shift @ARGV;
+
+
+DecorateFuncs (@ARGV);
+
+
+#TMPFILE=`mktemp ${TMPDIR:-/tmp}/$$.XXXXXX`
+
+# Arrange.
+my $ArgList = "";
+
+foreach $Tmp (@ARGV) {
+ $ArgList .= "'$Tmp' ";
+}
+
+my @Arranged = `../draw_arrangement $SCRUNCH 0 360 0 $ArgList`;
+
+my $CFile = $ARGV [0];
+$CFile =~ s/\.c\..*$/.c/;
+if ($DEBUG) { print ("% Conglomeration of $CFile\n"); }
+
+print "gsave angle rotate\n";
+
+# Now output the file, except last line.
+my $LastLine = pop (@Arranged);
+my $Fill = Box_2 ($LastLine,$CFile);
+print $Fill;
+# Draw box with file name
+my @Output = Box ('normal', 'Helvetica-Bold', 32, $CFile, $LastLine);
+splice(@Output, $#Output, 0, "grestore\n");
+#print @Output;
+
+print (@Arranged);
+#add a duplicate box to test if this works
+print @Output;
+
+
+sub ParseBound
+{
+ my $BBoxLine = shift;
+
+ $BBoxLine =~ /(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s+(-?[\d.]+)/;
+
+ # XMin, YMin, XMax, YMax
+ return ($1 * $BOXSCRUNCH, $2 * $BOXSCRUNCH,
+ $3 * $BOXSCRUNCH, $4 * $BOXSCRUNCH);
+}
+
+
+
+# Box (type, font, fontsize, Label, BBoxLine)
+sub Box
+{
+ my $Type = shift;
+ my $Font = shift;
+ my $Fontsize = shift;
+ my $Label = shift;
+ my $BBoxLine = shift;
+ my @Output = ();
+
+ # print (STDERR "Box ('$Type', '$Font', '$Fontsize', '$Label', '$BBoxLine')\n");
+ push (@Output, "% start of box\n");
+
+ push (@Output, "D5\n") if ($Type eq "dashed");
+
+ # print (STDERR "BBoxLine: '$BBoxLine'\n");
+ # print (STDERR "Parsed: '" . join ("' '", ParseBound ($BBoxLine)) . "\n");
+ my ($XMin, $YMin, $XMax, $YMax) = ParseBound ($BBoxLine);
+
+ my $LeftSpaced = $XMin + 6;
+ my $BottomSpaced = $YMin + 6;
+
+ # Put black box around it
+ push (@Output, (
+ "($Label) $LeftSpaced $BottomSpaced $Fontsize /$Font\n",
+ "$YMin $XMin $YMax $XMax U\n"
+ )
+ );
+
+ push (@Output, "D\n") if ($Type eq "dashed");
+ # fill bounding box
+ push (@Output, "% end of box\n");
+
+ # Output bounding box
+ push (@Output, "% bound $XMin $YMin $XMax $YMax\n");
+
+ return @Output;
+}
+
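+# Box_2 (BBoxLine, CFile)
+#
+# Look CFile up in coverage.dat (as produced by genflat.pl) and return an
+# "XMin YMin XMax YMax Class" line, where Class is one of the coverage
+# classes (None/Lo/Med/Hi) defined as fill operators in the PostScript prolog.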
+sub Box_2
+{
+ my $BBoxLine = shift;
+ my $CFile = shift;
+ my $CovFile = "./coverage.dat";
+ my ($XMin, $YMin, $XMax, $YMax) = ParseBound ($BBoxLine);
+ my @output = `fgrep $CFile $CovFile`;
+ chomp $output[0];
+ my ($junk, $Class, $per) = split /\t/, $output[0];
+ return "$XMin $YMin $XMax $YMax $Class\n";
+}
+# Decorate (rgb-vals(1 string) filename)
+sub Decorate
+{
+ my $RGB = shift;
+ my $Filename = shift;
+
+ my @Input = ReadPS ($Filename);
+ my $LastLine = pop (@Input);
+ my @Output = ();
+
+ # Color at the beginning.
+ push (@Output, "C$RGB\n");
+
+ # Now output the file, except last line.
+ push (@Output, @Input);
+
+ # Draw dashed box with function name
+ # FIXME Make bound cover the label as well!
+ my $FuncName = $Filename;
+ $FuncName =~ s/^[^.]+\.c\.(.+?)\..*$/$1/;
+
+ push (@Output, Box ('dashed', 'Helvetica', 24, $FuncName, $LastLine));
+
+ # Slap over the top.
+ WritePS ($Filename, @Output);
+}
+
+
+
+# Add colored boxes around functions
+sub DecorateFuncs
+{
+ my $FName = "";
+ my $FType = "";
+
+ foreach $FName (@ARGV)
+ {
+ $FName =~ /\+([A-Z]+)\+/;
+ $FType = $1;
+
+ if ($FType eq 'STATIC') {
+ Decorate ("2", $FName); # Light green.
+ }
+ elsif ($FType eq 'INDIRECT') {
+ Decorate ("3", $FName); # Green.
+ }
+ elsif ($FType eq 'EXPORTED') {
+ Decorate ("4", $FName); # Red.
+ }
+ elsif ($FType eq 'NORMAL') {
+ Decorate ("5", $FName); # Blue.
+ }
+ else {
+ die ("Unknown extension $FName");
+ }
+ }
+}
+
+
+sub ReadPS
+{
+ my $Filename = shift;
+ my @Contents = ();
+
+ open (INFILE, "$Filename") or die ("Could not read $Filename: $!");
+ @Contents = <INFILE>;
+ close (INFILE);
+
+ return @Contents;
+}
+
+sub WritePS
+{
+ my $Filename = shift;
+
+ open (OUTFILE, ">$Filename")
+ or die ("Could not write $Filename: $!");
+ print (OUTFILE @_);
+ close (OUTFILE);
+}
+
diff --git a/third_party/lcov/contrib/galaxy/gen_makefile.sh b/third_party/lcov/contrib/galaxy/gen_makefile.sh
new file mode 100755
index 0000000..ab51a5e
--- /dev/null
+++ b/third_party/lcov/contrib/galaxy/gen_makefile.sh
@@ -0,0 +1,129 @@
+#! /bin/sh
+
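+# This script writes a Makefile to stdout and expects the caller to provide
+# (at least) these environment variables, all referenced below: RING1..RING4
+# (directory lists for each ring), INNER_RADIUS, DIR_SPACING, RING_SPACING,
+# FILE_SCRUNCH, FUNCTION_SCRUNCH and BOX_SCRUNCH.
+#
+# A hypothetical invocation (directory names and values are examples only):
+#
+#   RING1=kernel RING2="fs mm" RING3=net RING4=drivers \
+#   INNER_RADIUS=100 DIR_SPACING=2 RING_SPACING=20 \
+#   FILE_SCRUNCH=0.8 FUNCTION_SCRUNCH=0.7 BOX_SCRUNCH=0.9 \
+#       ./gen_makefile.sh > image/Makefile
+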
+cd image
+
+# Space-optimized version: strip comments, drop precision to 3
+# figures, eliminate duplicates.
+# update(creinig): precision reduction is now done in data2ps, and comments
+# (except for "% bound") are now also omitted from the start
+
+echo 'image.ps: image-unop.ps'
+#echo ' grep -v "^%" < $< | sed -e "s/\.\([0-9][0-9]\)[0-9]\+/.\1/g" -e "s/\(^\| \|-\)\([0-9][0-9][0-9]\)[0-9][0-9]\.[0-9][0-9]/\1\200/g" -e "s/\(^\| \|-\)\([0-9][0-9][0-9]\)[0-9]\.[0-9][0-9]/\1\20/g" -e "s/\(^\| \|-\)\([0-9][0-9][0-9]\)\.[0-9][0-9]/\1\2/g" -e "s/\(^\| \|-\)\([0-9][0-9]\)\.\([0-9]\)[0-9]/\1\2.\30/g" | awk "\$$0 ~ /lineto/ { if ( LASTLINE == \$$0 ) next; } { LASTLINE=\$$0; print; }" > $@'
+echo ' grep -v "^% bound" < $< > $@'
+# Need last comment (bounding box)
+echo ' tail -1 $< >> $@'
+echo ' ls -l image.ps image-unop.ps'
+
+echo 'image-unop.ps: outline.ps ring1.ps ring2.ps ring3.ps ring4.ps'
+echo ' cat ring[1234].ps > $@'
+# Bounding box is at bottom now. Next two won't change it.
+echo ' tail -1 $@ > bounding-box'
+echo ' cat outline.ps >> $@'
+echo ' cat ../tux.ps >> $@'
+echo ' cat bounding-box >> $@ && rm bounding-box'
+
+# Finished rings are precious!
+echo .SECONDARY: ring1.ps ring2.ps ring3.ps ring4.ps
+
+# Rings 1 and 4 are all thrown together.
+echo RING1_DEPS:=`find $RING1 -name '*.c.*' | sed 's/\.c.*/-all.ps/' | sort | uniq`
+echo RING4_DEPS:=`find $RING4 -name '*.c.*' | sed 's/\.c.*/-all.ps/' | sort | uniq`
+
+# Other rings are divided into dirs.
+echo RING2_DEPS:=`for d in $RING2; do echo $d-ring2.ps; done`
+echo RING3_DEPS:=`for d in $RING3; do echo $d-ring3.ps; done`
+echo
+
+# First ring starts at inner radius.
+echo 'ring1.ps: $(RING1_DEPS)'
+echo " @echo Making Ring 1"
+echo " @echo /angle 0 def > \$@"
+echo " @../draw_arrangement $FILE_SCRUNCH 0 360 $INNER_RADIUS \$(RING1_DEPS) >> \$@"
+echo " @echo Done Ring 1"
+
+# Second ring starts at end of above ring (assume it's circular, so
+# grab any bound).
+echo 'ring2.ps: ring1.ps $(RING2_DEPS)'
+echo " @echo Making Ring 2"
+echo " @echo /angle 0 def > \$@"
+echo " @../rotary_arrange.sh $DIR_SPACING" `for f in $RING2; do echo $f-ring2.ps $f-ring2.angle; done` '>> $@'
+echo " @echo Done Ring 2"
+
+# Third ring starts at end of second ring.
+echo 'ring3.ps: ring2.ps $(RING3_DEPS)'
+echo " @echo Making Ring 3"
+echo " @echo /angle 0 def > \$@"
+echo " @../rotary_arrange.sh $DIR_SPACING" `for f in $RING3; do echo $f-ring3.ps $f-ring3.angle; done` '>> $@'
+echo " @echo Done Ring 3"
+
+# Outer ring starts at end of third ring.
+# And it's just a big ring of drivers.
+echo 'ring4.ps: $(RING4_DEPS) ring3.radius'
+echo " @echo Making Ring 4"
+echo " @echo /angle 0 def > \$@"
+echo " @../draw_arrangement $FILE_SCRUNCH 0 360 \`cat ring3.radius\` \$(RING4_DEPS) >> \$@"
+echo " @echo Done Ring 4"
+echo
+
+# How to make directory picture: angle file contains start and end angle.
+# Second ring starts at end of above ring (assume it's circular, so
+# grab any bound).
+echo "%-ring2.ps: %-ring2.angle ring1.radius"
+echo " @echo Rendering \$@"
+echo " @../draw_arrangement $FILE_SCRUNCH 0 \`cat \$<\` \`cat ring1.radius\` \`find \$* -name '*-all.ps'\` > \$@"
+
+echo "%-ring3.ps: %-ring3.angle ring2.radius"
+echo " @echo Rendering \$@"
+echo " @../draw_arrangement $FILE_SCRUNCH 0 \`cat \$<\` \`cat ring2.radius\` \`find \$* -name '*-all.ps'\` > \$@"
+
+# How to extract radii
+echo "%.radius: %.ps"
+echo ' @echo scale=2\; `tail -1 $< | sed "s/^.* //"` + '$RING_SPACING' | bc > $@'
+echo
+
+# How to make angle. Need total angle for that directory, and weight.
+echo "%-ring2.angle: %-ring2.weight ring2.weight"
+echo ' @echo "scale=2; ( 360 - ' `echo $RING2 | wc -w` ' * ' $DIR_SPACING ') * `cat $<` / `cat ring2.weight`" | bc > $@'
+
+echo "%-ring3.angle: %-ring3.weight ring3.weight"
+echo ' @echo "scale=2; ( 360 - ' `echo $RING3 | wc -w` ' * ' $DIR_SPACING ') * `cat $<` / `cat ring3.weight`" | bc > $@'
+
+# How to make ring weights (sum directory totals).
+echo "ring2.weight:" `for d in $RING2; do echo $d-ring2.weight; done`
+echo ' @cat $^ | ../tally > $@'
+echo "ring3.weight:" `for d in $RING3; do echo $d-ring3.weight; done`
+echo ' @cat $^ | ../tally > $@'
+
+# How to make a weight.
+echo "%-ring2.weight:" `find $RING2 -name '*.c.*' | sed 's/\.c.*/-all.ps/' | sort | uniq`
+echo ' @../total_area.pl `find $* -name \*-all.ps` > $@'
+echo "%-ring3.weight:" `find $RING3 -name '*.c.*' | sed 's/\.c.*/-all.ps/' | sort | uniq`
+echo ' @../total_area.pl `find $* -name \*-all.ps` > $@'
+echo
+
+# Now rule to make the graphs of a function.
+#echo %.ps::%
+#echo ' @../function2ps `echo $< | sed '\''s/^.*\.\([^.]*\)\.\+.*$$/\1/'\''` > $@ $<'
+## Need the space.
+##echo ' @rm -f $<'
+#echo
+
+# Rule to make all from constituent parts.
+echo %-all.ps:
+echo " @echo Rendering \$*.c"
+echo " @../conglomerate_functions.pl $FUNCTION_SCRUNCH $BOX_SCRUNCH \$^ > \$@"
+# Need the space.
+#echo ' @rm -f $^'
+echo
+
+# Generating outline, requires all the angles.
+echo outline.ps: ../make-outline.sh ring1.ps ring2.ps ring3.ps ring4.ps `for f in $RING2; do echo $f-ring2.angle; done` `for f in $RING3; do echo $f-ring3.angle; done`
+echo " ../make-outline.sh $INNER_RADIUS $DIR_SPACING $RING_SPACING \"$RING1\" > \$@"
+echo
+
+# Now all the rules to make each function.
+for d in `find . -type d`; do
+ for f in `cd $d; ls *+.ps 2>/dev/null | sed 's/\.c\..*$//' | uniq`; do
+ echo $d/$f-all.ps: `cd $d; ls $f.c.* | sed -e "s?^?$d/?"`
+ done
+done
diff --git a/third_party/lcov/contrib/galaxy/genflat.pl b/third_party/lcov/contrib/galaxy/genflat.pl
new file mode 100755
index 0000000..b8b8ff4
--- /dev/null
+++ b/third_party/lcov/contrib/galaxy/genflat.pl
@@ -0,0 +1,1238 @@
+#!/usr/bin/perl -w
+#
+# Copyright (c) International Business Machines Corp., 2002
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# genflat
+#
+# This script generates flat coverage output on standard output from .info
+# files as created by the geninfo script. Call it with --help to get
+# information on usage and available options. This code is based on the lcov
+# genhtml script by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
+#
+#
+# History:
+# 2003-08-19 ripped up Peter's script James M Kenefick Jr. <jkenefic@us.ibm.com>
+#
+
+use strict;
+use File::Basename;
+use Getopt::Long;
+# Constants
+our $lcov_version = "";
+our $lcov_url = "";
+
+# Specify coverage rate limits (in %) for classifying file entries
+# HI: $hi_limit <= rate <= 100 graph color: green
+# MED: $med_limit <= rate < $hi_limit graph color: orange
+# LO: 0 <= rate < $med_limit graph color: red
+our $hi_limit = 50;
+our $med_limit = 15;
+
+# Data related prototypes
+sub print_usage(*);
+sub gen_html();
+sub process_dir($);
+sub process_file($$$);
+sub info(@);
+sub read_info_file($);
+sub get_info_entry($);
+sub set_info_entry($$$$;$$);
+sub get_prefix(@);
+sub shorten_prefix($);
+sub get_dir_list(@);
+sub get_relative_base_path($);
+sub get_date_string();
+sub split_filename($);
+sub subtract_counts($$);
+sub add_counts($$);
+sub apply_baseline($$);
+sub combine_info_files($$);
+sub combine_info_entries($$);
+sub apply_prefix($$);
+sub escape_regexp($);
+
+
+# HTML related prototypes
+
+
+sub write_file_table(*$$$$);
+
+
+# Global variables & initialization
+our %info_data; # Hash containing all data from .info file
+our $dir_prefix; # Prefix to remove from all sub directories
+our %test_description; # Hash containing test descriptions if available
+our $date = get_date_string();
+
+our @info_filenames; # List of .info files to use as data source
+our $test_title; # Title for output as written to each page header
+our $output_directory; # Name of directory in which to store output
+our $base_filename; # Optional name of file containing baseline data
+our $desc_filename; # Name of file containing test descriptions
+our $css_filename; # Optional name of external stylesheet file to use
+our $quiet; # If set, suppress information messages
+our $help; # Help option flag
+our $version; # Version option flag
+our $show_details; # If set, generate detailed directory view
+our $no_prefix; # If set, do not remove filename prefix
+our $frames; # If set, use frames for source code view
+our $keep_descriptions; # If set, do not remove unused test case descriptions
+our $no_sourceview; # If set, do not create a source code view for each file
+our $tab_size = 8; # Number of spaces to use in place of tab
+
+our $cwd = `pwd`; # Current working directory
+chomp($cwd);
+our $tool_dir = dirname($0); # Directory where genhtml tool is installed
+
+
+#
+# Code entry point
+#
+
+# Add current working directory if $tool_dir is not already an absolute path
+if (! ($tool_dir =~ /^\/(.*)$/))
+{
+ $tool_dir = "$cwd/$tool_dir";
+}
+
+# Parse command line options
+if (!GetOptions("output-directory=s" => \$output_directory,
+ "css-file=s" => \$css_filename,
+ "baseline-file=s" => \$base_filename,
+ "prefix=s" => \$dir_prefix,
+ "num-spaces=i" => \$tab_size,
+ "no-prefix" => \$no_prefix,
+ "quiet" => \$quiet,
+ "help" => \$help,
+ "version" => \$version
+ ))
+{
+ print_usage(*STDERR);
+ exit(1);
+}
+
+@info_filenames = @ARGV;
+
+# Check for help option
+if ($help)
+{
+ print_usage(*STDOUT);
+ exit(0);
+}
+
+# Check for version option
+if ($version)
+{
+ print($lcov_version."\n");
+ exit(0);
+}
+
+# Check for info filename
+if (!@info_filenames)
+{
+ print(STDERR "No filename specified\n");
+ print_usage(*STDERR);
+ exit(1);
+}
+
+# Generate a title if none is specified
+if (!$test_title)
+{
+ if (scalar(@info_filenames) == 1)
+ {
+ # Only one filename specified, use it as title
+ $test_title = basename($info_filenames[0]);
+ }
+ else
+ {
+ # More than one filename specified, use default title
+ $test_title = "unnamed";
+ }
+}
+
+# Make sure tab_size is within valid range
+if ($tab_size < 1)
+{
+ print(STDERR "ERROR: invalid number of spaces specified: ".
+ "$tab_size!\n");
+ exit(1);
+}
+
+# Generate the output (despite the name, gen_html() prints a flat text table)
+gen_html();
+
+exit(0);
+
+
+
+#
+# print_usage(handle)
+#
+# Print usage information.
+#
+
+sub print_usage(*)
+{
+ local *HANDLE = $_[0];
+ my $executable_name = basename($0);
+
+ print(HANDLE <<END_OF_USAGE);
+Usage: $executable_name [OPTIONS] INFOFILE(S)
+
+Create flat coverage output (one line per source file) for coverage data
+found in INFOFILE. Note that INFOFILE may also be a list of filenames.
+
+ -h, --help Print this help, then exit
+ -v, --version Print version number, then exit
+ -q, --quiet Do not print progress messages
+ -b, --baseline-file BASEFILE Use BASEFILE as baseline file
+ -p, --prefix PREFIX Remove PREFIX from all directory names
+ --no-prefix Do not remove prefix from directory names
+ --no-source Do not create source code view
+ --num-spaces NUM Replace tabs with NUM spaces in source view
+
+See $lcov_url for more information about this tool.
+END_OF_USAGE
+ ;
+}
+
+
+#
+# gen_html()
+#
+# Generate flat coverage output from the contents of the .info files given
+# on the command line. One line per source file is written to standard
+# output; see write_file_table() for the format.
+#
+# Die on error.
+#
+
+sub gen_html()
+{
+ local *HTML_HANDLE;
+ my %overview;
+ my %base_data;
+ my $lines_found;
+ my $lines_hit;
+ my $overall_found = 0;
+ my $overall_hit = 0;
+ my $dir_name;
+ my $link_name;
+ my @dir_list;
+ my %new_info;
+
+ # Read in all specified .info files
+ foreach (@info_filenames)
+ {
+ info("Reading data file $_\n");
+ %new_info = %{read_info_file($_)};
+
+ # Combine %new_info with %info_data
+ %info_data = %{combine_info_files(\%info_data, \%new_info)};
+ }
+
+ info("Found %d entries.\n", scalar(keys(%info_data)));
+
+ # Read and apply baseline data if specified
+ if ($base_filename)
+ {
+ # Read baseline file
+ info("Reading baseline file $base_filename\n");
+ %base_data = %{read_info_file($base_filename)};
+ info("Found %d entries.\n", scalar(keys(%base_data)));
+
+ # Apply baseline
+ info("Subtracting baseline data.\n");
+ %info_data = %{apply_baseline(\%info_data, \%base_data)};
+ }
+
+ @dir_list = get_dir_list(keys(%info_data));
+
+ if ($no_prefix)
+ {
+ # User requested that we leave filenames alone
+ info("User asked not to remove filename prefix\n");
+ }
+ elsif (!defined($dir_prefix))
+ {
+ # Get prefix common to most directories in list
+ $dir_prefix = get_prefix(@dir_list);
+
+ if ($dir_prefix)
+ {
+ info("Found common filename prefix \"$dir_prefix\"\n");
+ }
+ else
+ {
+ info("No common filename prefix found!\n");
+ $no_prefix=1;
+ }
+ }
+ else
+ {
+ info("Using user-specified filename prefix \"".
+ "$dir_prefix\"\n");
+ }
+
+ # Process each subdirectory and collect overview information
+ foreach $dir_name (@dir_list)
+ {
+ ($lines_found, $lines_hit) = process_dir($dir_name);
+
+ $overview{$dir_name} = "$lines_found,$lines_hit, ";
+ $overall_found += $lines_found;
+ $overall_hit += $lines_hit;
+ }
+
+
+ if ($overall_found == 0)
+ {
+ info("Warning: No lines found!\n");
+ }
+ else
+ {
+ info("Overall coverage rate: %d of %d lines (%.1f%%)\n",
+ $overall_hit, $overall_found,
+ $overall_hit*100/$overall_found);
+ }
+}
+
+
+#
+# process_dir(dir_name)
+#
+
+sub process_dir($)
+{
+ my $abs_dir = $_[0];
+ my $trunc_dir;
+ my $rel_dir = $abs_dir;
+ my $base_dir;
+ my $filename;
+ my %overview;
+ my $lines_found;
+ my $lines_hit;
+ my $overall_found=0;
+ my $overall_hit=0;
+ my $base_name;
+ my $extension;
+ my $testdata;
+ my %testhash;
+ local *HTML_HANDLE;
+
+ # Remove prefix if applicable
+ if (!$no_prefix)
+ {
+ # Match directory name beginning with $dir_prefix
+ $rel_dir = apply_prefix($rel_dir, $dir_prefix);
+ }
+
+ $trunc_dir = $rel_dir;
+
+ # Remove leading /
+ if ($rel_dir =~ /^\/(.*)$/)
+ {
+ $rel_dir = substr($rel_dir, 1);
+ }
+
+ $base_dir = get_relative_base_path($rel_dir);
+
+ $abs_dir = escape_regexp($abs_dir);
+
+ # Match filenames which specify files in this directory, not including
+ # sub-directories
+ foreach $filename (grep(/^$abs_dir\/[^\/]*$/,keys(%info_data)))
+ {
+ ($lines_found, $lines_hit, $testdata) =
+ process_file($trunc_dir, $rel_dir, $filename);
+
+ $base_name = basename($filename);
+
+ $overview{$base_name} = "$lines_found,$lines_hit";
+
+ $testhash{$base_name} = $testdata;
+
+ $overall_found += $lines_found;
+ $overall_hit += $lines_hit;
+ }
+ write_file_table($abs_dir, "./linux/", \%overview, \%testhash, 4);
+
+
+ # Calculate resulting line counts
+ return ($overall_found, $overall_hit);
+}
+
+
+#
+# process_file(trunc_dir, rel_dir, filename)
+#
+
+sub process_file($$$)
+{
+ info("Processing file ".apply_prefix($_[2], $dir_prefix)."\n");
+ my $trunc_dir = $_[0];
+ my $rel_dir = $_[1];
+ my $filename = $_[2];
+ my $base_name = basename($filename);
+ my $base_dir = get_relative_base_path($rel_dir);
+ my $testdata;
+ my $testcount;
+ my $sumcount;
+ my $funcdata;
+ my $lines_found;
+ my $lines_hit;
+ my @source;
+ my $pagetitle;
+
+ ($testdata, $sumcount, $funcdata, $lines_found, $lines_hit) =
+ get_info_entry($info_data{$filename});
+ return ($lines_found, $lines_hit, $testdata);
+}
+
+
+#
+# read_info_file(info_filename)
+#
+# Read in the contents of the .info file specified by INFO_FILENAME. Data will
+# be returned as a reference to a hash containing the following mappings:
+#
+# %result: for each filename found in file -> \%data
+#
+# %data: "test" -> \%testdata
+# "sum" -> \%sumcount
+# "func" -> \%funcdata
+# "found" -> $lines_found (number of instrumented lines found in file)
+# "hit" -> $lines_hit (number of executed lines in file)
+#
+# %testdata: name of test affecting this file -> \%testcount
+#
+# %testcount: line number -> execution count for a single test
+# %sumcount : line number -> execution count for all tests
+# %funcdata : line number -> name of function beginning at that line
+#
+# Note that .info file sections referring to the same file and test name
+# will automatically be combined by adding all execution counts.
+#
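+# A record in such a file looks roughly like this (values are examples only):
+#
+#   TN:testname
+#   SF:/usr/src/linux/fs/open.c
+#   FN:42,sys_open
+#   DA:42,10
+#   DA:43,0
+#   end_of_record
+#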
+# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
+# is compressed using GZIP. If available, GUNZIP will be used to decompress
+# this file.
+#
+# Die on error
+#
+
+sub read_info_file($)
+{
+ my $tracefile = $_[0]; # Name of tracefile
+ my %result; # Resulting hash: file -> data
+ my $data; # Data handle for current entry
+ my $testdata; # " "
+ my $testcount; # " "
+ my $sumcount; # " "
+ my $funcdata; # " "
+ my $line; # Current line read from .info file
+ my $testname; # Current test name
+ my $filename; # Current filename
+ my $hitcount; # Count for lines hit
+ my $count; # Execution count of current line
+ my $negative; # If set, warn about negative counts
+ local *INFO_HANDLE; # Filehandle for .info file
+
+ # Check if file exists and is readable
+ stat($_[0]);
+ if (!(-r _))
+ {
+ die("ERROR: cannot read file $_[0]!\n");
+ }
+
+ # Check if this is really a plain file
+ if (!(-f _))
+ {
+ die("ERROR: not a plain file: $_[0]!\n");
+ }
+
+ # Check for .gz extension
+ if ($_[0] =~ /^(.*)\.gz$/)
+ {
+ # Check for availability of GZIP tool
+ system("gunzip -h >/dev/null 2>/dev/null")
+ and die("ERROR: gunzip command not available!\n");
+
+ # Check integrity of compressed file
+ system("gunzip -t $_[0] >/dev/null 2>/dev/null")
+ and die("ERROR: integrity check failed for ".
+ "compressed file $_[0]!\n");
+
+ # Open compressed file
+ open(INFO_HANDLE, "gunzip -c $_[0]|")
+ or die("ERROR: cannot start gunzip to uncompress ".
+ "file $_[0]!\n");
+ }
+ else
+ {
+ # Open uncompressed file
+ open(INFO_HANDLE, $_[0])
+ or die("ERROR: cannot read file $_[0]!\n");
+ }
+
+ $testname = "";
+ while (<INFO_HANDLE>)
+ {
+ chomp($_);
+ $line = $_;
+
+ # Switch statement
+ foreach ($line)
+ {
+ /^TN:(\w+)/ && do
+ {
+ # Test name information found
+ $testname = $1;
+ last;
+ };
+
+ /^[SK]F:(.*)/ && do
+ {
+ # Filename information found
+ # Retrieve data for new entry
+ $filename = $1;
+
+ $data = $result{$filename};
+ ($testdata, $sumcount, $funcdata) =
+ get_info_entry($data);
+
+ if (defined($testname))
+ {
+ $testcount = $testdata->{$testname};
+ }
+ else
+ {
+ my %new_hash;
+ $testcount = \%new_hash;
+ }
+ last;
+ };
+
+ /^DA:(\d+),(-?\d+)/ && do
+ {
+ # Fix negative counts
+ $count = $2 < 0 ? 0 : $2;
+ if ($2 < 0)
+ {
+ $negative = 1;
+ }
+ # Execution count found, add to structure
+ # Add summary counts
+ $sumcount->{$1} += $count;
+
+ # Add test-specific counts
+ if (defined($testname))
+ {
+ $testcount->{$1} += $count;
+ }
+ last;
+ };
+
+ /^FN:(\d+),([^,]+)/ && do
+ {
+ # Function data found, add to structure
+ $funcdata->{$1} = $2;
+ last;
+ };
+
+ /^end_of_record/ && do
+ {
+ # Found end of section marker
+ if ($filename)
+ {
+ # Store current section data
+ if (defined($testname))
+ {
+ $testdata->{$testname} =
+ $testcount;
+ }
+ set_info_entry($data, $testdata,
+ $sumcount, $funcdata);
+ $result{$filename} = $data;
+ }
+
+ };
+
+ # default
+ last;
+ }
+ }
+ close(INFO_HANDLE);
+
+ # Calculate lines_found and lines_hit for each file
+ foreach $filename (keys(%result))
+ {
+ $data = $result{$filename};
+
+ ($testdata, $sumcount, $funcdata) = get_info_entry($data);
+
+ $data->{"found"} = scalar(keys(%{$sumcount}));
+ $hitcount = 0;
+
+ foreach (keys(%{$sumcount}))
+ {
+ if ($sumcount->{$_} >0) { $hitcount++; }
+ }
+
+ $data->{"hit"} = $hitcount;
+
+ $result{$filename} = $data;
+ }
+
+ if (scalar(keys(%result)) == 0)
+ {
+ die("ERROR: No valid records found in tracefile $tracefile\n");
+ }
+ if ($negative)
+ {
+ warn("WARNING: Negative counts found in tracefile ".
+ "$tracefile\n");
+ }
+
+ return(\%result);
+}
+
+
+#
+# get_info_entry(hash_ref)
+#
+# Retrieve data from an entry of the structure generated by read_info_file().
+# Return a list of references to hashes:
+# (test data hash ref, sum count hash ref, funcdata hash ref, lines found,
+# lines hit)
+#
+
+sub get_info_entry($)
+{
+ my $testdata_ref = $_[0]->{"test"};
+ my $sumcount_ref = $_[0]->{"sum"};
+ my $funcdata_ref = $_[0]->{"func"};
+ my $lines_found = $_[0]->{"found"};
+ my $lines_hit = $_[0]->{"hit"};
+
+ return ($testdata_ref, $sumcount_ref, $funcdata_ref, $lines_found,
+ $lines_hit);
+}
+
+
+#
+# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref[,
+# lines_found, lines_hit])
+#
+# Update the hash referenced by HASH_REF with the provided data references.
+#
+
+sub set_info_entry($$$$;$$)
+{
+ my $data_ref = $_[0];
+
+ $data_ref->{"test"} = $_[1];
+ $data_ref->{"sum"} = $_[2];
+ $data_ref->{"func"} = $_[3];
+
+ if (defined($_[4])) { $data_ref->{"found"} = $_[4]; }
+ if (defined($_[5])) { $data_ref->{"hit"} = $_[5]; }
+}
+
+
+#
+# get_prefix(filename_list)
+#
+# Search FILENAME_LIST for a directory prefix which is common to as many
+# list entries as possible, so that removing this prefix will minimize the
+# sum of the lengths of all resulting shortened filenames.
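+#
+# Example: given "/usr/src/linux/fs/open.c" and "/usr/src/linux/mm/slab.c",
+# the prefix "/usr/src/linux" would be chosen.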
+#
+
+sub get_prefix(@)
+{
+ my @filename_list = @_; # provided list of filenames
+ my %prefix; # mapping: prefix -> sum of lengths
+ my $current; # Temporary iteration variable
+
+ # Find list of prefixes
+ foreach (@filename_list)
+ {
+ # Need explicit assignment to get a copy of $_ so that
+ # shortening the contained prefix does not affect the list
+ $current = shorten_prefix($_);
+ while ($current = shorten_prefix($current))
+ {
+ # Skip rest if the remaining prefix has already been
+ # added to hash
+ if ($prefix{$current}) { last; }
+
+ # Initialize with 0
+ $prefix{$current}="0";
+ }
+
+ }
+
+ # Calculate sum of lengths for all prefixes
+ foreach $current (keys(%prefix))
+ {
+ foreach (@filename_list)
+ {
+ # Add original length
+ $prefix{$current} += length($_);
+
+ # Check whether prefix matches
+ if (substr($_, 0, length($current)) eq $current)
+ {
+ # Subtract prefix length for this filename
+ $prefix{$current} -= length($current);
+ }
+ }
+ }
+
+ # Find and return prefix with minimal sum
+ $current = (keys(%prefix))[0];
+
+ foreach (keys(%prefix))
+ {
+ if ($prefix{$_} < $prefix{$current})
+ {
+ $current = $_;
+ }
+ }
+
+ return($current);
+}
+
+
+#
+# shorten_prefix(prefix)
+#
+# Return PREFIX shortened by last directory component.
+#
+
+sub shorten_prefix($)
+{
+ my @list = split("/", $_[0]);
+
+ pop(@list);
+ return join("/", @list);
+}
+
+
+
+#
+# get_dir_list(filename_list)
+#
+# Return sorted list of directories for each entry in given FILENAME_LIST.
+#
+
+sub get_dir_list(@)
+{
+ my %result;
+
+ foreach (@_)
+ {
+ $result{shorten_prefix($_)} = "";
+ }
+
+ return(sort(keys(%result)));
+}
+
+
+#
+# get_relative_base_path(subdirectory)
+#
+# Return a relative path string which references the base path when applied
+# in SUBDIRECTORY.
+#
+# Example: get_relative_base_path("fs/mm") -> "../../"
+#
+
+sub get_relative_base_path($)
+{
+ my $result = "";
+ my $index;
+
+ # Make an empty directory path a special case
+ if (!$_[0]) { return(""); }
+
+ # Count number of /s in path
+ $index = ($_[0] =~ s/\//\//g);
+
+ # Add a ../ to $result for each / in the directory path + 1
+ for (; $index>=0; $index--)
+ {
+ $result .= "../";
+ }
+
+ return $result;
+}
+
+
+#
+# get_date_string()
+#
+# Return the current date in the form: yyyy-mm-dd
+#
+
+sub get_date_string()
+{
+ my $year;
+ my $month;
+ my $day;
+
+ ($year, $month, $day) = (localtime())[5, 4, 3];
+
+ return sprintf("%d-%02d-%02d", $year+1900, $month+1, $day);
+}
+
+
+#
+# split_filename(filename)
+#
+# Return (path, filename, extension) for a given FILENAME.
+#
+
+sub split_filename($)
+{
+ if (!$_[0]) { return(); }
+ my @path_components = split('/', $_[0]);
+ my @file_components = split('\.', pop(@path_components));
+ my $extension = pop(@file_components);
+
+ return (join("/",@path_components), join(".",@file_components),
+ $extension);
+}
+
+
+#
+# write_file_table(dir, base_dir, overview, testhash, fileview)
+#
+# Write one coverage line per file to standard output. OVERVIEW is a
+# reference to a hash containing
+# the following mapping:
+#
+# filename -> "lines_found,lines_hit,page_link"
+#
+# TESTHASH is a reference to the following hash:
+#
+# filename -> \%testdata
+# %testdata: name of test affecting this file -> \%testcount
+# %testcount: line number -> execution count for a single test
+#
+# BASE_DIR and FILEVIEW are accepted for compatibility with the genhtml code
+# this script was derived from, but are not used here.
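+#
+# Each file produces one tab-separated line on standard output, e.g.
+# (values invented):
+#
+#   /usr/src/linux/fs/open.c	Hi	87.5
+#
+# which is the coverage.dat format later read by conglomerate_functions.pl.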
+#
+
+sub write_file_table(*$$$$)
+{
+ my $dir = $_[0];
+ my $base_dir = $_[1];
+ my %overview = %{$_[2]};
+ my %testhash = %{$_[3]};
+ my $fileview = $_[4];
+ my $filename;
+ my $hit;
+ my $found;
+ my $classification;
+ my $rate_string;
+ my $rate;
+ my $junk;
+
+
+ foreach $filename (sort(keys(%overview)))
+ {
+ ($found, $hit, $junk) = split(",", $overview{$filename});
+ # Avoid division by zero for files with no instrumented lines
+ $rate = ($found > 0) ? ($hit * 100 / $found) : 0;
+ $rate_string = sprintf("%.1f", $rate);
+
+ if ($rate < 0.001) { $classification = "None"; }
+ elsif ($rate < $med_limit) { $classification = "Lo"; }
+ elsif ($rate < $hi_limit) { $classification = "Med"; }
+ else { $classification = "Hi"; }
+
+ print "$dir/$filename\t$classification\t$rate_string\n";
+
+ }
+}
+
+
+#
+# info(printf_parameter)
+#
+# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
+# is not set.
+#
+
+sub info(@)
+{
+ if (!$quiet)
+ {
+ # Print info string
+ printf(STDERR @_);
+ }
+}
+
+
+#
+# subtract_counts(data_ref, base_ref)
+#
+
+sub subtract_counts($$)
+{
+ my %data = %{$_[0]};
+ my %base = %{$_[1]};
+ my $line;
+ my $data_count;
+ my $base_count;
+ my $hit = 0;
+ my $found = 0;
+
+ foreach $line (keys(%data))
+ {
+ $found++;
+ $data_count = $data{$line};
+ $base_count = $base{$line};
+
+ if (defined($base_count))
+ {
+ $data_count -= $base_count;
+
+ # Make sure we don't get negative numbers
+ if ($data_count<0) { $data_count = 0; }
+ }
+
+ $data{$line} = $data_count;
+ if ($data_count > 0) { $hit++; }
+ }
+
+ return (\%data, $found, $hit);
+}
+
+
+#
+# add_counts(data1_ref, data2_ref)
+#
+# DATA1_REF and DATA2_REF are references to hashes containing a mapping
+#
+# line number -> execution count
+#
+# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
+# is a reference to a hash containing the combined mapping in which
+# execution counts are added.
+#
+
+sub add_counts($$)
+{
+ my %data1 = %{$_[0]}; # Hash 1
+ my %data2 = %{$_[1]}; # Hash 2
+ my %result; # Resulting hash
+ my $line; # Current line iteration scalar
+ my $data1_count; # Count of line in hash1
+ my $data2_count; # Count of line in hash2
+ my $found = 0; # Total number of lines found
+ my $hit = 0; # Number of lines with a count > 0
+
+ foreach $line (keys(%data1))
+ {
+ $data1_count = $data1{$line};
+ $data2_count = $data2{$line};
+
+ # Add counts if present in both hashes
+ if (defined($data2_count)) { $data1_count += $data2_count; }
+
+ # Store sum in %result
+ $result{$line} = $data1_count;
+
+ $found++;
+ if ($data1_count > 0) { $hit++; }
+ }
+
+ # Add lines unique to data2
+ foreach $line (keys(%data2))
+ {
+ # Skip lines already in data1
+ if (defined($data1{$line})) { next; }
+
+ # Copy count from data2
+ $result{$line} = $data2{$line};
+
+ $found++;
+ if ($result{$line} > 0) { $hit++; }
+ }
+
+ return (\%result, $found, $hit);
+}
+
+
+#
+# apply_baseline(data_ref, baseline_ref)
+#
+# Subtract the execution counts found in the baseline hash referenced by
+# BASELINE_REF from actual data in DATA_REF.
+#
+
+sub apply_baseline($$)
+{
+ my %data_hash = %{$_[0]};
+ my %base_hash = %{$_[1]};
+ my $filename;
+ my $testname;
+ my $data;
+ my $data_testdata;
+ my $data_funcdata;
+ my $data_count;
+ my $base;
+ my $base_testdata;
+ my $base_count;
+ my $sumcount;
+ my $found;
+ my $hit;
+
+ foreach $filename (keys(%data_hash))
+ {
+ # Get data set for data and baseline
+ $data = $data_hash{$filename};
+ $base = $base_hash{$filename};
+
+ # Get set entries for data and baseline
+ ($data_testdata, undef, $data_funcdata) =
+ get_info_entry($data);
+ ($base_testdata, $base_count) = get_info_entry($base);
+
+ # Sumcount has to be calculated anew
+ $sumcount = {};
+
+ # For each test case, subtract test specific counts
+ foreach $testname (keys(%{$data_testdata}))
+ {
+ # Get counts of both data and baseline
+ $data_count = $data_testdata->{$testname};
+
+ $hit = 0;
+
+ ($data_count, undef, $hit) =
+ subtract_counts($data_count, $base_count);
+
+ # Check whether this test case did hit any line at all
+ if ($hit > 0)
+ {
+ # Write back resulting hash
+ $data_testdata->{$testname} = $data_count;
+ }
+ else
+ {
+ # Delete test case which did not impact this
+ # file
+ delete($data_testdata->{$testname});
+ }
+
+ # Add counts to sum of counts
+ ($sumcount, $found, $hit) =
+ add_counts($sumcount, $data_count);
+ }
+
+ # Write back resulting entry
+ set_info_entry($data, $data_testdata, $sumcount,
+ $data_funcdata, $found, $hit);
+
+ $data_hash{$filename} = $data;
+ }
+
+ return (\%data_hash);
+}
+
+
+#
+# combine_info_entries(entry_ref1, entry_ref2)
+#
+# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
+# Return reference to resulting hash.
+#
+
+sub combine_info_entries($$)
+{
+ my $entry1 = $_[0]; # Reference to hash containing first entry
+ my $testdata1;
+ my $sumcount1;
+ my $funcdata1;
+
+ my $entry2 = $_[1]; # Reference to hash containing second entry
+ my $testdata2;
+ my $sumcount2;
+ my $funcdata2;
+
+ my %result; # Hash containing combined entry
+ my %result_testdata;
+ my $result_sumcount = {};
+ my %result_funcdata;
+ my $lines_found;
+ my $lines_hit;
+
+ my $testname;
+
+ # Retrieve data
+ ($testdata1, $sumcount1, $funcdata1) = get_info_entry($entry1);
+ ($testdata2, $sumcount2, $funcdata2) = get_info_entry($entry2);
+
+ # Combine funcdata
+ foreach (keys(%{$funcdata1}))
+ {
+ $result_funcdata{$_} = $funcdata1->{$_};
+ }
+
+ foreach (keys(%{$funcdata2}))
+ {
+ $result_funcdata{$_} = $funcdata2->{$_};
+ }
+
+ # Combine testdata
+ foreach $testname (keys(%{$testdata1}))
+ {
+ if (defined($testdata2->{$testname}))
+ {
+ # testname is present in both entries, requires
+ # combination
+ ($result_testdata{$testname}) =
+ add_counts($testdata1->{$testname},
+ $testdata2->{$testname});
+ }
+ else
+ {
+ # testname only present in entry1, add to result
+ $result_testdata{$testname} = $testdata1->{$testname};
+ }
+
+ # update sum count hash
+ ($result_sumcount, $lines_found, $lines_hit) =
+ add_counts($result_sumcount,
+ $result_testdata{$testname});
+ }
+
+ foreach $testname (keys(%{$testdata2}))
+ {
+ # Skip testnames already covered by previous iteration
+ if (defined($testdata1->{$testname})) { next; }
+
+ # testname only present in entry2, add to result hash
+ $result_testdata{$testname} = $testdata2->{$testname};
+
+ # update sum count hash
+ ($result_sumcount, $lines_found, $lines_hit) =
+ add_counts($result_sumcount,
+ $result_testdata{$testname});
+ }
+
+ # Calculate resulting sumcount
+
+ # Store result
+ set_info_entry(\%result, \%result_testdata, $result_sumcount,
+ \%result_funcdata, $lines_found, $lines_hit);
+
+ return(\%result);
+}
+
+
+#
+# combine_info_files(info_ref1, info_ref2)
+#
+# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
+# reference to resulting hash.
+#
+
+sub combine_info_files($$)
+{
+ my %hash1 = %{$_[0]};
+ my %hash2 = %{$_[1]};
+ my $filename;
+
+ foreach $filename (keys(%hash2))
+ {
+ if ($hash1{$filename})
+ {
+ # Entry already exists in hash1, combine them
+ $hash1{$filename} =
+ combine_info_entries($hash1{$filename},
+ $hash2{$filename});
+ }
+ else
+ {
+ # Entry only exists in the second hash, simply add to
+ # resulting hash
+ $hash1{$filename} = $hash2{$filename};
+ }
+ }
+
+ return(\%hash1);
+}
+
+
+#
+# apply_prefix(filename, prefix)
+#
+# If FILENAME begins with PREFIX, remove PREFIX from FILENAME and return
+# resulting string, otherwise return FILENAME.
+#
+
+sub apply_prefix($$)
+{
+ my $filename = $_[0];
+ my $prefix = $_[1];
+ my $clean_prefix = escape_regexp($prefix);
+
+ if (defined($prefix) && ($prefix ne ""))
+ {
+ if ($filename =~ /^$clean_prefix\/(.*)$/)
+ {
+ return substr($filename, length($prefix) + 1);
+ }
+ }
+
+ return $filename;
+}
+
+
+#
+# escape_regexp(string)
+#
+# Escape special characters in STRING which would be incorrectly interpreted
+# in a PERL regular expression.
+#
+
+sub escape_regexp($)
+{
+ my $string = $_[0];
+
+ # Escape special characters
+ $string =~ s/\\/\\\\/g;
+ $string =~ s/\^/\\\^/g;
+ $string =~ s/\$/\\\$/g;
+ $string =~ s/\./\\\./g;
+ $string =~ s/\|/\\\|/g;
+ $string =~ s/\(/\\\(/g;
+ $string =~ s/\)/\\\)/g;
+ $string =~ s/\[/\\\[/g;
+ $string =~ s/\]/\\\]/g;
+ $string =~ s/\*/\\\*/g;
+ $string =~ s/\?/\\\?/g;
+ $string =~ s/\{/\\\{/g;
+ $string =~ s/\}/\\\}/g;
+ $string =~ s/\+/\\\+/g;
+
+ return $string;
+}
diff --git a/third_party/lcov/contrib/galaxy/posterize.pl b/third_party/lcov/contrib/galaxy/posterize.pl
new file mode 100755
index 0000000..1b2895ed
--- /dev/null
+++ b/third_party/lcov/contrib/galaxy/posterize.pl
@@ -0,0 +1,312 @@
+#!/usr/bin/perl
+#
+# Copyright (c) International Business Machines Corp., 2002
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# posterize.pl
+#
+# This script generates a postscript file from output generated from the
+# fcgp http://sourceforge.net/projects/fcgp/ for plotting
+#
+#
+# History:
+# 2003-09-04 wrote - James M Kenefick Jr. <jkenefic@us.ibm.com>
+#
+
+
+
+# A good deal of this could be turned into command-line
+# arguments.
+
+# Constants
+my $Title = "Linux Kernel Coverage";
+my $KernelVersion = "2.5.73";
+my $TestDescription = "A Sample Print";
+my $Image = "../lgp/image.ps";
+
+# Variables
+my $Bounds = "";
+# Paper sizes in inches
+my $PAPER_WIDTH = 34;
+my $PAPER_HEIGHT = 42;
+
+# points per inch
+my $ppi = 72;
+
+# Margins
+my $TopMargin = 1;
+my $BottomMargin = 1.5;
+my $LeftMargin = 1;
+my $RightMargin = 1;
+
+
+$RightMargin = $PAPER_WIDTH - $RightMargin;
+$TopMargin = $PAPER_HEIGHT - $TopMargin;
+
+my $filename = "poster.ps";
+
+# Sizes in ppi
+my $PPI_WIDTH = ($PAPER_WIDTH * $ppi);
+my $PPI_HEIGHT = ($PAPER_HEIGHT * $ppi);
+
+# Date we create poster
+my $date = `date`;
+
+print STDERR "Creating Poster\n";
+
+open POSTER, ">$filename" or die "Cannot write $filename: $!\n";
+
+
+
+print(POSTER <<END_OF_USAGE);
+%!PS-Adobe-1.0
+%%DocumentFonts: Helvetica Helvetica-Bold
+%%Title: $Title
+%%Creator: Rusty's scripts and postersize (GPL)
+%%CreationDate: $date
+%%Pages: 1
+%%BoundingBox: 0 0 $PPI_WIDTH $PPI_HEIGHT
+%%EndComments
+%!
+/PRorig_showpage_x178313 /showpage load def /showpage{
+ errordict /handleerror {} put
+ }def
+/initgraphics{}def/setpagedevice{pop}def
+statusdict begin /a4tray{}def /lettertray{}def end
+/a4{}def/a3{}def/a0{}def/letter{}def/legal{}def
+/a4small{}def /lettersmall{}def /a4tray{}def /lettertray{}def
+/setscreen{pop pop pop}def
+/ColorManagement {pop} def
+
+
+/A {gsave newpath 0 360 arc stroke grestore} bind def
+/M {moveto} bind def
+/L {lineto} bind def
+/D {[] 0 setdash} bind def
+/D5 {[5] 0 setdash} bind def
+/C0 {0 0 0 setrgbcolor} bind def
+/C1 {.8 .4 .4 setrgbcolor} bind def
+/C2 {.5 1 .5 setrgbcolor} bind def
+/C3 {0 .7 0 setrgbcolor} bind def
+/C4 {1 0 0 setrgbcolor} bind def
+/C5 {0 0 1 setrgbcolor} bind def
+/R {grestore} bind def
+/S {0 0 M stroke} bind def
+/T {gsave translate} bind def
+/U {C0 newpath 4 copy 4 2 roll 8 7 roll M L L L closepath stroke
+C1 findfont exch scalefont setfont M show} bind def
+
+% Added James M Kenefick Jr.
+/Hi_Color {0 0 1} def
+/Med_Color {0 .60 1} def
+/Lo_Color {0 1 1} def
+/None_Color {.75 .75 .75} def
+/Hi {newpath 4 copy 4 2 roll 8 7 roll M L L L Hi_Color setrgbcolor fill closepath} bind def
+/Med {newpath 4 copy 4 2 roll 8 7 roll M L L L Med_Color setrgbcolor fill closepath} bind def
+/Lo {newpath 4 copy 4 2 roll 8 7 roll M L L L Lo_Color setrgbcolor fill closepath} bind def
+/None {newpath 4 copy 4 2 roll 8 7 roll M L L L None_Color setrgbcolor fill closepath} bind def
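+% Hi/Med/Lo/None take the four bounding-box numbers (xmin ymin xmax ymax)
+% pushed by the "XMin YMin XMax YMax Class" lines that
+% conglomerate_functions.pl writes into the image, and fill that box.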
+
+/inch
+{
+ 72 mul
+}
+def
+
+/LeftMargin $LeftMargin inch def
+/RightMargin $RightMargin inch def
+/TopMargin $TopMargin inch def
+/BottomMargin $BottomMargin inch def
+/FontScale 25 def
+/AuthorFontScale 70 def
+
+/centerText
+{
+ dup
+ stringwidth pop
+ 2 div
+ RightMargin LeftMargin sub 2 div
+ exch sub
+ LeftMargin add
+ NextLine moveto
+ show
+}
+def
+
+/upLine
+{
+ /NextLine
+ NextLine LineSpace2 add
+ def
+}
+def
+
+/advanceLine
+{
+ /NextLine
+ NextLine LineSpace sub
+ def
+}
+def
+
+/fontScale
+{
+ TopMargin BottomMargin sub FontScale div
+}
+def
+
+/authorfontScale
+{
+ TopMargin BottomMargin sub AuthorFontScale div
+}
+def
+
+/rightJustify
+{
+ dup
+ stringwidth pop
+ RightMargin 1 inch sub
+ exch sub
+ NextLine moveto
+ show
+}
+def
+
+/usableY
+{
+ TopMargin LineSpace 3 mul sub BottomMargin sub
+}
+def
+
+/usableX
+{
+ RightMargin LeftMargin sub
+}
+def
+gsave
+/Times-Roman findfont fontScale scalefont setfont
+/LineSpace fontScale def
+/NextLine (B) stringwidth pop TopMargin exch sub def
+
+%%EndProlog
+%%Page 1
+% title
+
+($Title) centerText advanceLine
+(Kernel: $KernelVersion) centerText advanceLine
+($TestDescription) centerText
+
+% Author Block
+LeftMargin BottomMargin translate
+/Times-Roman findfont authorfontScale scalefont setfont
+/LineSpace2 authorfontScale def
+/NextLine 0 def
+(Based on work by Rusty Russell, Christian Reiniger) rightJustify
+upLine
+(By James M. Kenefick Jr.) rightJustify
+
+grestore
+LeftMargin BottomMargin translate
+
+% Key Block
+15 15 scale
+% This is the key for the graph.
+
+/box { newpath moveto 0 1 rlineto 2 0 rlineto 0 -1 rlineto closepath } def
+/key { setrgbcolor 2 copy box gsave fill grestore 0 0 0 setrgbcolor strokepath fill moveto 2.4 0.25 rmoveto show } def
+
+/Helvetica-Oblique findfont
+1 scalefont setfont
+0.1 setlinewidth
+
+(static functions) 1 5 0.5 1 0.5 key % Light green.
+(indirectly called functions) 1 7 0 0.7 0 key % green
+(exported functions) 1 9 1 0 0 key % red
+(other functions) 1 11 0 0 1 key % blue
+
+(Low Coverage) 1 15 Lo_Color key % cyan
+(Medium Coverage) 1 17 Med_Color key % medium blue
+(Hi Coverage) 1 19 Hi_Color key % blue
+(No Coverage) 1 21 None_Color key % grey
+1 3.25 moveto
+0.8 0.4 0.4 setrgbcolor
+/Helvetica findfont
+1 scalefont setfont
+(xxx) show
+1 3 moveto
+2.4 0.25 rmoveto
+0 0 0 setrgbcolor
+/Helvetica-Oblique findfont
+1 scalefont setfont
+(function name) show
+
+1 1.25 moveto
+0.8 0.4 0.4 setrgbcolor
+/Helvetica-Bold findfont
+1 scalefont setfont
+(xxx) show
+1 1 moveto
+2.4 0.25 rmoveto
+0 0 0 setrgbcolor
+/Helvetica-Oblique findfont
+1 scalefont setfont
+(source filename) show
+
+6 24 moveto
+/Helvetica-Bold findfont
+2 scalefont setfont
+(Key) show
+
+% Box around it
+newpath
+0.2 0.2 moveto
+0.2 27 lineto
+17 27 lineto
+17 0.2 lineto
+closepath
+strokepath fill
+
+
+1 15 div 1 15 div scale
+
+% find and move to center
+END_OF_USAGE
+
+# Find the bounds for the image
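+# (the last line of image.ps is expected to be the "% bound xmin ymin xmax ymax"
+# comment appended by the Makefile from gen_makefile.sh, so splitting on spaces
+# yields the two junk words and then the four corners)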
+
+$Bounds = `tail -1 $Image`;
+($Junk, $Junk, $minX, $minY, $maxX, $maxY) = split / /, $Bounds;
+
+my $xRange = $maxX - $minX;
+my $yRange = $maxY - $minY;
+
+if ($xRange < $yRange){
+ $Range = $xRange;
+} else {
+ $Range = $yRange;
+}
+print POSTER " 0 usableY usableX sub 2 div translate\n";
+print POSTER "usableX $Range div usableX $Range div scale\n";
+print POSTER "$Range 2 div $Range 2 div translate\n";
+print POSTER "gsave\n";
+# Paste in actual image.
+print POSTER `cat $Image`;
+print POSTER "%%Trailer\n";
+print POSTER "grestore\n";
+print POSTER "showpage\n";
+print POSTER "PRorig_showpage_x178313\n";
+print POSTER "/showpage /PRorig_showpage_x178313 load def\n";
+