@echo USE_LIBPCRE=\''$(subst ','\'',$(subst ','\'',$(USE_LIBPCRE)))'\' >>$@
@echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@
@echo NO_PYTHON=\''$(subst ','\'',$(subst ','\'',$(NO_PYTHON)))'\' >>$@
+ @echo NO_UNIX_SOCKETS=\''$(subst ','\'',$(subst ','\'',$(NO_UNIX_SOCKETS)))'\' >>$@
+ifdef GIT_TEST_OPTS
+ @echo GIT_TEST_OPTS=\''$(subst ','\'',$(subst ','\'',$(GIT_TEST_OPTS)))'\' >>$@
+endif
ifdef GIT_TEST_CMP
@echo GIT_TEST_CMP=\''$(subst ','\'',$(subst ','\'',$(GIT_TEST_CMP)))'\' >>$@
endif
endif
@echo NO_GETTEXT=\''$(subst ','\'',$(subst ','\'',$(NO_GETTEXT)))'\' >>$@
@echo GETTEXT_POISON=\''$(subst ','\'',$(subst ','\'',$(GETTEXT_POISON)))'\' >>$@
- @echo NO_UNIX_SOCKETS=\''$(subst ','\'',$(subst ','\'',$(NO_UNIX_SOCKETS)))'\' >>$@
+ifdef GIT_PERF_REPEAT_COUNT
+ @echo GIT_PERF_REPEAT_COUNT=\''$(subst ','\'',$(subst ','\'',$(GIT_PERF_REPEAT_COUNT)))'\' >>$@
+endif
+ifdef GIT_PERF_REPO
+ @echo GIT_PERF_REPO=\''$(subst ','\'',$(subst ','\'',$(GIT_PERF_REPO)))'\' >>$@
+endif
+ifdef GIT_PERF_LARGE_REPO
+ @echo GIT_PERF_LARGE_REPO=\''$(subst ','\'',$(subst ','\'',$(GIT_PERF_LARGE_REPO)))'\' >>$@
+endif
+ifdef GIT_PERF_MAKE_OPTS
+ @echo GIT_PERF_MAKE_OPTS=\''$(subst ','\'',$(subst ','\'',$(GIT_PERF_MAKE_OPTS)))'\' >>$@
+endif
### Detect Tck/Tk interpreter path changes
ifndef NO_TCLTK
test: all
$(MAKE) -C t/ all
+perf: all
+ $(MAKE) -C t/perf/ all
+
+.PHONY: test perf
+
test-ctype$X: ctype.o
test-date$X: date.o ctype.o
valgrind:
$(MAKE) GIT_TEST_OPTS="$(GIT_TEST_OPTS) --valgrind"
-.PHONY: pre-clean $(T) aggregate-results clean valgrind
+perf:
+ $(MAKE) -C perf/ all
+
+# Smoke testing targets
+-include ../GIT-VERSION-FILE
+uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo unknown')
+uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo unknown')
+
+test-results:
+ mkdir -p test-results
+
+test-results/git-smoke.tar.gz: test-results
+ $(PERL_PATH) ./harness \
+ --archive="test-results/git-smoke.tar.gz" \
+ $(T)
+
+smoke: test-results/git-smoke.tar.gz
+
+SMOKE_UPLOAD_FLAGS =
+ifdef SMOKE_USERNAME
+ SMOKE_UPLOAD_FLAGS += -F username="$(SMOKE_USERNAME)" -F password="$(SMOKE_PASSWORD)"
+endif
+ifdef SMOKE_COMMENT
+ SMOKE_UPLOAD_FLAGS += -F comments="$(SMOKE_COMMENT)"
+endif
+ifdef SMOKE_TAGS
+ SMOKE_UPLOAD_FLAGS += -F tags="$(SMOKE_TAGS)"
+endif
+
+smoke_report: smoke
+ curl \
+ -H "Expect: " \
+ -F project=Git \
+ -F architecture="$(uname_M)" \
+ -F platform="$(uname_S)" \
+ -F revision="$(GIT_VERSION)" \
+ -F report_file=@test-results/git-smoke.tar.gz \
+ $(SMOKE_UPLOAD_FLAGS) \
+ http://smoke.git.nix.is/app/projects/process_add_report/1 \
+ | grep -v ^Redirecting
+
+.PHONY: pre-clean $(T) aggregate-results clean valgrind perf
--- /dev/null
+build/
+test-results/
--- /dev/null
+-include ../../config.mak
+export GIT_TEST_OPTS
+
+all: perf
+
+perf: pre-clean
+ ./run
+
+pre-clean:
+ rm -rf test-results
+
+clean:
+ rm -rf build "trash directory".* test-results
+
+.PHONY: all perf pre-clean clean
--- /dev/null
+Git performance tests
+=====================
+
+This directory holds performance testing scripts for git tools. The
+first part of this document describes the various ways in which you
+can run them.
+
+When fixing the tools or adding enhancements, you are strongly
+encouraged to add tests in this directory to cover what you are
+trying to fix or enhance. The later part of this short document
+describes how your test scripts should be organized.
+
+
+Running Tests
+-------------
+
+The easiest way to run tests is to say "make". This runs all
+the tests on the current git repository.
+
+ === Running 2 tests in this tree ===
+ [...]
+ Test this tree
+ ---------------------------------------------------------
+ 0001.1: rev-list --all 0.54(0.51+0.02)
+ 0001.2: rev-list --all --objects 6.14(5.99+0.11)
+ 7810.1: grep worktree, cheap regex 0.16(0.16+0.35)
+ 7810.2: grep worktree, expensive regex 7.90(29.75+0.37)
+ 7810.3: grep --cached, cheap regex 3.07(3.02+0.25)
+ 7810.4: grep --cached, expensive regex 9.39(30.57+0.24)
+
+You can compare multiple repositories and even git revisions with the
+'run' script:
+
+ $ ./run . origin/next /path/to/git-tree p0001-rev-list.sh
+
+where . stands for the current git tree. The full invocation is
+
+ ./run [<revision|directory>...] [--] [<test-script>...]
+
+A '.' argument is implied if you do not pass any other
+revisions/directories.
+
+You can also manually test this or another git build tree, and then
+call the aggregation script to summarize the results:
+
+ $ ./p0001-rev-list.sh
+ [...]
+ $ GIT_BUILD_DIR=/path/to/other/git ./p0001-rev-list.sh
+ [...]
+ $ ./aggregate.perl . /path/to/other/git ./p0001-rev-list.sh
+
+aggregate.perl takes the same arguments as 'run'; it just does not run
+anything beforehand.
+
+You can set the following variables (also in your config.mak; see the
+example after this list):
+
+ GIT_PERF_REPEAT_COUNT
+ Number of times a test should be repeated for best-of-N
+	measurements. Defaults to 3.
+
+ GIT_PERF_MAKE_OPTS
+ Options to use when automatically building a git tree for
+ performance testing. E.g., -j6 would be useful.
+
+ GIT_PERF_REPO
+ GIT_PERF_LARGE_REPO
+ Repositories to copy for the performance tests. The normal
+ repo should be at least git.git size. The large repo should
+ probably be about linux-2.6.git size for optimal results.
+ Both default to the git.git you are running from.
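+
+For instance (the repository paths and values below are only an
+illustration; adjust them to your setup), a config.mak could contain:
+
+    GIT_PERF_REPEAT_COUNT = 10
+    GIT_PERF_MAKE_OPTS = -j6
+    GIT_PERF_REPO = /path/to/a/copy/of/git.git
+    GIT_PERF_LARGE_REPO = /path/to/a/copy/of/linux-2.6.git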
+
+You can also pass the options taken by ordinary git tests; the most
+useful one is:
+
+--root=<directory>::
+ Create "trash" directories used to store all temporary data during
+ testing under <directory>, instead of the t/ directory.
+ Using this option with a RAM-based filesystem (such as tmpfs)
+ can massively speed up the test suite.
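+
+For example, assuming a tmpfs is mounted at /dev/shm (the mount point
+is only an illustration), you could either put
+
+    GIT_TEST_OPTS = --root=/dev/shm/perf
+
+in your config.mak, or pass the option to an individual script:
+
+    $ ./p0001-rev-list.sh --root=/dev/shm/perf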
+
+
+Naming Tests
+------------
+
+The performance test files are named as:
+
+ pNNNN-commandname-details.sh
+
+where N is a decimal digit. The same conventions for choosing NNNN as
+for normal tests apply.
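+
+For example, p0001-rev-list.sh (included in this directory) times
+history walking, and a hypothetical test of 'git grep' could be named
+p7810-grep.sh, mirroring t7810-grep.sh.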
+
+
+Writing Tests
+-------------
+
+The perf script starts much like a normal test script, except it
+sources perf-lib.sh:
+
+ #!/bin/sh
+ #
+ # Copyright (c) 2005 Junio C Hamano
+ #
+
+ test_description='xxx performance test'
+ . ./perf-lib.sh
+
+After that you will want to use some of the following:
+
+ test_perf_default_repo # sets up a "normal" repository
+ test_perf_large_repo # sets up a "large" repository
+
+ test_perf_default_repo sub # ditto, in a subdir "sub"
+
+ test_checkout_worktree # if you need the worktree too
+
+At least one of the first two is required!
+
+You can use test_expect_success as usual. For actual performance
+tests, use
+
+ test_perf 'descriptive string' '
+ command1 &&
+ command2
+ '
+
+test_perf spawns a subshell, for lack of better options. This means
+that
+
+* you _must_ export all variables that you need in the subshell
+
+* you _must_ flag all variables that you want to persist from the
+ subshell with 'test_export':
+
+ test_perf 'descriptive string' '
+ foo=$(git rev-parse HEAD) &&
+ test_export foo
+ '
+
+ The so-exported variables are automatically marked for export in the
+ shell executing the perf test. For your convenience, test_export is
+ the same as export in the main shell.
+
+ This feature relies on a bit of magic using 'set' and 'source'.
+ While we have tried to make sure that it can cope with embedded
+ whitespace and other special characters, it will not work with
+ multi-line data.
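+
+
+Putting it together, a complete test script (say, a hypothetical
+p9999-status.sh; the timed command is only an illustration) might look
+like the following sketch; see p0001-rev-list.sh in this directory for
+a real example:
+
+  #!/bin/sh
+
+  test_description='illustrative performance test'
+  . ./perf-lib.sh
+
+  test_perf_default_repo
+  test_checkout_worktree
+
+  test_perf 'status in a fully checked-out worktree' '
+  	git status >/dev/null
+  '
+
+  test_done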
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Git;
+
+sub get_times {
+ my $name = shift;
+ open my $fh, "<", $name or return undef;
+ my $line = <$fh>;
+ return undef if not defined $line;
+ close $fh or die "cannot close $name: $!";
+ $line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/
+ or die "bad input line: $line";
+ my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
+ return ($rt, $4, $5);
+}
+
+sub format_times {
+ my ($r, $u, $s, $firstr) = @_;
+ if (!defined $r) {
+ return "<missing>";
+ }
+ my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
+ if (defined $firstr) {
+ if ($firstr > 0) {
+ $out .= sprintf " %+.1f%%", 100.0*($r-$firstr)/$firstr;
+ } elsif ($r == 0) {
+ $out .= " =";
+ } else {
+ $out .= " +inf";
+ }
+ }
+ return $out;
+}
+
+my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests);
+while (scalar @ARGV) {
+ my $arg = $ARGV[0];
+ my $dir;
+ last if -f $arg or $arg eq "--";
+ if (! -d $arg) {
+ my $rev = Git::command_oneline(qw(rev-parse --verify), $arg);
+ $dir = "build/".$rev;
+ } else {
+ $arg =~ s{/*$}{};
+ $dir = $arg;
+ $dirabbrevs{$dir} = $dir;
+ }
+ push @dirs, $dir;
+ $dirnames{$dir} = $arg;
+ my $prefix = $dir;
+ $prefix =~ tr/^a-zA-Z0-9/_/c;
+ $prefixes{$dir} = $prefix . '.';
+ shift @ARGV;
+}
+
+if (not @dirs) {
+ @dirs = ('.');
+}
+$dirnames{'.'} = $dirabbrevs{'.'} = "this tree";
+$prefixes{'.'} = '';
+
+shift @ARGV if scalar @ARGV and $ARGV[0] eq "--";
+
+@tests = @ARGV;
+if (not @tests) {
+ @tests = glob "p????-*.sh";
+}
+
+my @subtests;
+my %shorttests;
+for my $t (@tests) {
+ $t =~ s{(?:.*/)?(p(\d+)-[^/]+)\.sh$}{$1} or die "bad test name: $t";
+ my $n = $2;
+ my $fname = "test-results/$t.subtests";
+ open my $fp, "<", $fname or die "cannot open $fname: $!";
+ for (<$fp>) {
+ chomp;
+ /^(\d+)$/ or die "malformed subtest line: $_";
+ push @subtests, "$t.$1";
+ $shorttests{"$t.$1"} = "$n.$1";
+ }
+ close $fp or die "cannot close $fname: $!";
+}
+
+sub read_descr {
+ my $name = shift;
+ open my $fh, "<", $name or return "<error reading description>";
+ my $line = <$fh>;
+ close $fh or die "cannot close $name";
+ chomp $line;
+ return $line;
+}
+
+my %descrs;
+my $descrlen = 4; # "Test"
+for my $t (@subtests) {
+ $descrs{$t} = $shorttests{$t}.": ".read_descr("test-results/$t.descr");
+ $descrlen = length $descrs{$t} if length $descrs{$t}>$descrlen;
+}
+
+sub have_duplicate {
+ my %seen;
+ for (@_) {
+ return 1 if exists $seen{$_};
+ $seen{$_} = 1;
+ }
+ return 0;
+}
+sub have_slash {
+ for (@_) {
+ return 1 if m{/};
+ }
+ return 0;
+}
+
+my %newdirabbrevs = %dirabbrevs;
+while (!have_duplicate(values %newdirabbrevs)) {
+ %dirabbrevs = %newdirabbrevs;
+ last if !have_slash(values %dirabbrevs);
+ %newdirabbrevs = %dirabbrevs;
+ for (values %newdirabbrevs) {
+ s{^[^/]*/}{};
+ }
+}
+
+my %times;
+my @colwidth = ((0)x@dirs);
+for my $i (0..$#dirs) {
+ my $d = $dirs[$i];
+ my $w = length (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
+ $colwidth[$i] = $w if $w > $colwidth[$i];
+}
+for my $t (@subtests) {
+ my $firstr;
+ for my $i (0..$#dirs) {
+ my $d = $dirs[$i];
+ $times{$prefixes{$d}.$t} = [get_times("test-results/$prefixes{$d}$t.times")];
+ my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
+ my $w = length format_times($r,$u,$s,$firstr);
+ $colwidth[$i] = $w if $w > $colwidth[$i];
+ $firstr = $r unless defined $firstr;
+ }
+}
+my $totalwidth = 3*@dirs+$descrlen;
+$totalwidth += $_ for (@colwidth);
+
+printf "%-${descrlen}s", "Test";
+for my $i (0..$#dirs) {
+ my $d = $dirs[$i];
+ printf " %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
+}
+print "\n";
+print "-"x$totalwidth, "\n";
+for my $t (@subtests) {
+ printf "%-${descrlen}s", $descrs{$t};
+ my $firstr;
+ for my $i (0..$#dirs) {
+ my $d = $dirs[$i];
+ my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
+ printf " %-$colwidth[$i]s", format_times($r,$u,$s,$firstr);
+ $firstr = $r unless defined $firstr;
+ }
+ print "\n";
+}
--- /dev/null
+#!/usr/bin/perl
+
+my $minrt = 1e100;
+my $min;
+
+while (<>) {
+ # [h:]m:s.xx U.xx S.xx
+ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/
+ or die "bad input line: $_";
+ my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
+ if ($rt < $minrt) {
+ $min = $_;
+ $minrt = $rt;
+ }
+}
+
+if (!defined $min) {
+ die "no input found";
+}
+
+print $min;
--- /dev/null
+#!/bin/sh
+
+test_description='Tests whether perf-lib facilities work'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_perf 'test_perf_default_repo works' '
+ foo=$(git rev-parse HEAD) &&
+ test_export foo
+'
+
+test_checkout_worktree
+
+test_perf 'test_checkout_worktree works' '
+ wt=$(find . | wc -l) &&
+ idx=$(git ls-files | wc -l) &&
+ test $wt -gt $idx
+'
+
+baz=baz
+test_export baz
+
+test_expect_success 'test_export works' '
+ echo "$foo" &&
+ test "$foo" = "$(git rev-parse HEAD)" &&
+ echo "$baz" &&
+ test "$baz" = baz
+'
+
+test_perf 'export a weird var' '
+ bar="weird # variable" &&
+ test_export bar
+'
+
+test_expect_success 'test_export works with weird vars' '
+ echo "$bar" &&
+ test "$bar" = "weird # variable"
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description="Tests history walking performance"
+
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_perf 'rev-list --all' '
+ git rev-list --all >/dev/null
+'
+
+test_perf 'rev-list --all --objects' '
+ git rev-list --all --objects >/dev/null
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2011 Thomas Rast
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see http://www.gnu.org/licenses/ .
+
+# do the --tee work early; it otherwise confuses our careful
+# GIT_BUILD_DIR mangling
+case "$GIT_TEST_TEE_STARTED, $* " in
+done,*)
+ # do not redirect again
+ ;;
+*' --tee '*|*' --va'*)
+ mkdir -p test-results
+ BASE=test-results/$(basename "$0" .sh)
+ (GIT_TEST_TEE_STARTED=done ${SHELL-sh} "$0" "$@" 2>&1;
+ echo $? > $BASE.exit) | tee $BASE.out
+ test "$(cat $BASE.exit)" = 0
+ exit
+ ;;
+esac
+
+TEST_DIRECTORY=$(pwd)/..
+TEST_OUTPUT_DIRECTORY=$(pwd)
+if test -z "$GIT_TEST_INSTALLED"; then
+ perf_results_prefix=
+else
+ perf_results_prefix=$(printf "%s" "${GIT_TEST_INSTALLED%/bin-wrappers}" | tr -c "[a-zA-Z0-9]" "[_*]")"."
+ # make the tested dir absolute
+ GIT_TEST_INSTALLED=$(cd "$GIT_TEST_INSTALLED" && pwd)
+fi
+
+TEST_NO_CREATE_REPO=t
+
+. ../test-lib.sh
+
+perf_results_dir=$TEST_OUTPUT_DIRECTORY/test-results
+mkdir -p "$perf_results_dir"
+rm -f "$perf_results_dir"/$(basename "$0" .sh).subtests
+
+if test -z "$GIT_PERF_REPEAT_COUNT"; then
+ GIT_PERF_REPEAT_COUNT=3
+fi
+die_if_build_dir_not_repo () {
+ if ! ( cd "$TEST_DIRECTORY/.." &&
+	     git rev-parse --git-dir >/dev/null 2>&1 ); then
+ error "No $1 defined, and your build directory is not a repo"
+ fi
+}
+
+if test -z "$GIT_PERF_REPO"; then
+ die_if_build_dir_not_repo '$GIT_PERF_REPO'
+ GIT_PERF_REPO=$TEST_DIRECTORY/..
+fi
+if test -z "$GIT_PERF_LARGE_REPO"; then
+ die_if_build_dir_not_repo '$GIT_PERF_LARGE_REPO'
+ GIT_PERF_LARGE_REPO=$TEST_DIRECTORY/..
+fi
+
+test_perf_create_repo_from () {
+ test "$#" = 2 ||
+	error "bug in the test script: not 2 parameters to test_perf_create_repo_from"
+ repo="$1"
+ source="$2"
+ source_git=$source/$(cd "$source" && git rev-parse --git-dir)
+ mkdir -p "$repo/.git"
+ (
+ cd "$repo/.git" &&
+ { cp -Rl "$source_git/objects" . 2>/dev/null ||
+ cp -R "$source_git/objects" .; } &&
+ for stuff in "$source_git"/*; do
+ case "$stuff" in
+ */objects|*/hooks|*/config)
+ ;;
+ *)
+ cp -R "$stuff" . || break
+ ;;
+ esac
+ done &&
+ cd .. &&
+ git init -q &&
+ mv .git/hooks .git/hooks-disabled 2>/dev/null
+ ) || error "failed to copy repository '$source' to '$repo'"
+}
+
+# call at least one of these to establish an appropriately-sized repository
+test_perf_default_repo () {
+ test_perf_create_repo_from "${1:-$TRASH_DIRECTORY}" "$GIT_PERF_REPO"
+}
+test_perf_large_repo () {
+ if test "$GIT_PERF_LARGE_REPO" = "$GIT_BUILD_DIR"; then
+ echo "warning: \$GIT_PERF_LARGE_REPO is \$GIT_BUILD_DIR." >&2
+ echo "warning: This will work, but may not be a sufficiently large repo" >&2
+ echo "warning: for representative measurements." >&2
+ fi
+ test_perf_create_repo_from "${1:-$TRASH_DIRECTORY}" "$GIT_PERF_LARGE_REPO"
+}
+test_checkout_worktree () {
+ git checkout-index -u -a ||
+ error "git checkout-index failed"
+}
+
+# Performance tests should never fail. If they do, stop immediately
+immediate=t
+
+test_run_perf_ () {
+ test_cleanup=:
+ test_export_="test_cleanup"
+ export test_cleanup test_export_
+ /usr/bin/time -f "%E %U %S" -o test_time.$i "$SHELL" -c '
+. '"$TEST_DIRECTORY"/test-lib-functions.sh'
+test_export () {
+ [ $# != 0 ] || return 0
+ test_export_="$test_export_\\|$1"
+ shift
+ test_export "$@"
+}
+'"$1"'
+ret=$?
+set | sed -n "s'"/'/'\\\\''/g"';s/^\\($test_export_\\)/export '"'&'"'/p" >test_vars
+exit $ret' >&3 2>&4
+ eval_ret=$?
+
+ if test $eval_ret = 0 || test -n "$expecting_failure"
+ then
+ test_eval_ "$test_cleanup"
+ . ./test_vars || error "failed to load updated environment"
+ fi
+ if test "$verbose" = "t" && test -n "$HARNESS_ACTIVE"; then
+ echo ""
+ fi
+ return "$eval_ret"
+}
+
+
+test_perf () {
+ test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
+ test "$#" = 2 ||
+	error "bug in the test script: not 2 or 3 parameters to test_perf"
+ export test_prereq
+ if ! test_skip "$@"
+ then
+ base=$(basename "$0" .sh)
+ echo "$test_count" >>"$perf_results_dir"/$base.subtests
+ echo "$1" >"$perf_results_dir"/$base.$test_count.descr
+ if test -z "$verbose"; then
+ echo -n "perf $test_count - $1:"
+ else
+ echo "perf $test_count - $1:"
+ fi
+ for i in $(seq 1 $GIT_PERF_REPEAT_COUNT); do
+ say >&3 "running: $2"
+ if test_run_perf_ "$2"
+ then
+ if test -z "$verbose"; then
+ echo -n " $i"
+ else
+ echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
+ fi
+ else
+ test -z "$verbose" && echo
+ test_failure_ "$@"
+ break
+ fi
+ done
+ if test -z "$verbose"; then
+ echo " ok"
+ else
+ test_ok_ "$1"
+ fi
+ base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
+ "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+ fi
+ echo >&3 ""
+}
+
+# We extend test_done to print timings at the end (./run disables this
+# and does it after running everything)
+test_at_end_hook_ () {
+ if test -z "$GIT_PERF_AGGREGATING_LATER"; then
+ ( cd "$TEST_DIRECTORY"/perf && ./aggregate.perl $(basename "$0") )
+ fi
+}
+
+test_export () {
+ export "$@"
+}
--- /dev/null
+#!/bin/sh
+
+case "$1" in
+ --help)
+ echo "usage: $0 [other_git_tree...] [--] [test_scripts]"
+ exit 0
+ ;;
+esac
+
+die () {
+ echo >&2 "error: $*"
+ exit 1
+}
+
+run_one_dir () {
+ if test $# -eq 0; then
+ set -- p????-*.sh
+ fi
+ echo "=== Running $# tests in ${GIT_TEST_INSTALLED:-this tree} ==="
+ for t in "$@"; do
+ ./$t $GIT_TEST_OPTS
+ done
+}
+
+unpack_git_rev () {
+ rev=$1
+ mkdir -p build/$rev
+ (cd "$(git rev-parse --show-cdup)" && git archive --format=tar $rev) |
+ (cd build/$rev && tar x)
+}
+build_git_rev () {
+ rev=$1
+ cp ../../config.mak build/$rev/config.mak
+ (cd build/$rev && make $GIT_PERF_MAKE_OPTS) ||
+	die "failed to build revision '$rev'"
+}
+
+run_dirs_helper () {
+ mydir=${1%/}
+ shift
+ while test $# -gt 0 -a "$1" != -- -a ! -f "$1"; do
+ shift
+ done
+ if test $# -gt 0 -a "$1" = --; then
+ shift
+ fi
+ if [ ! -d "$mydir" ]; then
+ rev=$(git rev-parse --verify "$mydir" 2>/dev/null) ||
+ die "'$mydir' is neither a directory nor a valid revision"
+ if [ ! -d build/$rev ]; then
+ unpack_git_rev $rev
+ fi
+ build_git_rev $rev
+ mydir=build/$rev
+ fi
+ if test "$mydir" = .; then
+ unset GIT_TEST_INSTALLED
+ else
+ GIT_TEST_INSTALLED="$mydir/bin-wrappers"
+ export GIT_TEST_INSTALLED
+ fi
+ run_one_dir "$@"
+}
+
+run_dirs () {
+ while test $# -gt 0 -a "$1" != -- -a ! -f "$1"; do
+ run_dirs_helper "$@"
+ shift
+ done
+}
+
+GIT_PERF_AGGREGATING_LATER=t
+export GIT_PERF_AGGREGATING_LATER
+
+cd "$(dirname $0)"
+. ../../GIT-BUILD-OPTIONS
+
+if test $# = 0 -o "$1" = -- -o -f "$1"; then
+ set -- . "$@"
+fi
+run_dirs "$@"
+./aggregate.perl "$@"
.*_TEST
PROVE
VALGRIND
+ PERF_AGGREGATING_LATER
));
my @vars = grep(/^GIT_/ && !/^GIT_($ok)/o, @env);
print join("\n", @vars);
LF='
'
+export _x05 _x40 _z40 LF
+
# Each test should start with something like this, after copyright notices:
#
# test_description='Description of this test...
esac
}
+# stub; perf-lib overrides it
+test_at_end_hook_ () {
+ :
+}
+
test_done () {
GIT_EXIT_OK=t
if test -z "$HARNESS_ACTIVE"; then
- test_results_dir="$TEST_DIRECTORY/test-results"
+ test_results_dir="$TEST_OUTPUT_DIRECTORY/test-results"
mkdir -p "$test_results_dir"
test_results_path="$test_results_dir/${0%.sh}-$$.counts"
cd "$(dirname "$remove_trash")" &&
rm -rf "$(basename "$remove_trash")"
+ test_at_end_hook_
+
exit 0 ;;
*)
# itself.
TEST_DIRECTORY=$(pwd)
fi
+if test -z "$TEST_OUTPUT_DIRECTORY"
+then
+ # Similarly, override this to store the test-results subdir
+ # elsewhere
+ TEST_OUTPUT_DIRECTORY=$TEST_DIRECTORY
+fi
GIT_BUILD_DIR="$TEST_DIRECTORY"/..
if test -n "$valgrind"
test -n "$root" && test="$root/$test"
case "$test" in
/*) TRASH_DIRECTORY="$test" ;;
- *) TRASH_DIRECTORY="$TEST_DIRECTORY/$test" ;;
+ *) TRASH_DIRECTORY="$TEST_OUTPUT_DIRECTORY/$test" ;;
esac
test ! -z "$debug" || remove_trash=$TRASH_DIRECTORY
rm -fr "$test" || {
HOME="$TRASH_DIRECTORY"
export HOME
-test_create_repo "$test"
+if test -z "$TEST_NO_CREATE_REPO"; then
+ test_create_repo "$test"
+else
+ mkdir -p "$test"
+fi
# Use -P to resolve symlinks in our working directory so that the cwd
# in subprocesses like git equals our $PWD (for pathname comparisons).
cd -P "$test" || exit 1