use TAP::Harness;
use TAP::Parser;
use TAP::Parser::Aggregator;
+use Time::HiRes qw(time);
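+# Time::HiRes replaces the built-in time() so that the timings below have
+# sub-second resolution.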
# these are shipped with the testsuite
use lib qw(lib);
use StartXDummy;
use AnyEvent::Handle;
use AnyEvent::I3 qw(:all);
use X11::XCB::Connection;
+use JSON::XS; # AnyEvent::I3 depends on it, too.
# Close superfluous file descriptors which were inherited from running in a
# VIM subshell or similar situations.
my $log;
sub Log { say $log "@_" }
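+# %timings maps a test filename to its duration in seconds; the GLOBAL key
+# holds the duration of the whole run.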
+my %timings;
my $coverage_testing = 0;
my $valgrind = 0;
my $strace = 0;
@displays = split(/,/, join(',', @displays));
@displays = map { s/ //g; $_ } @displays;
+# 2: get a list of all testcases
+my @testfiles = @ARGV;
+
+# if no files were passed on command line, run all tests from t/
+@testfiles = <t/*.t> if @testfiles == 0;
+
+my $numtests = scalar @testfiles;
+
+# When the user specifies displays, we don’t run multi-monitor tests at all
+# (because we don’t know which display number belongs to the X server with
+# multiple monitors).
+my $multidpy = undef;
+
# No displays specified, let’s start some Xdummy instances.
-@displays = start_xdummy($parallel) if @displays == 0;
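+# start_xdummy() returns a reference to the list of single-monitor displays
+# plus one additional display which has multiple monitors configured.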
+if (@displays == 0) {
+ my $dpyref;
+ ($dpyref, $multidpy) = start_xdummy($parallel, $numtests);
+ @displays = @$dpyref;
+}
# 1: create an output directory for this test-run
my $outdir = "testsuite-";
# 2: keep the connection open so that i3 is not the only client. This prevents
# the X server from exiting (Xdummy will restart it, but sometimes not quickly
# enough).
-my @worker;
+my @single_worker;
for my $display (@displays) {
my $screen;
my $x = X11::XCB::Connection->new(display => $display);
    if ($x->has_error) {
        die "Could not connect to display $display\n";
    } else {
# start a TestWorker for each display
- push @worker, worker($display, $x, $outdir);
+ push @single_worker, worker($display, $x, $outdir);
}
}
-# 2: get a list of all testcases
-my @testfiles = @ARGV;
+my @multi_worker;
+if (defined($multidpy)) {
+ my $x = X11::XCB::Connection->new(display => $multidpy);
+ if ($x->has_error) {
+ die "Could not connect to multi-monitor display $multidpy\n";
+ } else {
+ push @multi_worker, worker($multidpy, $x, $outdir);
+ }
+}
-# if no files were passed on command line, run all tests from t/
-@testfiles = <t/*.t> if @testfiles == 0;
+# Read previous timing information, if available. We will be able to roughly
+# predict the test duration and schedule a good order for the tests.
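+# The timings file contains a single JSON object mapping each test file to
+# its duration in seconds, e.g. (illustrative values only):
+#   {"t/001-example.t":2.1,"t/002-example.t":0.4,"GLOBAL":87.3}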
+my $timingsjson = StartXDummy::slurp('.last_run_timings.json');
+%timings = %{decode_json($timingsjson)} if length($timingsjson) > 0;
+
+# Re-order the files so that those which took the longest in the previous run
+# are started first, to avoid delaying the whole run longer than necessary.
+@testfiles = map { $_->[0] }
+ sort { $b->[1] <=> $a->[1] }
+ map { [$_, $timings{$_} // 999] } @testfiles;
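+# (Tests without a recorded timing default to 999 seconds so that they are
+# scheduled first.)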
+
+printf("\nRough time estimate for this run: %.2f seconds\n\n", $timings{GLOBAL})
+ if exists($timings{GLOBAL});
+
+# Forget the old timings; we don’t necessarily run the same set of tests as
+# before and would otherwise end up with left-over entries.
+%timings = (GLOBAL => time());
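+# (GLOBAL temporarily holds the start timestamp of this run; it is converted
+# into the total duration once all tests have finished.)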
my $logfile = "$outdir/complete-run.log";
open $log, '>', $logfile or die "Could not create '$logfile': $!";
my $num = @testfiles;
my $harness = TAP::Harness->new({ });
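+# Test files are named t/<number>-<description>.t; numbers below 500 are
+# single-monitor tests, numbers from 500 on need the multi-monitor X server.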
+my @single_monitor_tests = grep { m,^t/([0-9]+)-, && $1 < 500 } @testfiles;
+my @multi_monitor_tests = grep { m,^t/([0-9]+)-, && $1 >= 500 } @testfiles;
+
my $aggregator = TAP::Parser::Aggregator->new();
$aggregator->start();
-status_init(displays => \@displays, tests => $num);
+status_init(displays => [ @displays, $multidpy ], tests => $num);
-my $cv = AE::cv;
+my $single_cv = AE::cv;
+my $multi_cv = AE::cv;
# We start tests concurrently: one test is started per display. When a test
# completes, the next one from the queue is started on the same display.
-for (@worker) { $cv->begin; take_job($_) }
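+# ->begin/->end turn each condvar into a counter: ->recv returns only after
+# every worker has exhausted its queue and called ->end.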
+for (@single_worker) {
+ $single_cv->begin;
+ take_job($_, $single_cv, \@single_monitor_tests);
+}
+for (@multi_worker) {
+ $multi_cv->begin;
+ take_job($_, $multi_cv, \@multi_monitor_tests);
+}
-$cv->recv;
+$single_cv->recv;
+$multi_cv->recv;
$aggregator->stop();
close $log;
+# 5: Save the timings for better scheduling/prediction next run.
+$timings{GLOBAL} = time() - $timings{GLOBAL};
+open(my $fh, '>', '.last_run_timings.json')
+    or die "Could not create .last_run_timings.json: $!";
+print $fh encode_json(\%timings);
+close($fh);
+
+# 6: Print the slowest test files.
+my @slowest = map { $_->[0] }
+ sort { $b->[1] <=> $a->[1] }
+ map { [$_, $timings{$_}] }
+ grep { !/^GLOBAL$/ } keys %timings;
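+# (We print at most the five slowest tests.)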
+say '';
+say 'The slowest tests are:';
+printf("\t%s with %.2f seconds\n", $_, $timings{$_})
+ for @slowest[0..($#slowest > 4 ? 4 : $#slowest)];
+
+# When we are running precisely one test, print the output. Makes developing
+# with a single testcase easier.
+if ($numtests == 1) {
+ say '';
+ say 'Test output:';
+ say StartXDummy::slurp($logfile);
+}
+
END { cleanup() }
exit 0;
# triggered to finish testing.
#
sub take_job {
- my ($worker) = @_;
+ my ($worker, $cv, $tests) = @_;
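+    # Grab the next test for this worker; if none are left, call ->end so
+    # that the corresponding ->recv can return.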
- my $test = shift @testfiles
+ my $test = shift @$tests
or return $cv->end;
my $display = $worker->{display};
Log status($display, "$test: starting");
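+    # Remember when this test was started; the elapsed time is stored as
+    # soon as it finishes.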
+ $timings{$test} = time();
worker_next($worker, $test);
# create a TAP::Parser with an in-memory fh
return unless $t_eof;
Log status($display, "$test: finished");
+ $timings{$test} = time() - $timings{$test};
status_completed(scalar @done);
$aggregator->add($test, $parser);
push @done, [ $test, $output ];
undef $w;
- take_job($worker);
+ take_job($worker, $cv, $tests);
}
);
}