X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=testcases%2Fcomplete-run.pl;h=61f2ef52451115e3051e3a6d88d0945fb41350f7;hb=dd5be77d1dcc7e7eb40ce1c33f985816fc0b8f79;hp=15def35c85516332c5acc3f771969810c573da48;hpb=e09e077b763bb1ff1975998c87d017afa9c319c3;p=i3%2Fi3

diff --git a/testcases/complete-run.pl b/testcases/complete-run.pl
index 15def35c..61f2ef52 100755
--- a/testcases/complete-run.pl
+++ b/testcases/complete-run.pl
@@ -1,10 +1,11 @@
 #!/usr/bin/env perl
 # vim:ts=4:sw=4:expandtab
-# © 2010-2011 Michael Stapelberg and contributors
+# © 2010-2012 Michael Stapelberg and contributors
 package complete_run;
 use strict;
 use warnings;
 use v5.10;
+use utf8;
 # the following are modules which ship with Perl (>= 5.10):
 use Pod::Usage;
 use Cwd qw(abs_path);
@@ -15,9 +16,10 @@ use TAP::Harness;
 use TAP::Parser;
 use TAP::Parser::Aggregator;
 use Time::HiRes qw(time);
+use IO::Handle;
 # these are shipped with the testsuite
 use lib qw(lib);
-use StartXDummy;
+use StartXServer;
 use StatusLine;
 use TestWorker;
 # the following modules are not shipped with Perl
@@ -28,6 +30,9 @@ use AnyEvent::I3 qw(:all);
 use X11::XCB::Connection;
 use JSON::XS; # AnyEvent::I3 depends on it, too.
 
+binmode STDOUT, ':utf8';
+binmode STDERR, ':utf8';
+
 # Close superfluous file descriptors which were passed by running in a VIM
 # subshell or situations like that.
 AnyEvent::Util::close_all_fds_except(0, 1, 2);
@@ -38,7 +43,7 @@ sub Log { say $log "@_" }
 
 my %timings;
 my $help = 0;
-# Number of tests to run in parallel. Important to know how many Xdummy
+# Number of tests to run in parallel. Important to know how many Xephyr
 # instances we need to start (unless @displays are given). Defaults to
 # num_cores * 2.
 my $parallel = undef;
@@ -46,14 +51,18 @@ my @displays = ();
 my %options = (
     valgrind => 0,
     strace => 0,
+    xtrace => 0,
     coverage => 0,
     restart => 0,
 );
+my $keep_xserver_output = 0;
 my $result = GetOptions(
     "coverage-testing" => \$options{coverage},
+    "keep-xserver-output" => \$keep_xserver_output,
     "valgrind" => \$options{valgrind},
     "strace" => \$options{strace},
+    "xtrace" => \$options{xtrace},
     "display=s" => \@displays,
     "parallel=i" => \$parallel,
     "help|?" => \$help,
 );
@@ -61,6 +70,25 @@ my $result = GetOptions(
 
 pod2usage(-verbose => 2, -exitcode => 0) if $help;
 
+# Check for missing executables
+my @binaries = qw(
+    ../i3
+    ../i3bar/i3bar
+    ../i3-config-wizard/i3-config-wizard
+    ../i3-dump-log/i3-dump-log
+    ../i3-input/i3-input
+    ../i3-msg/i3-msg
+    ../i3-nagbar/i3-nagbar
+    );
+
+foreach my $binary (@binaries) {
+    die "$binary executable not found, did you run “make”?" unless -e $binary;
+    die "$binary is not an executable" unless -x $binary;
+}
+
+qx(Xephyr -help 2>&1);
+die "Xephyr was not found in your path. Please install Xephyr (xserver-xephyr on Debian)." if $?;
+
 @displays = split(/,/, join(',', @displays));
 @displays = map { s/ //g; $_ } @displays;
 
@@ -72,16 +100,9 @@ my @testfiles = @ARGV;
 
 my $numtests = scalar @testfiles;
 
-# When the user specifies displays, we don’t run multi-monitor tests at all
-# (because we don’t know which displaynumber is the X-Server with multiple
-# monitors).
-my $multidpy = undef;
-
-# No displays specified, let’s start some Xdummy instances.
+# No displays specified, let’s start some Xephyr instances.
 if (@displays == 0) {
-    my $dpyref;
-    ($dpyref, $multidpy) = start_xdummy($parallel, $numtests);
-    @displays = @$dpyref;
+    @displays = start_xserver($parallel, $numtests, $keep_xserver_output);
 }
 
 # 1: create an output directory for this test-run
@@ -90,15 +111,14 @@
 $outdir .= POSIX::strftime("%Y-%m-%d-%H-%M-%S-", localtime());
 $outdir .= `git describe --tags`;
 chomp($outdir);
 mkdir($outdir) or die "Could not create $outdir";
-unlink("latest") if -e "latest";
+unlink("latest") if -l "latest";
 symlink("$outdir", "latest") or die "Could not symlink latest to $outdir";
 
 # connect to all displays for two reasons:
 # 1: check if the display actually works
 # 2: keep the connection open so that i3 is not the only client. this prevents
-#    the X server from exiting (Xdummy will restart it, but not quick enough
-#    sometimes)
+#    the X server from exiting
 my @single_worker;
 for my $display (@displays) {
     my $screen;
@@ -111,19 +131,9 @@ for my $display (@displays) {
     }
 }
 
-my @multi_worker;
-if (defined($multidpy)) {
-    my $x = X11::XCB::Connection->new(display => $multidpy);
-    if ($x->has_error) {
-        die "Could not connect to multi-monitor display $multidpy\n";
-    } else {
-        push @multi_worker, worker($multidpy, $x, $outdir, \%options);
-    }
-}
-
 # Read previous timing information, if available. We will be able to roughly
 # predict the test duration and schedule a good order for the tests.
-my $timingsjson = StartXDummy::slurp('.last_run_timings.json');
+my $timingsjson = StartXServer::slurp('.last_run_timings.json');
 %timings = %{decode_json($timingsjson)} if length($timingsjson) > 0;
 
 # Re-order the files so that those which took the longest time in the previous
@@ -133,6 +143,12 @@ my $timingsjson = StartXDummy::slurp('.last_run_timings.json');
     sort { $b->[1] <=> $a->[1] }
     map { [$_, $timings{$_} // 999] } @testfiles;
 
+# Run 000-load-deps.t first to bail out early when dependencies are missing.
+my $loadtest = "t/000-load-deps.t";
+if ((scalar grep { $_ eq $loadtest } @testfiles) > 0) {
+    @testfiles = ($loadtest, grep { $_ ne $loadtest } @testfiles);
+}
+
 printf("\nRough time estimate for this run: %.2f seconds\n\n", $timings{GLOBAL})
     if exists($timings{GLOBAL});
 
@@ -142,6 +158,7 @@ printf("\nRough time estimate for this run: %.2f seconds\n\n", $timings{GLOBAL})
 
 my $logfile = "$outdir/complete-run.log";
 open $log, '>', $logfile or die "Could not create '$logfile': $!";
+$log->autoflush(1);
 say "Writing logfile to '$logfile'...";
 
 # 3: run all tests
@@ -149,30 +166,21 @@ my @done;
 my $num = @testfiles;
 my $harness = TAP::Harness->new({ });
 
-my @single_monitor_tests = grep { m,^t/([0-9]+)-, && $1 < 500 } @testfiles;
-my @multi_monitor_tests = grep { m,^t/([0-9]+)-, && $1 >= 500 } @testfiles;
-
 my $aggregator = TAP::Parser::Aggregator->new();
 $aggregator->start();
 
-status_init(displays => [ @displays, $multidpy ], tests => $num);
+status_init(displays => \@displays, tests => $num);
 
 my $single_cv = AE::cv;
-my $multi_cv = AE::cv;
 
 # We start tests concurrently: For each display, one test gets started. Every
 # test starts another test after completing.
 for (@single_worker) {
     $single_cv->begin;
-    take_job($_, $single_cv, \@single_monitor_tests);
-}
-for (@multi_worker) {
-    $multi_cv->begin;
-    take_job($_, $multi_cv, \@multi_monitor_tests);
+    take_job($_, $single_cv, \@testfiles);
 }
 
 $single_cv->recv;
-$multi_cv->recv;
 
 $aggregator->stop();
 
@@ -214,7 +222,7 @@ printf("\t%s with %.2f seconds\n", $_, $timings{$_})
 if ($numtests == 1) {
     say '';
     say 'Test output:';
-    say StartXDummy::slurp($logfile);
+    say StartXServer::slurp($logfile);
 }
 
 END { cleanup() }
@@ -287,9 +295,16 @@ sub take_job {
 
         for (1 .. $lines) {
             my $result = $parser->next;
-            if (defined($result) and $result->is_test) {
+            next unless defined($result);
+            if ($result->is_test) {
                 $tests_completed++;
                 status($display, "$test: [$tests_completed/??] ");
+            } elsif ($result->is_bailout) {
+                Log status($display, "$test: BAILOUT");
+                status_completed(scalar @done);
+                say "";
+                say "test $test bailed out: " . $result->explanation;
+                exit 1;
             }
         }
 
@@ -333,7 +348,7 @@ complete-run.pl [files...]
 
 =head1 EXAMPLE
 
-To run the whole testsuite on a reasonable number of Xdummy instances (your
+To run the whole testsuite on a reasonable number of Xephyr instances (your
 running X11 will not be touched), run:
 
     ./complete-run.pl
 
@@ -352,11 +367,11 @@ will parallelize the tests:
 
     # Run tests on the second X server
     ./complete-run.pl -d :1
 
-    # Run four tests in parallel on some Xdummy servers
+    # Run four tests in parallel on some Xephyr servers
     ./complete-run.pl -d :1,:2,:3,:4
 
 Note that it is not necessary to specify this anymore. If omitted,
-complete-run.pl will start (num_cores * 2) Xdummy instances.
+complete-run.pl will start (num_cores * 2) Xephyr instances.
 
 =item B<--valgrind>
 
@@ -368,14 +383,19 @@ C.
 
 Runs i3 under strace to trace system calls. The output will be
 available in the C<latest/> output directory.
 
+=item B<--xtrace>
+
+Runs i3 under xtrace to trace X11 requests/replies. The output will be
+available in the C<latest/> output directory.
+
 =item B<--coverage-testing>
 
 Exits i3 cleanly (instead of kill -9) to make coverage testing work properly.
 
 =item B<--parallel>
 
-Number of Xdummy instances to start (if you don’t want to start num_cores * 2
+Number of Xephyr instances to start (if you don’t want to start num_cores * 2
 instances for some reason).
 
-    # Run all tests on a single Xdummy instance
+    # Run all tests on a single Xephyr instance
     ./complete-run.pl -p 1
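
Not part of the diff above: the scheduling change it introduces (order tests by the durations recorded in .last_run_timings.json, then pull t/000-load-deps.t to the front) can be read in isolation. Below is a minimal, standalone Perl sketch of that ordering logic; the test file names and timing values are invented for illustration, and only t/000-load-deps.t is taken from the change itself.

    #!/usr/bin/env perl
    # Illustrative sketch of the test ordering used by complete-run.pl.
    use strict;
    use warnings;
    use v5.10;

    # Example data; the real script decodes %timings from .last_run_timings.json.
    my %timings = (
        't/100-example-a.t' => 3.2,   # hypothetical file, took 3.2s last run
        't/200-example-b.t' => 0.4,   # hypothetical file, took 0.4s last run
    );
    my @testfiles = (
        't/200-example-b.t',
        't/000-load-deps.t',
        't/100-example-a.t',
        't/300-example-c.t',          # hypothetical file, no timing known yet
    );

    # Longest-running tests first; tests without a recorded timing get a large
    # default (999) so they are scheduled early rather than last.
    @testfiles = map  { $_->[0] }
                 sort { $b->[1] <=> $a->[1] }
                 map  { [$_, $timings{$_} // 999] } @testfiles;

    # The dependency check always runs first so that missing Perl modules
    # cause an early bail-out instead of failing every single test.
    my $loadtest = 't/000-load-deps.t';
    @testfiles = ($loadtest, grep { $_ ne $loadtest } @testfiles)
        if grep { $_ eq $loadtest } @testfiles;

    say for @testfiles;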