2 # vim:ts=4:sw=4:expandtab
3 # © 2010-2012 Michael Stapelberg and contributors
9 # the following are modules which ship with Perl (>= 5.10):
12 use File::Temp qw(tempfile tempdir);
17 use TAP::Parser::Aggregator;
18 use Time::HiRes qw(time);
20 # these are shipped with the testsuite
25 # the following modules are not shipped with Perl
29 use AnyEvent::I3 qw(:all);
30 use X11::XCB::Connection;
31 use JSON::XS; # AnyEvent::I3 depends on it, too.
33 binmode STDOUT, ':utf8';
34 binmode STDERR, ':utf8';
36 # Close superfluous file descriptors which were passed by running in a VIM
37 # subshell or situations like that.
38 AnyEvent::Util::close_all_fds_except(0, 1, 2);
# convenience wrapper to write to the log file
# Write one line (the joined arguments) to the test-run log file, using the
# global $log filehandle opened further down in this script.
sub Log {
    print {$log} "@_", "\n";
}
# Number of tests to run in parallel. Important to know how many Xdummy
# instances we need to start (unless @displays are given). Defaults to
# Flag: when set, the stdout/stderr of the Xdummy processes stays visible.
my $keep_xdummy_output = 0;
# Parse command-line switches; each option stores into %options or into one
# of the scalars/arrays declared above.
my $result = GetOptions(
"coverage-testing" => \$options{coverage},
"keep-xdummy-output" => \$keep_xdummy_output,
"valgrind" => \$options{valgrind},
"strace" => \$options{strace},
"xtrace" => \$options{xtrace},
"display=s" => \@displays,
"parallel=i" => \$parallel,
# Show the full POD documentation and exit successfully on --help.
pod2usage(-verbose => 2, -exitcode => 0) if $help;
73 # Check for missing executables
77 ../i3-config-wizard/i3-config-wizard
78 ../i3-dump-log/i3-dump-log
81 ../i3-nagbar/i3-nagbar
foreach my $binary (@binaries) {
# -e: the binary must exist (i.e. "make" was run beforehand)…
die "$binary executable not found, did you run “make”?" unless -e $binary;
# …and -x: it must actually be marked executable.
die "$binary is not an executable" unless -x $binary;
# --display may be given multiple times and/or as a comma-separated list;
# normalize everything into one flat list with whitespace stripped.
@displays = split(/,/, join(',', @displays));
@displays = map { s/ //g; $_ } @displays;
# 2: get a list of all testcases
my @testfiles = @ARGV;
# if no files were passed on command line, run all tests from t/
@testfiles = <t/*.t> if @testfiles == 0;
my $numtests = scalar @testfiles;
# No displays specified, let’s start some Xdummy instances.
if (@displays == 0) {
@displays = start_xdummy($parallel, $numtests, $keep_xdummy_output);
# 1: create an output directory for this test-run
# The directory name encodes timestamp plus the current git revision, e.g.
# "testsuite-2012-01-01-12-00-00-<git describe output>".
my $outdir = "testsuite-";
$outdir .= POSIX::strftime("%Y-%m-%d-%H-%M-%S-", localtime());
# NOTE(review): backtick output ends with a newline, so $outdir appears to
# end with "\n" here — confirm whether a chomp is missing upstream.
$outdir .= `git describe --tags`;
mkdir($outdir) or die "Could not create $outdir";
# Refresh the "latest" symlink so the newest results are easy to find.
unlink("latest") if -e "latest";
symlink("$outdir", "latest") or die "Could not symlink latest to $outdir";
# connect to all displays for two reasons:
# 1: check if the display actually works
# 2: keep the connection open so that i3 is not the only client. this prevents
# the X server from exiting (Xdummy will restart it, but not quick enough
for my $display (@displays) {
my $x = X11::XCB::Connection->new(display => $display);
# NOTE(review): presumably guarded by a connection-error check on a line not
# shown here; as written the die would be unconditional — verify context.
die "Could not connect to display $display\n";
# start a TestWorker for each display
push @single_worker, worker($display, $x, $outdir, \%options);
# Read previous timing information, if available. We will be able to roughly
# predict the test duration and schedule a good order for the tests.
my $timingsjson = StartXDummy::slurp('.last_run_timings.json');
%timings = %{decode_json($timingsjson)} if length($timingsjson) > 0;
# Re-order the files so that those which took the longest time in the previous
# run will be started at the beginning to not delay the whole run longer than
# Schwartzian transform; tests without a recorded timing get a high default
# weight (999) so that unknown tests are scheduled first.
@testfiles = map { $_->[0] }
sort { $b->[1] <=> $a->[1] }
map { [$_, $timings{$_} // 999] } @testfiles;
# Run 000-load-deps.t first to bail out early when dependencies are missing.
# NOTE(review): replaced the smartmatch membership test ($loadtest ~~
# @testfiles) with an explicit grep — smartmatch has been experimental since
# Perl 5.18 and was removed entirely in recent perls.
my $loadtest = "t/000-load-deps.t";
if (grep { $_ eq $loadtest } @testfiles) {
@testfiles = ($loadtest, grep { $_ ne $loadtest } @testfiles);
printf("\nRough time estimate for this run: %.2f seconds\n\n", $timings{GLOBAL})
if exists($timings{GLOBAL});
# Forget the old timings, we don’t necessarily run the same set of tests as
# before. Otherwise we would end up with left-overs.
# GLOBAL temporarily stores the run start timestamp; it is converted to the
# total duration once the run completes.
%timings = (GLOBAL => time());
my $logfile = "$outdir/complete-run.log";
open $log, '>', $logfile or die "Could not create '$logfile': $!";
say "Writing logfile to '$logfile'...";
# Scalar context: number of test files scheduled for this run.
my $num = @testfiles;
my $harness = TAP::Harness->new({ });
# The aggregator collects the parsed TAP results of all tests for the final
# summary printed at the end.
my $aggregator = TAP::Parser::Aggregator->new();
$aggregator->start();
status_init(displays => \@displays, tests => $num);
# Condvar which gets triggered once all tests are done (see take_job).
my $single_cv = AE::cv;
# We start tests concurrently: For each display, one test gets started. Every
# test starts another test after completing.
for (@single_worker) {
take_job($_, $single_cv, \@testfiles);
# print empty lines to separate failed tests from statuslines
# Each @done entry is a [ testname, raw TAP output ] pair (see take_job).
my ($test, $output) = @$_;
say "no output for $test" unless $output;
Log "output for $test:";
# print error messages of failed tests
# Matches the "#"-prefixed diagnostic block following each "not ok" line.
say for $output =~ /^not ok.+\n+((?:^#.+\n)+)/mg
$harness->summary($aggregator);
# 5: Save the timings for better scheduling/prediction next run.
# Convert the stored start timestamp into the total wall-clock duration.
$timings{GLOBAL} = time() - $timings{GLOBAL};
# Check the open: silently losing the timings file would degrade scheduling
# on the next run without any hint as to why.
open(my $fh, '>', '.last_run_timings.json')
    or die "Could not write .last_run_timings.json: $!";
print $fh encode_json(\%timings);
# 6: Print the slowest test files.
# Schwartzian transform over the recorded per-test durations; the GLOBAL key
# holds the total run time, not a single test, so it is filtered out.
my @slowest = map { $_->[0] }
sort { $b->[1] <=> $a->[1] }
map { [$_, $timings{$_}] }
grep { !/^GLOBAL$/ } keys %timings;
say 'The slowest tests are:';
# Print at most the top five entries.
printf("\t%s with %.2f seconds\n", $_, $timings{$_})
for @slowest[0..($#slowest > 4 ? 4 : $#slowest)];
# When we are running precisely one test, print the output. Makes developing
# with a single testcase easier.
if ($numtests == 1) {
say StartXDummy::slurp($logfile);
# Takes a test from the beginning of @testfiles and runs it.
# The TAP::Parser (which reads the test output) will get called as soon as
# there is some activity on the stdout file descriptor of the test process
# (using an AnyEvent->io watcher).
# When a test completes and @done contains $num entries, the $cv condvar gets
# triggered to finish testing.
my ($worker, $cv, $tests) = @_;
# Take the next scheduled test (longest-running first, see the re-ordering
# near the top of this script).
my $test = shift @$tests
my $display = $worker->{display};
Log status($display, "$test: starting");
# Record the start timestamp; converted into a duration when the test ends.
$timings{$test} = time();
worker_next($worker, $test);
# create a TAP::Parser with an in-memory fh
# The parser reads from $output, which is appended to incrementally as TAP
# lines arrive from the worker process.
my $parser = TAP::Parser->new({
source => do { open(my $fh, '<', \$output); $fh },
my $ipc = $worker->{ipc};
# Persists across invocations of this callback (Perl 5.10 "state").
state $tests_completed = 0;
sysread($ipc, my $buf, 4096) or die "sysread: $!";
# Prepend any incomplete line left over from the previous read.
$buf = $partial . $buf;
# make sure we feed TAP::Parser complete lines so it doesn't blow up
if (substr($buf, -1, 1) ne "\n") {
my $nl = rindex($buf, "\n");
# strip partial from buffer
# NOTE(review): three-arg substr with '' as LENGTH evaluates to length 0 and
# yields the empty string — this looks like it was meant to be the four-arg
# extract-and-replace form; confirm against upstream.
$partial = substr($buf, $nl + 1, '');
# count lines before stripping eof-marker otherwise we might
# end up with for (1 .. 0) { } which would effectively skip the loop
my $lines = $buf =~ tr/\n//;
# Remove the worker’s end-of-test marker; $t_eof records whether it was seen.
my $t_eof = $buf =~ s/^$TestWorker::EOF$//m;
my $result = $parser->next;
next unless defined($result);
if ($result->is_test) {
status($display, "$test: [$tests_completed/??] ");
} elsif ($result->is_bailout) {
Log status($display, "$test: BAILOUT");
status_completed(scalar @done);
say "test $test bailed out: " . $result->explanation;
# Only finalize this test once the end-of-test marker has arrived.
return unless $t_eof;
Log status($display, "$test: finished");
$timings{$test} = time() - $timings{$test};
status_completed(scalar @done);
# Hand the parsed results to the aggregator and remember the raw output for
# the failure summary printed at the end of the run.
$aggregator->add($test, $parser);
push @done, [ $test, $output ];
# Start the next scheduled test on this worker/display.
take_job($worker, $cv, $tests);
# Run all registered cleanup handlers (kill workers, remove temp state, …).
$_->() for our @CLEANUP;
# must be in a begin block because we C<exit 0> above
# On fatal signals, print a stack trace to aid debugging.
require Carp; Carp::cluck("Caught SIG$_[0]\n");
} for qw(INT TERM QUIT KILL PIPE)
341 complete-run.pl - Run the i3 testsuite
345 complete-run.pl [files...]
349 To run the whole testsuite on a reasonable number of Xdummy instances (your
350 running X11 will not be touched), run:
353 To run only a specific test (useful when developing a new feature), run:
./complete-run.pl t/100-fullscreen.t
362 Specifies which X11 display should be used. Can be specified multiple times and
363 will parallelize the tests:
365 # Run tests on the second X server
366 ./complete-run.pl -d :1
368 # Run four tests in parallel on some Xdummy servers
369 ./complete-run.pl -d :1,:2,:3,:4
371 Note that it is not necessary to specify this anymore. If omitted,
372 complete-run.pl will start (num_cores * 2) Xdummy instances.
376 Runs i3 under valgrind to find memory problems. The output will be available in
377 C<latest/valgrind-for-$test.log>.
381 Runs i3 under strace to trace system calls. The output will be available in
382 C<latest/strace-for-$test.log>.
386 Runs i3 under xtrace to trace X11 requests/replies. The output will be
387 available in C<latest/xtrace-for-$test.log>.
389 =item B<--coverage-testing>
391 Exits i3 cleanly (instead of kill -9) to make coverage testing work properly.
395 Number of Xdummy instances to start (if you don’t want to start num_cores * 2
396 instances for some reason).
398 # Run all tests on a single Xdummy instance
399 ./complete-run.pl -p 1