2 # vim:ts=4:sw=4:expandtab
3 # © 2010-2011 Michael Stapelberg and contributors
8 # the following are modules which ship with Perl (>= 5.10):
11 use File::Temp qw(tempfile tempdir);
16 use TAP::Parser::Aggregator;
17 use Time::HiRes qw(time);
18 # these are shipped with the testsuite
23 # the following modules are not shipped with Perl
27 use AnyEvent::I3 qw(:all);
28 use X11::XCB::Connection;
29 use JSON::XS; # AnyEvent::I3 depends on it, too.
# Close superfluous file descriptors which were passed by running in a VIM
# subshell or situations like that. Only stdin (0), stdout (1) and stderr (2)
# are kept open; inherited descriptors could otherwise keep pipes or X
# connections alive for the whole test run.
AnyEvent::Util::close_all_fds_except(0, 1, 2);
# Convenience wrapper: write all arguments (space-separated) as one line to
# the log file handle of this test run.
sub Log { local $\ = "\n"; print {$log} "@_" }
# Command-line flags; see the POD at the bottom of this file for the
# user-facing documentation of each option.
my $coverage_testing = 0;
# Number of tests to run in parallel. Important to know how many Xdummy
# instances we need to start (unless @displays are given). Defaults to
my $result = GetOptions(
    "coverage-testing" => \$coverage_testing,
    "valgrind" => \$valgrind,
    "display=s" => \@displays,    # may be given multiple times
    "parallel=i" => \$parallel,
# Getopt::Long has consumed the options above; show the full POD and exit
# when --help was requested.
pod2usage(-verbose => 2, -exitcode => 0) if $help;
# A single -d argument may itself contain a comma-separated list of displays;
# flatten all given -d options into one entry per display and strip all
# whitespace from each entry.
@displays = split(/,/, join(',', @displays));
# Copy $_ into a lexical before substituting: s/// on the bare map topic
# modifies the aliased source elements in place, which is a fragile idiom
# (Perl::Critic: "Don't modify $_ in map"). The result is identical.
@displays = map { my $d = $_; $d =~ s/ //g; $d } @displays;
# 2: get a list of all testcases
my @testfiles = @ARGV;

# if no files were passed on command line, run all tests from t/
@testfiles = <t/*.t> if @testfiles == 0;

my $numtests = scalar @testfiles;

# No displays specified, let’s start some Xdummy instances.
# (start_xdummy is provided by the testsuite's own helper modules — see the
# StartXDummy uses elsewhere in this file.)
@displays = start_xdummy($parallel, $numtests) if @displays == 0;
# 1: create an output directory for this test-run
# The name combines a timestamp with the git version so each run's results
# are kept apart and identify the exact build under test.
my $outdir = "testsuite-";
$outdir .= POSIX::strftime("%Y-%m-%d-%H-%M-%S-", localtime());
$outdir .= `git describe --tags`;
# NOTE(review): the backticks capture includes a trailing newline; presumably
# it is chomp()ed on a line not shown in this excerpt — verify, otherwise the
# directory name ends in "\n".
mkdir($outdir) or die "Could not create $outdir";
# Re-point the "latest" convenience symlink at this run's output directory.
# Test with -l in addition to -e: for a dangling symlink (e.g. the previous
# output directory was deleted) -e is false, the stale link would never be
# unlinked, and symlink() below would then fail with EEXIST.
unlink("latest") if -l "latest" or -e "latest";
symlink("$outdir", "latest") or die "Could not symlink latest to $outdir";
# connect to all displays for two reasons:
# 1: check if the display actually works
# 2: keep the connection open so that i3 is not the only client. this prevents
#    the X server from exiting (Xdummy will restart it, but not quick enough
for my $display (@displays) {
    my $x = X11::XCB::Connection->new(display => $display);
        # NOTE(review): the error check guarding this die (most likely a
        # test on the connection object) is on a line not shown here.
        die "Could not connect to display $display\n";
    # start a TestWorker for each display
    push @worker, worker($display, $x, $outdir);
# Read previous timing information, if available. We will be able to roughly
# predict the test duration and schedule a good order for the tests.
# %timings maps test file name => duration in seconds, plus the special
# GLOBAL key holding the whole previous run's duration.
my $timingsjson = StartXDummy::slurp('.last_run_timings.json');
# An empty/zero-length slurp result (presumably: file missing) leaves
# %timings empty, so the scheduling below falls back to defaults.
%timings = %{decode_json($timingsjson)} if length($timingsjson) > 0;

# Re-order the files so that those which took the longest time in the previous
# run will be started at the beginning to not delay the whole run longer than
# Sort the test files by their previous duration, slowest first. Files
# without a recorded timing get a large default (999s) so that unknown
# tests are scheduled early.
my %estimated = map { ($_ => $timings{$_} // 999) } @testfiles;
@testfiles = sort { $estimated{$b} <=> $estimated{$a} } @testfiles;
# With the previous run's total duration available, give a rough estimate
# for this run.
printf("\nRough time estimate for this run: %.2f seconds\n\n", $timings{GLOBAL})
    if exists($timings{GLOBAL});

# Forget the old timings, we don’t necessarily run the same set of tests as
# before. Otherwise we would end up with left-overs.
# GLOBAL now holds the start timestamp; it is converted into the total
# duration once all tests have finished.
%timings = (GLOBAL => time());

my $logfile = "$outdir/complete-run.log";
open $log, '>', $logfile or die "Could not create '$logfile': $!";
say "Writing logfile to '$logfile'...";
# Number of tests that will be run (used for the status display).
my $num = @testfiles;
my $harness = TAP::Harness->new({ });

my $aggregator = TAP::Parser::Aggregator->new();
# Mark the start of the run for the aggregator's elapsed-time reporting.
$aggregator->start();

status_init(displays => \@displays, tests => $num);

# We start tests concurrently: For each display, one test gets started. Every
# test starts another test after completing.
# ($cv is an AnyEvent condition variable declared on a line not shown here;
# each begin() must be matched by an end() when a worker drains its tests.)
for (@worker) { $cv->begin; take_job($_) }
# print empty lines to separate failed tests from statuslines
    # NOTE(review): the following lines run inside a loop over @done whose
    # header is not part of this excerpt; each entry is a [ $test, $output ]
    # pair as pushed at the end of take_job().
    my ($test, $output) = @$_;
    say "no output for $test" unless $output;
    Log "output for $test:";
    # print error messages of failed tests
    # (extracts the "#"-prefixed diagnostic lines following each "not ok")
    say for $output =~ /^not ok.+\n+((?:^#.+\n)+)/mg
# Print the aggregated TAP summary (pass/fail counts, parse errors).
$harness->summary($aggregator);
# 5: Save the timings for better scheduling/prediction next run.
# GLOBAL held the start timestamp until now; turn it into the total duration.
$timings{GLOBAL} = time() - $timings{GLOBAL};
# Saving the timings cache is best-effort: failing to write it must not
# abort the run after all tests have completed, but a silent unchecked
# open() (printing to an invalid handle) is not acceptable either — warn.
if (open(my $fh, '>', '.last_run_timings.json')) {
    print $fh encode_json(\%timings);
} else {
    warn "Could not save timings to .last_run_timings.json: $!\n";
}
# 6: Print the (up to five) slowest test files of this run. The GLOBAL key
# is the whole run's duration, not a test, so it is filtered out.
my @slowest = sort { $timings{$b} <=> $timings{$a} }
              grep { !/^GLOBAL$/ } keys %timings;

say 'The slowest tests are:';
my $last = $#slowest > 4 ? 4 : $#slowest;
for my $file (@slowest[0 .. $last]) {
    printf("\t%s with %.2f seconds\n", $file, $timings{$file});
}
# When we are running precisely one test, print the output. Makes developing
# with a single testcase easier.
if ($numtests == 1) {
    # Re-read the complete log file and dump it to stdout.
    say StartXDummy::slurp($logfile);
# Takes a test from the beginning of @testfiles and runs it.
#
# The TAP::Parser (which reads the test output) will get called as soon as
# there is some activity on the stdout file descriptor of the test process
# (using an AnyEvent->io watcher).
#
# When a test completes and @done contains $num entries, the $cv condvar gets
# triggered to finish testing.
    # NOTE(review): the "sub take_job {" opening line and several lines of
    # this body are not part of this excerpt; the code below is kept verbatim.
    # NOTE(review): this statement continues on a following line not shown
    # (presumably "or return;" when no test files are left).
    my $test = shift @testfiles
    my $display = $worker->{display};
    Log status($display, "$test: starting");
    # Remember the start timestamp; replaced by the duration once finished.
    $timings{$test} = time();
    worker_next($worker, $test);
    # create a TAP::Parser with an in-memory fh
    my $parser = TAP::Parser->new({
        # Opening a handle onto a reference to $output lets TAP::Parser
        # consume the test output accumulated in that scalar.
        source => do { open(my $fh, '<', \$output); $fh },
    my $ipc = $worker->{ipc};
        state $tests_completed = 0;
        sysread($ipc, my $buf, 4096) or die "sysread: $!";
        # Prepend any partial line left over from the previous read.
        $buf = $partial . $buf;
        # make sure we feed TAP::Parser complete lines so it doesn't blow up
        if (substr($buf, -1, 1) ne "\n") {
            my $nl = rindex($buf, "\n");
            # strip partial from buffer
            # NOTE(review): with three arguments, '' here is the LENGTH
            # parameter (numifies to 0), so this always assigns the empty
            # string and leaves $buf unmodified. The four-argument
            # extract-and-delete form of substr looks intended — verify.
            $partial = substr($buf, $nl + 1, '');
        # count lines before stripping eof-marker otherwise we might
        # end up with for (1 .. 0) { } which would effectively skip the loop
        my $lines = $buf =~ tr/\n//;
        # Strip the worker's EOF marker line; a true $t_eof means this
        # test's process has finished.
        my $t_eof = $buf =~ s/^$TestWorker::EOF$//m;
            my $result = $parser->next;
            if (defined($result) and $result->is_test) {
                # Update the per-display status line with the progress so far.
                status($display, "$test: [$tests_completed/??] ");
        # Only fall through to the completion handling below once the EOF
        # marker was seen.
        return unless $t_eof;
        Log status($display, "$test: finished");
        # Convert the stored start timestamp into this test's duration.
        $timings{$test} = time() - $timings{$test};
        status_completed(scalar @done);
        $aggregator->add($test, $parser);
        # Keep the raw output around for the failure report printed later.
        push @done, [ $test, $output ];
# Run all registered cleanup callbacks (package-global so other parts of the
# testsuite can push their own teardown code onto it).
$_->() for our @CLEANUP;

# must be in a begin block because we C<exit 0> above
    # Lazy-load Carp only when a signal actually arrives, then print a full
    # stack trace for debugging.
    require Carp; Carp::cluck("Caught SIG$_[0]\n");
# NOTE(review): SIGKILL cannot be caught or handled; installing a handler
# for KILL in this list has no effect.
} for qw(INT TERM QUIT KILL PIPE)
294 complete-run.pl - Run the i3 testsuite
298 complete-run.pl [files...]
302 To run the whole testsuite on a reasonable number of Xdummy instances (your
303 running X11 will not be touched), run:
306 To run only a specific test (useful when developing a new feature), run:
./complete-run.pl t/100-fullscreen.t
315 Specifies which X11 display should be used. Can be specified multiple times and
316 will parallelize the tests:
318 # Run tests on the second X server
319 ./complete-run.pl -d :1
321 # Run four tests in parallel on some Xdummy servers
322 ./complete-run.pl -d :1,:2,:3,:4
324 Note that it is not necessary to specify this anymore. If omitted,
325 complete-run.pl will start (num_cores * 2) Xdummy instances.
329 Runs i3 under valgrind to find memory problems. The output will be available in
330 C<latest/valgrind-for-$test.log>.
334 Runs i3 under strace to trace system calls. The output will be available in
335 C<latest/strace-for-$test.log>.
337 =item B<--coverage-testing>
339 Exits i3 cleanly (instead of kill -9) to make coverage testing work properly.
343 Number of Xdummy instances to start (if you don’t want to start num_cores * 2
344 instances for some reason).
346 # Run all tests on a single Xdummy instance
347 ./complete-run.pl -p 1