Bweb - A Bacula web interface
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2006 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bweb is Eric Bollengier.
The main author of Bacula is Kern Sibbald, with contributions from
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
-=head1 VERSION
-
- $Id$
-
=cut
use Bweb;
$self->{cwdid} = $pathid;
}
-# do a cd ..
-sub up_dir
-{
- my ($self) = @_ ;
- my $query = "
- SELECT PPathId
- FROM brestore_pathhierarchy
- WHERE PathId IN ($self->{cwdid}) ";
-
- my $all = $self->dbh_selectall_arrayref($query);
- return unless ($all); # already at root
-
- my $dir = join(',', map { $_->[0] } @$all);
- if ($dir) {
- $self->ch_dir($dir);
- }
-}
-
# return the current PWD
sub pwd
{
sub update_cache
{
my ($self) = @_;
-
- $self->{dbh}->begin_work();
-
- # getting all Jobs to "cache"
- my $query = "
- SELECT JobId from Job
- WHERE JobId NOT IN (SELECT JobId FROM brestore_knownjobid)
- AND Type IN ('B') AND JobStatus IN ('T', 'f', 'A')
- ORDER BY JobId";
- my $jobs = $self->dbh_selectall_arrayref($query);
-
- $self->update_brestore_table(map { $_->[0] } @$jobs);
-
- $self->{dbh}->commit();
- $self->{dbh}->begin_work(); # we can break here
-
- print STDERR "Cleaning path visibility\n";
-
- my $nb = $self->dbh_do("
- DELETE FROM brestore_pathvisibility
- WHERE NOT EXISTS
- (SELECT 1 FROM Job WHERE JobId=brestore_pathvisibility.JobId)");
-
- print STDERR "$nb rows affected\n";
- print STDERR "Cleaning known jobid\n";
-
- $nb = $self->dbh_do("
- DELETE FROM brestore_knownjobid
- WHERE NOT EXISTS
- (SELECT 1 FROM Job WHERE JobId=brestore_knownjobid.JobId)");
-
- print STDERR "$nb rows affected\n";
-
- $self->{dbh}->commit();
+ my $b = $self->get_bconsole();
+ $b->send_one_cmd(".bvfs_update" . $self->{bvfs_user});
}
sub update_brestore_table
{
my ($self, @jobs) = @_;
-
- $self->debug(\@jobs);
-
- foreach my $job (sort {$a <=> $b} @jobs)
- {
- my $query = "SELECT 1 FROM brestore_knownjobid WHERE JobId = $job";
- my $retour = $self->dbh_selectrow_arrayref($query);
- next if ($retour and ($retour->[0] == 1)); # We have allready done this one ...
-
- print STDERR "Inserting path records for JobId $job\n";
- $query = "INSERT INTO brestore_pathvisibility (PathId, JobId)
- (SELECT DISTINCT PathId, JobId FROM File WHERE JobId = $job)";
-
- $self->dbh_do($query);
-
- # Now we have to do the directory recursion stuff to determine missing visibility
- # We try to avoid recursion, to be as fast as possible
- # We also only work on not allready hierarchised directories...
-
- print STDERR "Creating missing recursion paths for $job\n";
-
- $query = "
-SELECT brestore_pathvisibility.PathId, Path FROM brestore_pathvisibility
- JOIN Path ON( brestore_pathvisibility.PathId = Path.PathId)
- LEFT JOIN brestore_pathhierarchy ON (brestore_pathvisibility.PathId = brestore_pathhierarchy.PathId)
- WHERE brestore_pathvisibility.JobId = $job
- AND brestore_pathhierarchy.PathId IS NULL
- ORDER BY Path";
-
- my $sth = $self->dbh_prepare($query);
- $sth->execute();
- my $pathid; my $path;
- $sth->bind_columns(\$pathid,\$path);
-
- while ($sth->fetch)
- {
- $self->build_path_hierarchy($path,$pathid);
- }
- $sth->finish();
-
- # Great. We have calculated all dependancies. We can use them to add the missing pathids ...
- # This query gives all parent pathids for a given jobid that aren't stored.
- # It has to be called until no record is updated ...
- $query = "
-INSERT INTO brestore_pathvisibility (PathId, JobId) (
- SELECT a.PathId,$job
- FROM (
- SELECT DISTINCT h.PPathId AS PathId
- FROM brestore_pathhierarchy AS h
- JOIN brestore_pathvisibility AS p ON (h.PathId=p.PathId)
- WHERE p.JobId=$job) AS a LEFT JOIN
- (SELECT PathId
- FROM brestore_pathvisibility
- WHERE JobId=$job) AS b ON (a.PathId = b.PathId)
- WHERE b.PathId IS NULL)";
-
- my $rows_affected;
- while (($rows_affected = $self->dbh_do($query)) and ($rows_affected !~ /^0/))
- {
- print STDERR "Recursively adding $rows_affected records from $job\n";
- }
- # Job's done
- $query = "INSERT INTO brestore_knownjobid (JobId) VALUES ($job)";
- $self->dbh_do($query);
- }
-}
-
-# compute the parent directory
-sub parent_dir
-{
- my ($path) = @_;
- # Root Unix case :
- if ($path eq '/')
- {
- return '';
- }
- # Root Windows case :
- if ($path =~ /^[a-z]+:\/$/i)
- {
- return '';
- }
- # Split
- my @tmp = split('/',$path);
- # We remove the last ...
- pop @tmp;
- my $tmp = join ('/',@tmp) . '/';
- return $tmp;
-}
-
-sub build_path_hierarchy
-{
- my ($self, $path,$pathid)=@_;
- # Does the ppathid exist for this ? we use a memory cache...
- # In order to avoid the full loop, we consider that if a dir is allready in the
- # brestore_pathhierarchy table, then there is no need to calculate all the hierarchy
- while ($path ne '')
- {
- if (! $self->{cache_ppathid}->{$pathid})
- {
- my $query = "SELECT PPathId FROM brestore_pathhierarchy WHERE PathId = ?";
- my $sth2 = $self->{dbh}->prepare_cached($query);
- $sth2->execute($pathid);
- # Do we have a result ?
- if (my $refrow = $sth2->fetchrow_arrayref)
- {
- $self->{cache_ppathid}->{$pathid}=$refrow->[0];
- $sth2->finish();
- # This dir was in the db ...
- # It means we can leave, the tree has allready been built for
- # this dir
- return 1;
- } else {
- $sth2->finish();
- # We have to create the record ...
- # What's the current p_path ?
- my $ppath = parent_dir($path);
- my $ppathid = $self->return_pathid_from_path($ppath);
- $self->{cache_ppathid}->{$pathid}= $ppathid;
-
- $query = "INSERT INTO brestore_pathhierarchy (pathid, ppathid) VALUES (?,?)";
- $sth2 = $self->{dbh}->prepare_cached($query);
- $sth2->execute($pathid,$ppathid);
- $sth2->finish();
- $path = $ppath;
- $pathid = $ppathid;
- }
- } else {
- # It's allready in the cache.
- # We can leave, no time to waste here, all the parent dirs have allready
- # been done
- return 1;
- }
- }
- return 1;
-}
-
-sub return_pathid_from_path
-{
- my ($self, $path) = @_;
- my $query = "SELECT PathId FROM Path WHERE Path = ?";
-
- #print STDERR $query,"\n" if $debug;
- my $sth = $self->{dbh}->prepare_cached($query);
- $sth->execute($path);
- my $result =$sth->fetchrow_arrayref();
- $sth->finish();
- if (defined $result)
- {
- return $result->[0];
-
- } else {
- # A bit dirty : we insert into path, and we have to be sure
- # we aren't deleted by a purge. We still need to insert into path to get
- # the pathid, because of mysql
- $query = "INSERT INTO Path (Path) VALUES (?)";
- #print STDERR $query,"\n" if $debug;
- $sth = $self->{dbh}->prepare_cached($query);
- $sth->execute($path);
- $sth->finish();
-
- $query = "SELECT PathId FROM Path WHERE Path = ?";
- #print STDERR $query,"\n" if $debug;
- $sth = $self->{dbh}->prepare_cached($query);
- $sth->execute($path);
- $result = $sth->fetchrow_arrayref();
- $sth->finish();
- return $result->[0];
- }
+ my $jobs = join(",", sort {$a <=> $b} @jobs);
+ my $b = $self->get_bconsole();
+ $b->send_one_cmd(".bvfs_update jobid=$jobs" . $self->{bvfs_user});
}
# list all files in a directory, accross curjobids
my ($self) = @_;
return undef unless ($self->{curjobids});
-
- my $inclause = $self->{curjobids};
- my $inpath = $self->{cwdid};
- my $filter = '';
- if ($self->{pattern}) {
- $filter = " AND Filename.Name $self->{sql}->{MATCH} $self->{pattern} ";
- }
-
- my $query =
-"SELECT File.FilenameId, listfiles.id, listfiles.Name, File.LStat, File.JobId
- FROM File, (
- SELECT Filename.Name, max(File.FileId) as id
- FROM File, Filename
- WHERE File.FilenameId = Filename.FilenameId
- AND Filename.Name != ''
- AND File.PathId = $inpath
- AND File.JobId IN ($inclause)
- $filter
- GROUP BY Filename.Name
- ORDER BY Filename.Name LIMIT $self->{limit} OFFSET $self->{offset}
- ) AS listfiles
-WHERE File.FileId = listfiles.id";
-
-# print STDERR $query;
- $self->debug($query);
- my $result = $self->dbh_selectall_arrayref($query);
- $self->debug($result);
-
- return $result;
-}
-
-sub ls_special_dirs
-{
- my ($self) = @_;
- return undef unless ($self->{curjobids});
-
+
my $pathid = $self->{cwdid};
my $jobclause = $self->{curjobids};
- my $dir_filenameid = $self->get_dir_filenameid();
-
- my $sq1 =
-"((SELECT PPathId AS PathId, '..' AS Path
- FROM brestore_pathhierarchy
- WHERE PathId = $pathid)
-UNION
- (SELECT $pathid AS PathId, '.' AS Path))";
-
- my $sq2 = "
-SELECT tmp.PathId, tmp.Path, LStat, JobId
- FROM $sq1 AS tmp LEFT JOIN ( -- get attributes if any
- SELECT File1.PathId, File1.JobId, File1.LStat FROM File AS File1
- WHERE File1.FilenameId = $dir_filenameid
- AND File1.JobId IN ($jobclause)) AS listfile1
- ON (tmp.PathId = listfile1.PathId)
- ORDER BY tmp.Path, JobId DESC
-";
-
- my $result = $self->dbh_selectall_arrayref($sq2);
+ my $filter ='';
+ if ($self->{pattern}) {
+ $filter = " pattern=\"$self->{pattern}\"";
+ }
+ my $b = $self->get_bconsole();
+ my $ret = $b->send_one_cmd(".bvfs_lsfiles jobid=$jobclause " .
+ "pathid=$pathid " . $self->{bvfs_user} .
+ "limit=$self->{limit} offset=$self->{offset} " .
+ $filter);
+
+ # 0 1 2 3 4 5
+ # PathId, FilenameId, fileid, jobid, lstat, Name
my @return_list;
- my $prev_dir='';
- foreach my $refrow (@{$result})
+ foreach my $line (@{$ret})
{
- my $dirid = $refrow->[0];
- my $dir = $refrow->[1];
- my $lstat = $refrow->[3];
- my $jobid = $refrow->[2] || 0;
- next if ($dirid eq $prev_dir);
- my @return_array = ($dirid,$dir,$lstat,$jobid);
+ next unless ($line =~ /^\d+\t\d+/);
+ chomp($line);
+ my @row = split("\t", $line, 6);
+ my $fid = $row[2];
+ my $fnid = $row[1];
+ my $name = $row[5];
+ my $lstat = $row[4];
+ my $jobid = $row[3] || 0;
+      # We have to clean up this dirname ... we only want its 'basename'
+ my @return_array = ($fnid, $fid,$name,$lstat,$jobid);
push @return_list,(\@return_array);
- $prev_dir = $dirid;
}
-
- return \@return_list;
-}
+#FilenameId, listfiles.id, Name, File.LStat, File.JobId
-# Let's retrieve the list of the visible dirs in this dir ...
-# First, I need the empty filenameid to locate efficiently
-# the dirs in the file table
-sub get_dir_filenameid
-{
- my ($self) = @_;
- if ($self->{dir_filenameid}) {
- return $self->{dir_filenameid};
- }
- my $query = "SELECT FilenameId FROM Filename WHERE Name = ''";
- my $sth = $self->dbh_prepare($query);
- $sth->execute();
- my $result = $sth->fetchrow_arrayref();
- $sth->finish();
- return $self->{dir_filenameid} = $result->[0];
+ return \@return_list;
}
# list all directories in a directory, accross curjobids
my ($self) = @_;
return undef unless ($self->{curjobids});
-
+
my $pathid = $self->{cwdid};
my $jobclause = $self->{curjobids};
my $filter ='';
if ($self->{pattern}) {
- $filter = " AND Path2.Path $self->{sql}->{MATCH} $self->{pattern} ";
+ $filter = " pattern=\"$self->{pattern}\" ";
}
-
- # Let's retrieve the list of the visible dirs in this dir ...
- # First, I need the empty filenameid to locate efficiently
- # the dirs in the file table
- my $dir_filenameid = $self->get_dir_filenameid();
-
- # Then we get all the dir entries from File ...
- my $query = "
-SELECT PathId, Path, JobId, LStat FROM (
-
- SELECT Path1.PathId, Path1.Path, lower(Path1.Path),
- listfile1.JobId, listfile1.LStat
- FROM (
- SELECT DISTINCT brestore_pathhierarchy1.PathId
- FROM brestore_pathhierarchy AS brestore_pathhierarchy1
- JOIN Path AS Path2
- ON (brestore_pathhierarchy1.PathId = Path2.PathId)
- JOIN brestore_pathvisibility AS brestore_pathvisibility1
- ON (brestore_pathhierarchy1.PathId = brestore_pathvisibility1.PathId)
- WHERE brestore_pathhierarchy1.PPathId = $pathid
- AND brestore_pathvisibility1.jobid IN ($jobclause)
- $filter
- ) AS listpath1
- JOIN Path AS Path1 ON (listpath1.PathId = Path1.PathId)
-
- LEFT JOIN ( -- get attributes if any
- SELECT File1.PathId, File1.JobId, File1.LStat FROM File AS File1
- WHERE File1.FilenameId = $dir_filenameid
- AND File1.JobId IN ($jobclause)) AS listfile1
- ON (listpath1.PathId = listfile1.PathId)
- ) AS A ORDER BY 2,3 DESC LIMIT $self->{limit} OFFSET $self->{offset}
-";
-# print STDERR $query;
- my $sth=$self->dbh_prepare($query);
- $sth->execute();
- my $result = $sth->fetchall_arrayref();
+ my $b = $self->get_bconsole();
+ my $ret = $b->send_one_cmd(".bvfs_lsdir jobid=$jobclause pathid=$pathid " .
+ $self->{bvfs_user} .
+ "limit=$self->{limit} offset=$self->{offset} " .
+ $filter);
+
+ # 0 1 2 3 4 5
+ # PathId, 0, fileid, jobid, lstat, path
my @return_list;
my $prev_dir='';
- foreach my $refrow (@{$result})
+ foreach my $line (@{$ret})
{
- my $dirid = $refrow->[0];
- my $dir = $refrow->[1];
- my $lstat = $refrow->[3];
- my $jobid = $refrow->[2] || 0;
- next if ($dirid eq $prev_dir);
+ next unless ($line =~ /^\d+\t\d+/);
+ chomp($line);
+ my @row = split("\t", $line, 6);
+ my $dirid = $row[0];
+ my $dir = $row[5];
+ my $lstat = $row[4];
+ my $jobid = $row[3] || 0;
+ next if ($self->{skipdot} && $dir =~ /^\.+$/);
# We have to clean up this dirname ... we only want it's 'basename'
- my $return_value;
- if ($dir ne '/')
- {
- my @temp = split ('/',$dir);
- $return_value = pop @temp;
- }
- else
- {
- $return_value = '/';
- }
- my @return_array = ($dirid,$return_value,$lstat,$jobid);
+ my @return_array = ($dirid,$dir,'', $lstat,$jobid);
push @return_list,(\@return_array);
$prev_dir = $dirid;
}
- $self->debug(\@return_list);
+
return \@return_list;
}
# Returns list of versions of a file that could be restored
# returns an array of
-# (jobid,fileindex,mtime,size,inchanger,md5,volname,fileid)
+# (jobid,fileindex,mtime,size,inchanger,md5,volname,fileid,LinkFI)
# there will be only one jobid in the array of jobids...
sub get_all_file_versions
{
my ($self,$pathid,$fileid,$client,$see_all,$see_copies)=@_;
defined $see_all or $see_all=0;
- my $backup_type=" AND Job.Type = 'B' ";
+ my $backup_type="";
if ($see_copies) {
- $backup_type=" AND Job.Type IN ('C', 'B') ";
+ $backup_type=" copies ";
}
- my @versions;
- my $query;
- $query =
-"SELECT File.JobId, File.FileId, File.LStat,
- File.Md5, Media.VolumeName, Media.InChanger
- FROM File, Job, Client, JobMedia, Media
- WHERE File.FilenameId = $fileid
- AND File.PathId=$pathid
- AND File.JobId = Job.JobId
- AND Job.ClientId = Client.ClientId
- AND Job.JobId = JobMedia.JobId
- AND File.FileIndex >= JobMedia.FirstIndex
- AND File.FileIndex <= JobMedia.LastIndex
- AND JobMedia.MediaId = Media.MediaId
- AND Client.Name = '$client'
- $backup_type
-";
-
- $self->debug($query);
- my $result = $self->dbh_selectall_arrayref($query);
+ my $bc = $self->get_bconsole();
+ my $res = $bc->send_one_cmd(".bvfs_versions fnid=$fileid pathid=$pathid " .
+ "client=\"$client\" jobid=1 $backup_type" .
+ $self->{bvfs_user});
- foreach my $refrow (@$result)
+ my @versions;
+ # (pathid,fileid,jobid,fid,mtime,size,inchanger,md5,volname,LinkFI );
+ # PathId, FilenameId, fileid, jobid, lstat, Md5, VolName, VolInchanger
+ foreach my $row (@$res)
{
- my ($jobid, $fid, $lstat, $md5, $volname, $inchanger) = @$refrow;
+ next unless $row =~ /^\d+\t\d+/;
+ my ($pathid, $fid, $fileid, $jobid, $lstat, $md5, $volname, $inchanger)
+ = split(/\t/, $row);
+
my @attribs = parse_lstat($lstat);
my $mtime = array_attrib('st_mtime',\@attribs);
my $size = array_attrib('st_size',\@attribs);
+ my $LinkFI = array_attrib('LinkFI',\@attribs);
- my @list = ($pathid,$fileid,$jobid,
- $fid, $mtime, $size, $inchanger,
- $md5, $volname);
+ # 0 1 2 3 4 5 6
+ my @list = ($pathid,$fileid,$jobid, $fid, $mtime, $size, $inchanger,
+ $md5, $volname, $LinkFI);
push @versions, (\@list);
}
my $conf = new Bweb::Config(config_file => $Bweb::config_file);
$conf->load();
-my $bvfs = new Bvfs(info => $conf);
+my $skipdot=0;
+if (CGI::param("skipdot")) {
+ $skipdot=1;
+}
+
+my $bvfs = new Bvfs(info => $conf, skipdot => $skipdot);
+my $user = $bvfs->{loginname};
+if ($bvfs->{loginname}) {
+ $bvfs->{bvfs_user} = " username=\"$bvfs->{loginname}\" ";
+} else {
+ $bvfs->{bvfs_user} = "";
+}
$bvfs->connect_db();
my $action = CGI::param('action') || '';
my $args = $bvfs->get_form('pathid', 'filenameid', 'fileid', 'qdate',
- 'limit', 'offset', 'client', 'qpattern');
+ 'limit', 'offset', 'client');
if ($action eq 'batch') {
$bvfs->update_cache();
exit 0;
}
+my $pattern = CGI::param('pattern') || '';
+if ($pattern =~ /^([\w\d,:\.\-% ]+)$/) {
+ $bvfs->set_pattern($1);
+}
+
+my $nodir;
+if ($conf->{subconf}
+ && scalar(%{$conf->{subconf}}) # we have non empty subconf
+ && !$conf->{current_conf})
+{
+ $nodir=1;
+}
# All these functions are returning JSON compatible data
# for javascript parsing
if ($action eq 'list_client') { # list all client [ ['c1'],['c2']..]
print CGI::header('application/x-javascript');
+ if ($nodir) {
+ print "[['Choose a Director first']]\n";
+ exit 0;
+ }
+
my $filter = $bvfs->get_client_filter();
my $q = "SELECT Name FROM Client $filter";
my $ret = $bvfs->dbh_selectall_arrayref($q);
print "]\n";
exit 0;
-} elsif ($action eq 'list_storage') { # TODO: use .storage here
+
+} elsif ($action eq 'list_storage') {
print CGI::header('application/x-javascript');
- my $q="SELECT Name FROM Storage";
- my $lst = $bvfs->dbh_selectall_arrayref($q);
+ my $bconsole = $bvfs->get_bconsole();
+ my @lst = $bconsole->list_storage();
print "[";
- print join(',', map { "[ '$_->[0]' ]" } @$lst);
+ print join(',', map { "[ '$_' ]" } @lst);
print "]\n";
exit 0;
}
my $fileid = join(',', grep { /^\d+$/ } CGI::param('fileid'));
# can get dirid=("10,11", 10, 11)
- my @dirid = grep { /^\d+$/ } map { split(/,/) } CGI::param('dirid') ;
+ my $dirid = join(',', grep { /^\d+$/ }
+ map { split(/,/) } CGI::param('dirid')) ;
+ my $findex = join(',', grep { /^\d+$/ }
+ map { split(/,|\//) } CGI::param('findex')) ;
my $inclause = join(',', @jobid);
- my @union;
-
- if ($fileid) {
- push @union,
- "(SELECT JobId, FileIndex, FilenameId, PathId $FileId
- FROM File WHERE FileId IN ($fileid))";
- }
-
- foreach my $dirid (@dirid) {
- my $p = $bvfs->get_path($dirid);
- $p =~ s/([%_\\])/\\$1/g; # Escape % and _ for LIKE search
- $p = $bvfs->dbh_quote($p);
- push @union, "
- (SELECT File.JobId, File.FileIndex, File.FilenameId, File.PathId $FileId
- FROM Path JOIN File USING (PathId)
- WHERE Path.Path LIKE " . $bvfs->dbh_strcat($p, "'%'") . "
- AND File.JobId IN ($inclause))";
- }
-
- return unless scalar(@union);
-
- my $u = join(" UNION ", @union);
-
- $bvfs->dbh_do("CREATE TEMPORARY TABLE btemp AS $u");
- # TODO: remove FilenameId et PathId
-
- # now we have to choose the file with the max(jobid)
- # for each file of btemp
- if ($bvfs->dbh_is_mysql()) {
- $bvfs->dbh_do("CREATE TABLE b2$$ AS (
-SELECT max(JobId) as JobId, FileIndex $FileId
- FROM btemp
- GROUP BY PathId, FilenameId
- HAVING FileIndex > 0
-)");
- } else { # postgresql have distinct with more than one criteria
- $bvfs->dbh_do("CREATE TABLE b2$$ AS (
-SELECT JobId, FileIndex $FileId
-FROM (
- SELECT DISTINCT ON (PathId, FilenameId) JobId, FileIndex $FileId
- FROM btemp
- ORDER BY PathId, FilenameId, JobId DESC
- ) AS T
- WHERE FileIndex > 0
-)");
+ my $b = $bvfs->get_bconsole();
+ my $ret = $b->send_one_cmd(".bvfs_restore path=b2$$ fileid=$fileid " .
+ "dirid=$dirid hardlink=$findex jobid=1"
+ . $bvfs->{bvfs_user});
+ if (grep (/OK/, @$ret)) {
+ return "b2$$";
}
-
- return "b2$$";
+ return;
}
sub get_media_list_with_dir
#print STDERR "pathid=$pathid\n";
-# permit to use a regex filter
-if ($args->{qpattern}) {
- $bvfs->set_pattern($args->{qpattern});
-}
-
if ($action eq 'restore') {
# TODO: pouvoir choisir le replace et le jobname
- my $arg = $bvfs->get_form(qw/client storage regexwhere where/);
+ my $arg = $bvfs->get_form(qw/client storage regexwhere where comment dir/);
if (!$arg->{client}) {
print "ERROR: missing client\n";
my $table = fill_table_for_restore(@jobid);
if (!$table) {
+	print "ERROR: can't create restore table\n";
exit 1;
}
+ # TODO: remove it after a while
+ if ($bvfs->get_db_field('Comment') ne 'Comment') {
+ delete $arg->{comment};
+ }
+
my $bconsole = $bvfs->get_bconsole();
# TODO: pouvoir choisir le replace et le jobname
my $jobid = $bconsole->run(client => $arg->{client},
- storage => $arg->{storage},
- where => $arg->{where},
- regexwhere=> $arg->{regexwhere},
- restore => 1,
- file => "?$table");
+ storage => $arg->{storage},
+ where => $arg->{where},
+ regexwhere=> $arg->{regexwhere},
+ restore => 1,
+ comment => $arg->{comment},
+ file => "?$table");
$bvfs->dbh_do("DROP TABLE $table");
$bvfs->display_end();
exit 0;
}
+
sleep(2);
- print CGI::redirect("bweb.pl?action=dsp_cur_job;jobid=$jobid") ;
+
+ my $dir='';
+ if ($arg->{dir}) {
+ $dir=";dir=$arg->{dir}";
+ }
+
+ print CGI::redirect("bweb.pl?action=dsp_cur_job;jobid=$jobid$dir") ;
exit 0;
}
sub escape_quote
if ($action eq 'list_files_dirs') {
-# fileid, filenameid, pathid, jobid, name, size, mtime
+# fileid, filenameid, pathid, jobid, name, size, mtime, LinkFI
my $jids = join(",", @jobid);
- my $files = $bvfs->ls_special_dirs();
+ my $files = $bvfs->ls_dirs();
# return ($dirid,$dir_basename,$lstat,$jobid)
- print "[\n";
- print join(',',
- map { my @p=Bvfs::parse_lstat($_->[3]);
- '[' . join(',',
- 0, # fileid
- 0, # filenameid
- $_->[0], # pathid
- "'$jids'", # jobid
- '"' . escape_quote($_->[1]) . '"', # name
- "'" . $p[7] . "'", # size
- "'" . strftime('%Y-%m-%d %H:%m:%S', localtime($p[11]||0)) . "'") .
- ']';
- } @$files);
- print "," if (@$files);
-
- $files = $bvfs->ls_dirs();
- # return ($dirid,$dir_basename,$lstat,$jobid)
- print join(',',
+ print '[', join(',',
map { my @p=Bvfs::parse_lstat($_->[3]);
'[' . join(',',
0, # fileid
"'$jids'", # jobid
'"' . escape_quote($_->[1]) . '"', # name
"'" . $p[7] . "'", # size
- "'" . strftime('%Y-%m-%d %H:%m:%S', localtime($p[11]||0)) . "'") .
+                         "'" . strftime('%Y-%m-%d %H:%M:%S', localtime($p[11]||0)) . "'",
+ 0) . # LinkFI
']';
} @$files);
print join(',',
map { my @p=Bvfs::parse_lstat($_->[3]);
'[' . join(',',
- $_->[1],
- $_->[0],
- $pathid,
- $_->[4],
+ $_->[1], # fileid
+ $_->[0], # fnid
+ $pathid, # pathid
+ $_->[4], # jobid
'"' . escape_quote($_->[2]) . '"', # name
"'" . $p[7] . "'",
- "'" . strftime('%Y-%m-%d %H:%m:%S', localtime($p[11])) . "'") .
+	                  "'" . strftime('%Y-%m-%d %H:%M:%S', localtime($p[11])) . "'",
+ $p[13]) . # LinkFI
']';
} @$files);
print "]\n";
print "[[0,0,0,0,'.',4096,'1970-01-01 00:00:00'],";
my $files = $bvfs->ls_files();
# [ 1, 2, 3, "Bill", 10, '2007-01-01 00:00:00'],
-# File.FilenameId, listfiles.id, listfiles.Name, File.LStat, File.JobId
+# File.FilenameId, listfiles.id, listfiles.Name, File.LStat, File.JobId,LinkFI
print join(',',
map { my @p=Bvfs::parse_lstat($_->[3]);
$_->[4],
'"' . escape_quote($_->[2]) . '"', # name
"'" . $p[7] . "'",
- "'" . strftime('%Y-%m-%d %H:%m:%S', localtime($p[11])) . "'") .
+                          "'" . strftime('%Y-%m-%d %H:%M:%S', localtime($p[11])) . "'",
+ $p[13]) . # LinkFI
']';
} @$files);
print "]\n";
print "[";
my $dirs = $bvfs->ls_dirs();
+
# return ($dirid,$dir_basename,$lstat,$jobid)
print join(',',
map { "{ 'jobid': '$bvfs->{curjobids}', 'id': '$_->[0]'," .
- "'text': '" . escape_quote($_->[1]) . "', 'cls':'folder'}" }
+ "'text': '" . escape_quote($_->[1]) . "', 'cls':'folder'}" }
@$dirs);
print "]\n";
$vcopies = ($vcopies eq 'false')?0:1;
print "[";
- # 0 1 2 3 4 5 6 7 8
- #($pathid,$fileid,$jobid, $fid, $mtime, $size, $inchanger, $md5, $volname);
+ # 0 1 2 3 4 5 6 7 8 9
+ #(pathid,fileid,jobid,fid,mtime,size,inchanger,md5,volname,LinkFI );
my $files = $bvfs->get_all_file_versions($args->{pathid}, $args->{filenameid}, $args->{client}, $vafv, $vcopies);
print join(',',
- map { "[ $_->[3], $_->[1], $_->[0], $_->[2], '$_->[8]', $_->[6], '$_->[7]', $_->[5],'" . strftime('%Y-%m-%d %H:%m:%S', localtime($_->[4])) . "']" }
+	       map { "[ $_->[1], $_->[3], $_->[0], $_->[2], '$_->[8]', $_->[6], '$_->[7]', $_->[5],'" . strftime('%Y-%m-%d %H:%M:%S', localtime($_->[4])) . "',$_->[9]]" }
@$files);
print "]\n";
}
if ($table) {
- $bvfs->dbh_do("DROP TABLE $table");
+ my $b = $bvfs->get_bconsole();
+ $b->send_one_cmd(".bvfs_cleanup path=b2$$");
}
}
// Bweb - A Bacula web interface
// Bacula® - The Network Backup Solution
//
-// Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+// Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
//
// The main author of Bweb is Eric Bollengier.
// The main author of Bacula is Kern Sibbald, with contributions from
Ext.brestore.force_reload = 0;
Ext.brestore.path_stack = Array();
Ext.brestore.pathid_stack = Array();
+Ext.brestore.dir = get_current_director();
function get_node_path(node)
{
var temp='';
for (var p = node; p; p = p.parentNode) {
if (p.parentNode) {
- if (p.text == '/') {
- temp = p.text + temp;
- } else {
- temp = p.text + '/' + temp;
- }
+ temp = p.text + temp;
}
}
return Ext.brestore.root_path + temp;
baseParams['pattern'] = Ext.brestore.fpattern;
}
}
+ if (Ext.brestore.dir) {
+ baseParams['dir'] = Ext.brestore.dir;
+ }
return baseParams;
}
{name: 'name' },
{name: 'size', type: 'int' },
{name: 'mtime', type: 'date', dateFormat: 'Y-m-d h:i:s'},
- {name: 'type'}
+// {name: 'type'},
+ {name: 'LinkFI'}
]))
});
header: "JobId",
dataIndex: 'jobid',
hidden: true
+ },{
+ header: "LinkFI",
+ dataIndex: 'LinkFI',
+ hidden: true
}]);
// by default columns are sortable
} else if (r.json[4] == '/') {
Ext.brestore.path = '/';
} else if (r.json[4] != '.') {
- Ext.brestore.path = Ext.brestore.path + r.json[4] + '/';
+ Ext.brestore.path = Ext.brestore.path + r.json[4];
}
Ext.brestore.pathid = r.json[2];
Ext.brestore.filename = '';
{name: 'pathid' },
{name: 'name' },
{name: 'size', type: 'int' },
- {name: 'mtime'}//, type: 'date', dateFormat: 'Y-m-d h:i:s'}
+ {name: 'mtime'},
+ {name: 'LinkFI'}
]))
});
header: 'FileId',
dataIndex: 'fileid',
hidden: true
+ },{
+ header: 'LinkFI',
+ dataIndex: 'LinkFI',
+ hidden: true
}
]);
{name: 'filenameid'},
{name: 'pathid'},
{name: 'size'},
- {name: 'mtime'}
+ {name: 'mtime'},
+ {name: 'LinkFI'}
);
// captureEvents(file_selection_grid);
// captureEvents(file_selection_store);
{name: 'inchanger' },
{name: 'md5' },
{name: 'size', type: 'int' },
- {name: 'mtime', type: 'date', dateFormat: 'Y-m-d h:i:s'}
+ {name: 'mtime', type: 'date', dateFormat: 'Y-m-d h:i:s'},
+ {name: 'LinkFI'}
]))
});
header: "fileid",
dataIndex: 'fileid',
hidden: true
+ },{
+ header: "LinkFI",
+ dataIndex: 'LinkFI',
+ hidden: true
}]);
// by default columns are sortable
});
file_versions_grid.on('rowdblclick', function(e) {
- alert(e) ; file_versions_store.removeAll(); return true;
+ file_versions_store.removeAll(); return true;
});
while(root.firstChild){
root.removeChild(root.firstChild);
}
- job_store.load( {params:{action: 'list_job',
+ job_store.load( {params:{action: 'list_job',dir: Ext.brestore.dir,
client:Ext.brestore.client}});
return true;
});
Ext.brestore.jobid = c.json[0];
Ext.brestore.jobdate = c.json[1];
root.setText("Root");
- tree_loader.baseParams = init_params({init:1, action: 'list_dirs'});
+ tree_loader.baseParams = init_params({init:1, action: 'list_dirs',
+ skipdot:1});
root.reload();
file_store.load({params:init_params({action: 'list_files_dirs',
Ext.brestore.root_path=where;
root.setText(where);
tree_loader.baseParams = init_params({
- action:'list_dirs', path: where
+ action:'list_dirs', path: where, skipdot: 1
});
root.reload();
}
items: file_selection_grid
}
]});
- client_store.load({params:{action: 'list_client'}});
+ client_store.load({params:{action: 'list_client', dir: Ext.brestore.dir}});
// data.selections[0].json[]
// data.node.id
pathid: data.selections[i].json[2],
name: Ext.brestore.path + ((name=='..')?'':name),
size: data.selections[i].json[5],
- mtime: data.selections[i].json[6]
+ mtime: data.selections[i].json[6],
+ LinkFI: data.selections[i].json[7]
});
file_selection_store.add(r);
}
pathid: data.selections[0].json[2],
name: Ext.brestore.path + Ext.brestore.filename,
size: data.selections[0].json[7],
- mtime: data.selections[0].json[8]
+ mtime: data.selections[0].json[8],
+ LinkFI: data.selections[0].json[9],
});
file_selection_store.add(r)
}
pathid: data.node.id,
name: path,
size: 4096,
- mtime: 0
+ mtime: 0,
+ LinkFI: 0
});
file_selection_store.add(r)
}
triggerAction: 'all',
emptyText:'Select a client...',
forceSelection: true,
- value: Ext.brestore.client,
selectOnFocus:true
});
+
+ var storage_store = new Ext.data.Store({
+ proxy: new Ext.data.HttpProxy({
+ url: '/cgi-bin/bweb/bresto.pl',
+ method: 'GET',
+ params:{action:'list_storage'}
+ }),
+
+ reader: new Ext.data.ArrayReader({
+ }, Ext.data.Record.create([
+ {name: 'name' }
+ ]))
+ });
+
+ var storage_combo = new Ext.form.ComboBox({
+ value: Ext.brestore.storage,
+ fieldLabel: 'Storage',
+ hiddenName:'storage',
+ store: storage_store,
+ displayField:'name',
+ typeAhead: true,
+ mode: 'local',
+ triggerAction: 'all',
+ emptyText:'Select a storage...',
+ forceSelection: false,
+ selectOnFocus:true
+ });
+
+ var comment_text = new Ext.form.TextField({
+ fieldLabel: 'Comment',
+ name: 'comment',
+ allowBlank:true
+ });
+
var where_text = new Ext.form.TextField({
fieldLabel: 'Where',
name: 'where',
proxy: new Ext.data.HttpProxy({
url: '/cgi-bin/bweb/bresto.pl',
method: 'GET',
- params:{offset:0, limit:50 }
+ params:{offset:0, limit:50}
}),
reader: new Ext.data.ArrayReader({
autoHeight : true,
defaults : {width: 210},
bodyStyle : 'padding:5px 5px 0',
- items :[ rclient_combo, where_text, replace_combo ]
+ items :[ rclient_combo, where_text, replace_combo, comment_text ]
}, {
xtype : 'fieldset',
title : 'Media needed',
fieldLabel: 'Priority',
disabled: true,
tooltip: '1-100'
- }]
+ }, storage_combo]
}]
}]
}],
// Ext.brestore.dlglaunch.addKeyListener(27,
// Ext.brestore.dlglaunch.hide,
// Ext.brestore.dlglaunch);
-/*
- * var storage_store = new Ext.data.Store({
- * proxy: new Ext.data.HttpProxy({
- * url: '/cgi-bin/bweb/bresto.pl',
- * method: 'GET',
- * params:{action:'list_storage'}
- * }),
- *
- * reader: new Ext.data.ArrayReader({
- * }, Ext.data.Record.create([
- * {name: 'name' }
- * ]))
- * });
- */
////////////////////////////////////////////////////////////////
function launch_restore() {
var tab_fileid=new Array();
var tab_dirid=new Array();
var tab_jobid=new Array();
+ var tab_findex=new Array();
for(var i=0;i<items.length;i++) {
- if (items[i].data['fileid']) {
- tab_fileid.push(items[i].data['fileid']);
- } else {
- tab_dirid.push(items[i].data['pathid']);
- }
- tab_jobid.push(items[i].data['jobid']);
+ // For hardlinks, we include fileindex/jobid
+ if (items[i].data['LinkFI']) {
+ tab_findex.push(items[i].data['jobid']
+ + '/' + items[i].data['LinkFI']);
+ }
+ if (items[i].data['fileid']) {
+ tab_fileid.push(items[i].data['fileid']);
+ } else {
+ tab_dirid.push(items[i].data['pathid']);
+ }
+ tab_jobid.push(items[i].data['jobid']);
+ }
+ var dir='';
+ if (Ext.brestore.dir) {
+ dir = ";dir=" + encodeURIComponent(Ext.brestore.dir);
}
- var res = ';fileid=' + tab_fileid.join(";fileid=");
+
+ var res = ';fileid=' + tab_fileid.join(";fileid=");
var res2 = ';dirid=' + tab_dirid.join(";dirid=");
var res3 = ';jobid=' + tab_jobid.join(";jobid=");
-
- var res4 = ';client=' + rclient_combo.getValue();
-// if (storage_combo.getValue()) {
-// res4 = res4 + ';storage=' + storage_combo.getValue();
-// }
+ var res4 = ';client=' + encodeURIComponent(rclient_combo.getValue());
+ var res5 = ';findex=' + tab_findex.join(";findex=");
+ if (comment_text.getValue()) {
+ res4 = res4 + ';comment=' + encodeURIComponent(comment_text.getValue());
+ }
+ if (storage_combo.getValue()) {
+ res4 = res4 + ';storage=' + storage_combo.getValue();
+ }
if (Ext.brestore.use_filerelocation) {
if (useregexp_bp.getValue()) {
- res4 = res4 + ';regexwhere=' + rwhere_text.getValue();
+ res4 = res4 + ';regexwhere=' + encodeURIComponent(rwhere_text.getValue());
} else {
var reg = new Array();
if (stripprefix_text.getValue()) {
- reg.push('!' + stripprefix_text.getValue() + '!!i');
+ reg.push(encodeURIComponent('!' + stripprefix_text.getValue() + '!!i'));
}
if (addprefix_text.getValue()) {
- reg.push('!^!' + addprefix_text.getValue() + '!');
+ reg.push(encodeURIComponent('!^!' + addprefix_text.getValue() + '!'));
}
if (addsuffix_text.getValue()) {
- reg.push('!([^/])$!$1' + addsuffix_text.getValue() + '!');
+ reg.push(encodeURIComponent('!([^/])$!$1' + addsuffix_text.getValue() + '!'));
}
res4 = res4 + ';regexwhere=' + reg.join(',');
}
} else {
- res4 = res4 + ';where=' + where_text.getValue();
+ res4 = res4 + ';where=' + encodeURIComponent(where_text.getValue());
}
- window.location='/cgi-bin/bweb/bresto.pl?action=restore' + res + res2 + res3 + res4;
+ window.location='/cgi-bin/bweb/bresto.pl?action=restore' + dir + res + res2 + res3 + res5 + res4;
} // end launch_restore
////////////////////////////////////////////////////////////////
reload_media_store();
+ storage_store.load({params:init_params({action: 'list_storage'})});
Ext.brestore.dlglaunch.show();
-// storage_store.load({params:{action: 'list_storage'}});
}
});
\ No newline at end of file
Bweb - A Bacula web interface
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2006 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bweb is Eric Bollengier.
The main author of Bacula is Kern Sibbald, with contributions from
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
-=head1 VERSION
-
- $Id$
-
=cut
sub new
{
my ($class, %arg) = @_;
-
+ my $dir = $arg{pref}->{current_conf};
+ if ($dir) {
+ if (!$arg{pref}->{main_conf}->{bconsole} ||
+ ($arg{pref}->{main_conf}->{bconsole} ne $arg{pref}->{bconsole}))
+ {
+ # If bconsole string is different, don't use director option
+ $dir = undef;
+ }
+ }
+ if ($arg{director}) {
+ $dir = $arg{director};
+ }
my $self = bless {
- pref => $arg{pref}, # Pref object
- bconsole => undef, # Expect object
- log_stdout => $arg{log_stdout} || 0,
- timeout => $arg{timeout} || 20,
- debug => $arg{debug} || 0,
+ pref => $arg{pref}, # Pref object
+ dir => $dir, # Specify the director to connect
+ bconsole => undef, # Expect object
+ log_stdout => $arg{log_stdout} || 0,
+ timeout => $arg{timeout} || 20,
+ debug => $arg{debug} || 0,
};
return $self;
my ($self, $error) = @_;
$self->{error} = $error;
if ($error) {
- print STDERR "E: bconsole (", $self->{pref}->{bconsole}, ") $! $error\n";
+ print STDERR "E: bconsole (", $self->{pref}->{bconsole}, ") $! $error\n";
+ print STDERR "I: Check permissions on binary and configuration file\n";
+ print STDERR "I: Try to execute it in a terminal with a su command\n";
}
return 0;
}
unless (@cmd) {
return $self->error("bconsole string not found");
}
+ if ($self->{dir}) {
+ push @cmd, "-D", $self->{dir}; # separate argv elements keep director names with spaces intact
+ }
+ if ($self->{pref}->{debug}) {
+ my $c = join(' ', @cmd, '-t');
+ my $out = `$c 2>&1`;
+ if ($? != 0) {
+ print "bconsole test ($c): $out";
+ }
+ print STDERR "bconsole test ($c) ret=$?: $out";
+ }
$self->{bconsole} = new Expect;
$self->{bconsole}->raw_pty(1);
$self->{bconsole}->debug($self->{debug});
return sort split(/\r?\n/, $self->send_cmd(".jobs"));
}
+sub set_director
+{
+ my ($self, $dir) = @_;
+ $self->{dir} = $dir;
+}
+
+sub list_directors
+{
+ my ($self) = @_;
+ my $lst = `$self->{pref}->{bconsole} -l`;
+ if ($? != 0) {
+ return $self->error("Director list unsupported by bconsole");
+ }
+ return sort split(/\r?\n/, $lst);
+}
+
sub list_fileset
{
my ($self) = @_;
__END__
-# to use this
+# To use this, first run the backup-bacula-test regression setup, then run
# grep -v __END__ Bconsole.pm | perl
package main;
+use File::Copy 'copy';
use Data::Dumper qw/Dumper/;
-print "test sans conio\n";
+use Test::More tests => 22;
+
+my $bin = "/tmp/regress/bin/bconsole";
+my $conf = "/tmp/regress/bin/bconsole.conf";
+print "Test without conio\n";
+
+ok(copy($conf, "$conf.org"), "Backup original conf");
+
+system("sed 's/Name = .*/Name = \"zog6 dir\"/' $conf > /tmp/1");
+system("sed 's/Name = .*/Name = zog5-dir/' $conf >> /tmp/1");
+system("grep zog5-dir $conf > /dev/null || cat /tmp/1 >> $conf");
my $c = new Bconsole(pref => {
- bconsole => '/tmp/regress/bin/bconsole -n -c /tmp/regress/bin/bconsole.conf',
+ bconsole => "$bin -n -c $conf",
},
- debug => 0);
+ debug => 0);
+
+ok($c, "Create Bconsole object");
+
+my @lst = $c->list_directors();
+is(scalar(@lst), 3, "Should find 3 directors");
+print "directors : ", join(',', @lst), "\n";
+$c->set_director(pop(@lst));
+is($c->{dir}, "zog6 dir", "Check current director");
+
+@lst = $c->list_fileset();
+is(scalar(@lst), 2, "Should find 2 fileset");
+print "fileset : ", join(',', @lst), "\n";
+
+@lst = $c->list_job();
+is(scalar(@lst), 3, "Should find 3 jobs");
+print "job : ", join(',', @lst), "\n";
-print "fileset : ", join(',', $c->list_fileset()), "\n";
-print "job : ", join(',', $c->list_job()), "\n";
+@lst = $c->list_restore();
+is(scalar(@lst), 1, "Should find 1 jobs");
+print "restore : ", join(',', @lst), "\n";
+
+@lst = $c->list_backup();
+is(scalar(@lst), 2, "Should find 2 jobs");
+print "backup : ", join(',', @lst), "\n";
+
+@lst = $c->list_storage();
+is(scalar(@lst), 1, "Should find 1 storage");
print "storage : ", join(',', $c->list_storage()), "\n";
+
my $r = $c->get_fileset($c->list_fileset());
+ok(ref $r, "Check get_fileset return a ref");
+ok(ref $r->{I}, "Check Include is array");
print Dumper($r);
print "FS Include:\n", join (",", map { $_->{file} } @{$r->{I}}), "\n";
print "FS Exclude:\n", join (",", map { $_->{file} } @{$r->{E}}), "\n";
-#print $c->label_barcodes(pool => 'Scratch', drive => 0, storage => 'LTO3', slots => '45');
-#print "prune : " . $c->prune_volume('000001'), "\n";
-#print "update : " . $c->send_cmd('update slots storage=SDLT-1-2, drive=0'), "\n";
-#print "label : ", join(',', $c->label_barcodes(storage => 'SDLT-1-2',
-# slots => 6,
-# drive => 0)), "\n";
+$c->close();
+
+
+ok(copy("$conf.org", $conf), "Restore conf");
+my $c = new Bconsole(pref => {
+ bconsole => "$bin -n -c $conf",
+},
+ debug => 0);
+
+
+ok($c, "Create Bconsole object");
+@lst = $c->list_directors();
+
+is(scalar(@lst), 1, "Should find 1 directors");
+print "directors : ", join(',', @lst), "\n";
+@lst = $c->list_client();
+is(scalar(@lst), 1, "Should find 1 client");
+
+@lst = $c->list_pool();
+is(scalar(@lst), 2, "Should find 2 pool");
+
+@lst = $c->list_fileset();
+is(scalar(@lst), 2, "Should find 2 fileset");
+print "fileset : ", join(',', @lst), "\n";
+
+@lst = $c->list_job();
+is(scalar(@lst), 3, "Should find 3 jobs");
+print "job : ", join(',', @lst), "\n";
+
+@lst = $c->list_restore();
+is(scalar(@lst), 1, "Should find 1 jobs");
+print "restore : ", join(',', @lst), "\n";
+
+@lst = $c->list_storage();
+is(scalar(@lst), 1, "Should find 1 storage");
+print "storage : ", join(',', @lst), "\n";
+
+my $r = $c->get_fileset($c->list_fileset());
+ok(ref $r, "Check get_fileset return a ref");
+ok(ref $r->{I}, "Check Include is array");
+print Dumper($r);
+print "FS Include:\n", join (",", map { $_->{file} } @{$r->{I}}), "\n";
+print "FS Exclude:\n", join (",", map { $_->{file} } @{$r->{E}}), "\n";
+$c->close();