9) using bfileview.pl
10) accessing to bweb
11) setting mysql read-only account
+12) get more statistics
################ FILE COPY #####################################
# you must get bweb svn files
ALTER TABLE brestore_pathvisibility ADD Size int8;
ALTER TABLE brestore_pathvisibility ADD Files int4;
-################ ACCESSING TO BWEB ###############################
+################ ACCESSING TO BWEB ############################
Now, you are able to launch firefox/mozilla and go on
http://your-server/bweb
-################ SETTING MYSQL ACCOUNT ###########################
+################ SETTING MYSQL ACCOUNT ########################
At this time, Bweb needs a write access to :
- Location
GRANT UPDATE (LocationId,Comment,RecyclePoolId) ON bacula.Media
TO 'bweb'@'%' IDENTIFIED BY 'password';
-###################################################################
+################ GET MORE STATISTICS ###########################
+
+You can keep Job information beyond the retention period in a job_old table.
+You have to set stat_job_table = job_old in the bweb configuration.
+
+CREATE TABLE job_old (LIKE Job);
+
+And run this from crontab whenever you want:
+INSERT INTO job_old
+ (SELECT * FROM Job WHERE JobId NOT IN (SELECT JobId FROM job_old) );
+
+################################################################
Enjoy !
my $conf = new Bweb::Config(config_file => $Bweb::config_file);
$conf->load();
-
my $bweb = new Bweb(info => $conf);
$bweb->connect_db();
my $dbh = $bweb->{dbh};
my $debug = $bweb->{debug};
+# The Job table is kept according to the Media or Job retention, so it is
+# usually enough for good statistics
+# CREATE TABLE job_old (LIKE Job);
+# INSERT INTO job_old
+# (SELECT * FROM Job WHERE JobId NOT IN (SELECT JobId FROM job_old) );
+my $jobt = $conf->{stat_job_table} || 'Job';
+
my $graph = CGI::param('graph') || 'job_size';
my $legend = CGI::param('legend') || 'on' ;
$legend = ($legend eq 'on')?1:0;
my ($limitq, $label) = $bweb->get_limit(age => $arg->{age},
limit => $arg->{limit},
offset=> $arg->{offset},
- order => 'Job.StartTime ASC',
+ order => "$jobt.StartTime ASC",
);
my $statusq='';
if ($arg->{status} and $arg->{status} ne 'Any') {
- $statusq = " AND Job.JobStatus = '$arg->{status}' ";
+ $statusq = " AND $jobt.JobStatus = '$arg->{status}' ";
}
my $levelq='';
if ($arg->{level} and $arg->{level} ne 'Any') {
- $levelq = " AND Job.Level = '$arg->{level}' ";
+ $levelq = " AND $jobt.Level = '$arg->{level}' ";
}
my $filesetq='';
my $jobnameq='';
if ($arg->{jjobnames}) {
- $jobnameq = " AND Job.Name IN ($arg->{jjobnames}) ";
+ $jobnameq = " AND $jobt.Name IN ($arg->{jjobnames}) ";
} else {
$arg->{jjobnames} = 'all'; # skip warning
}
my $query = "
SELECT
- UNIX_TIMESTAMP(Job.StartTime) AS starttime,
+ UNIX_TIMESTAMP($jobt.StartTime) AS starttime,
Client.Name AS clientname,
- Job.Name AS jobname,
- Job.JobBytes AS jobbytes
-FROM Job, Client, FileSet
-WHERE Job.ClientId = Client.ClientId
- AND Job.FileSetId = FileSet.FileSetId
- AND Job.Type = 'B'
+ $jobt.Name AS jobname,
+ $jobt.JobBytes AS jobbytes
+FROM $jobt, Client, FileSet
+WHERE $jobt.ClientId = Client.ClientId
+ AND $jobt.FileSetId = FileSet.FileSetId
+ AND $jobt.Type = 'B'
$clientq
$statusq
$filesetq
my $query = "
SELECT
- UNIX_TIMESTAMP(Job.StartTime) AS starttime,
+ UNIX_TIMESTAMP($jobt.StartTime) AS starttime,
Client.Name AS clientname,
- Job.Name AS jobname,
- Job.JobFiles AS jobfiles
-FROM Job, Client, FileSet
-WHERE Job.ClientId = Client.ClientId
- AND Job.FileSetId = FileSet.FileSetId
- AND Job.Type = 'B'
+ $jobt.Name AS jobname,
+ $jobt.JobFiles AS jobfiles
+FROM $jobt, Client, FileSet
+WHERE $jobt.ClientId = Client.ClientId
+ AND $jobt.FileSetId = FileSet.FileSetId
+ AND $jobt.Type = 'B'
$clientq
$statusq
$filesetq
}
# it works only with postgresql at this time
+# we don't use $jobt here because we join with File, so the job must be in the Job table
elsif ($graph eq 'file_histo' and $arg->{where}) {
my $dir = $dbh->quote(dirname($arg->{where}) . '/');
my $query = "
SELECT
- UNIX_TIMESTAMP(Job.StartTime) AS starttime,
+ UNIX_TIMESTAMP($jobt.StartTime) AS starttime,
Client.Name AS clientname,
- Job.Name AS jobname,
- Job.JobBytes /
+ $jobt.Name AS jobname,
+ $jobt.JobBytes /
($bweb->{sql}->{SEC_TO_INT}(
$bweb->{sql}->{UNIX_TIMESTAMP}(EndTime)
- $bweb->{sql}->{UNIX_TIMESTAMP}(StartTime)) + 0.01)
AS rate
-FROM Job, Client, FileSet
-WHERE Job.ClientId = Client.ClientId
- AND Job.FileSetId = FileSet.FileSetId
- AND Job.Type = 'B'
+FROM $jobt, Client, FileSet
+WHERE $jobt.ClientId = Client.ClientId
+ AND $jobt.FileSetId = FileSet.FileSetId
+ AND $jobt.Type = 'B'
$clientq
$statusq
$filesetq
my $query = "
SELECT
- UNIX_TIMESTAMP(Job.StartTime) AS starttime,
+ UNIX_TIMESTAMP($jobt.StartTime) AS starttime,
Client.Name AS clientname,
- Job.Name AS jobname,
+ $jobt.Name AS jobname,
$bweb->{sql}->{SEC_TO_INT}( $bweb->{sql}->{UNIX_TIMESTAMP}(EndTime)
- $bweb->{sql}->{UNIX_TIMESTAMP}(StartTime))
AS duration
-FROM Job, Client, FileSet
-WHERE Job.ClientId = Client.ClientId
- AND Job.FileSetId = FileSet.FileSetId
- AND Job.Type = 'B'
+FROM $jobt, Client, FileSet
+WHERE $jobt.ClientId = Client.ClientId
+ AND $jobt.FileSetId = FileSet.FileSetId
+ AND $jobt.Type = 'B'
$clientq
$statusq
$filesetq
if ($t eq 'sum' or $t eq 'avg') {
push @arg, ('y_number_format' => \&Bweb::human_size);
}
+
+ my $stime = $bweb->{sql}->{"STARTTIME_$d"};
+ $stime =~ s/Job\./$jobt\./;
my $query = "
SELECT
- " . ($per_t?"":"UNIX_TIMESTAMP") . "(" . $bweb->{sql}->{"STARTTIME_$d"} . ") AS A,
+ " . ($per_t?"":"UNIX_TIMESTAMP") . "($stime) AS A,
$t(JobBytes) AS nb
-FROM Job, Client, FileSet
-WHERE Job.ClientId = Client.ClientId
- AND Job.FileSetId = FileSet.FileSetId
- AND Job.Type = 'B'
+FROM $jobt, Client, FileSet
+WHERE $jobt.ClientId = Client.ClientId
+ AND $jobt.FileSetId = FileSet.FileSetId
+ AND $jobt.Type = 'B'
$clientq
$statusq
$filesetq