db_lock(mdb);
db_start_transaction(jcr, mdb);
+#ifdef xxx
/* TODO: Remove this code when updating the make_bacula_tables script */
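/* Probe for the HasCache column: the query fails if the column is missing */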
Mmsg(mdb->cmd, "SELECT 1 FROM Job WHERE HasCache<>2 LIMIT 1");
if (!QUERY_DB(jcr, mdb, mdb->cmd)) {
QUERY_DB(jcr, mdb, mdb->cmd);
}
+#endif
Mmsg(mdb->cmd,
"SELECT JobId from Job "
#ifdef HAVE_SQLITE
#error "SQLite2 is now deprecated, use SQLite3 instead."
-#define BDB_VERSION 11
+#define BDB_VERSION 12
#include <sqlite.h>
#ifdef HAVE_SQLITE3
-#define BDB_VERSION 11
+#define BDB_VERSION 12
#include <sqlite3.h>
#ifdef HAVE_MYSQL
-#define BDB_VERSION 11
+#define BDB_VERSION 12
#include <mysql.h>
#ifdef HAVE_POSTGRESQL
-#define BDB_VERSION 11
+#define BDB_VERSION 12
#include <libpq-fe.h>
#ifdef HAVE_DBI
-#define BDB_VERSION 11
+#define BDB_VERSION 12
#include <dbi/dbi.h>
# shell script to create Bacula MySQL tables
#
bindir=@SQL_BINDIR@
+PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
-if $bindir/mysql $* -f <<END-OF-DATA
+if mysql $* -f <<END-OF-DATA
USE ${db_name};
--
-- Note, we use BLOB rather than TEXT because in MySQL,
PriorJobId INTEGER UNSIGNED DEFAULT 0 REFERENCES Job,
PurgedFiles TINYINT DEFAULT 0,
HasBase TINYINT DEFAULT 0,
+ HasCache TINYINT DEFAULT 0,
+ Comment BLOB,
PRIMARY KEY(JobId),
INDEX (Name(128))
);
PriorJobId INTEGER UNSIGNED DEFAULT 0,
PurgedFiles TINYINT DEFAULT 0,
HasBase TINYINT DEFAULT 0,
+ HasCache TINYINT DEFAULT 0,
+ Comment BLOB,
INDEX (StartTime)
);
StartBlock INTEGER UNSIGNED DEFAULT 0,
EndBlock INTEGER UNSIGNED DEFAULT 0,
VolIndex INTEGER UNSIGNED DEFAULT 0,
- Copy INTEGER UNSIGNED DEFAULT 0,
- Stripe INTEGER UNSIGNED DEFAULT 0,
PRIMARY KEY(JobMediaId),
INDEX (JobId, MediaId)
);
PRIMARY KEY(BaseId)
);
+CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId );
+
CREATE TABLE UnsavedFiles (
UnsavedId INTEGER UNSIGNED AUTO_INCREMENT,
JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
CREATE TABLE Status (
JobStatus CHAR(1) BINARY NOT NULL,
- JobStatusLong BLOB,
+ JobStatusLong BLOB,
+ Severity INT,
PRIMARY KEY (JobStatus)
);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('C', 'Created, not yet running'),
- ('R', 'Running'),
- ('B', 'Blocked'),
- ('T', 'Completed successfully'),
- ('E', 'Terminated with errors'),
- ('e', 'Non-fatal error'),
- ('f', 'Fatal error'),
- ('D', 'Verify found differences'),
- ('A', 'Canceled by user'),
- ('F', 'Waiting for Client'),
- ('S', 'Waiting for Storage daemon'),
- ('m', 'Waiting for new media'),
- ('M', 'Waiting for media mount'),
- ('s', 'Waiting for storage resource'),
- ('j', 'Waiting for job resource'),
- ('c', 'Waiting for client resource'),
- ('d', 'Waiting on maximum jobs'),
- ('t', 'Waiting on start time'),
- ('p', 'Waiting on higher priority jobs'),
- ('i', 'Doing batch insert file records'),
- ('a', 'SD despooling attributes');
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('C', 'Created, not yet running',15),
+ ('R', 'Running',15),
+ ('B', 'Blocked',15),
+ ('T', 'Completed successfully',10),
+ ('E', 'Terminated with errors',25),
+ ('e', 'Non-fatal error',20),
+ ('f', 'Fatal error',100),
+ ('D', 'Verify found differences',15),
+ ('A', 'Canceled by user',90),
+ ('F', 'Waiting for Client',15),
+ ('S', 'Waiting for Storage daemon',15),
+ ('m', 'Waiting for new media',15),
+ ('M', 'Waiting for media mount',15),
+ ('s', 'Waiting for storage resource',15),
+ ('j', 'Waiting for job resource',15),
+ ('c', 'Waiting for client resource',15),
+ ('d', 'Waiting on maximum jobs',15),
+ ('t', 'Waiting on start time',15),
+ ('p', 'Waiting on higher priority jobs',15),
+ ('i', 'Doing batch insert file records',15),
+ ('a', 'SD despooling attributes',15);
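+-- Illustrative only: a higher Severity marks a worse outcome, so the "worst"
+-- status in a set of jobs can be found with a query such as
+--   SELECT JobStatus FROM Status ORDER BY Severity DESC LIMIT 1;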
+
+CREATE TABLE PathHierarchy
+(
+ PathId integer NOT NULL,
+ PPathId integer NOT NULL,
+ CONSTRAINT pathhierarchy_pkey PRIMARY KEY (PathId)
+);
+
+CREATE INDEX pathhierarchy_ppathid
+ ON PathHierarchy (PPathId);
+
+CREATE TABLE PathVisibility
+(
+ PathId integer NOT NULL,
+ JobId integer NOT NULL,
+ Size int8 DEFAULT 0,
+ Files int4 DEFAULT 0,
+ CONSTRAINT pathvisibility_pkey PRIMARY KEY (JobId, PathId)
+);
+CREATE INDEX pathvisibility_jobid
+ ON PathVisibility (JobId);
CREATE TABLE Version (
VersionId INTEGER UNSIGNED NOT NULL
);
-- Initialize Version
-INSERT INTO Version (VersionId) VALUES (11);
+INSERT INTO Version (VersionId) VALUES (12);
END-OF-DATA
then
# shell script to create Bacula PostgreSQL tables
#
bindir=@SQL_BINDIR@
+PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
-$bindir/psql -f - -d ${db_name} $* <<END-OF-DATA
+psql -f - -d ${db_name} $* <<END-OF-DATA
CREATE TABLE filename
(
primary key (fileid)
);
-CREATE INDEX file_jpfid_idx on file (jobid, pathid, filenameid);
+CREATE INDEX file_jpfid_idx on File (JobId, PathId, FilenameId);
-- If you need performance, you can remove this index;
-- the database engine is able to use the composite index
-- CREATE INDEX file_pathid_idx on file(pathid);
-- CREATE INDEX file_filenameid_idx on file(filenameid);
-CREATE TABLE job
+CREATE TABLE Job
(
- jobid serial not null,
- job text not null,
- name text not null,
- type char(1) not null,
- level char(1) not null,
- clientid integer default 0,
- jobstatus char(1) not null,
- schedtime timestamp without time zone,
- starttime timestamp without time zone,
- endtime timestamp without time zone,
- realendtime timestamp without time zone,
- jobtdate bigint default 0,
- volsessionid integer default 0,
- volsessiontime integer default 0,
- jobfiles integer default 0,
- jobbytes bigint default 0,
- readbytes bigint default 0,
- joberrors integer default 0,
- jobmissingfiles integer default 0,
- poolid integer default 0,
- filesetid integer default 0,
- purgedfiles smallint default 0,
- hasbase smallint default 0,
- priorjobid integer default 0,
+ JobId serial not null,
+ Job text not null,
+ Name text not null,
+ Type char(1) not null,
+ Level char(1) not null,
+ ClientId integer default 0,
+ JobStatus char(1) not null,
+ SchedTime timestamp without time zone,
+ StartTime timestamp without time zone,
+ EndTime timestamp without time zone,
+ RealEndTime timestamp without time zone,
+ JobTDate bigint default 0,
+ VolSessionId integer default 0,
+ VolSessionTime integer default 0,
+ JobFiles integer default 0,
+ JobBytes bigint default 0,
+ ReadBytes bigint default 0,
+ JobErrors integer default 0,
+ JobMissingFiles integer default 0,
+ PoolId integer default 0,
+ FileSetId integer default 0,
+ PriorJobId integer default 0,
+ PurgedFiles smallint default 0,
+ HasBase smallint default 0,
+ HasCache smallint default 0,
+ Comment text,
primary key (jobid)
);
-- Create a table like Job for long term statistics
CREATE TABLE JobHisto (LIKE Job);
-CREATE INDEX jobhisto_idx ON jobhisto ( starttime );
+CREATE INDEX jobhisto_idx ON JobHisto ( StartTime );
CREATE TABLE Location (
startblock bigint default 0,
endblock bigint default 0,
volindex integer default 0,
- copy integer default 0,
primary key (jobmediaid)
);
primary key (baseid)
);
+CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId );
+
CREATE TABLE unsavedfiles
(
UnsavedId integer not null,
);
+CREATE TABLE PathHierarchy
+(
+ PathId integer NOT NULL,
+ PPathId integer NOT NULL,
+ CONSTRAINT pathhierarchy_pkey PRIMARY KEY (PathId)
+);
+
+CREATE INDEX pathhierarchy_ppathid
+ ON PathHierarchy (PPathId);
+
+CREATE TABLE PathVisibility
+(
+ PathId integer NOT NULL,
+ JobId integer NOT NULL,
+ Size int8 DEFAULT 0,
+ Files int4 DEFAULT 0,
+ CONSTRAINT pathvisibility_pkey PRIMARY KEY (JobId, PathId)
+);
+CREATE INDEX pathvisibility_jobid
+ ON PathVisibility (JobId);
+
CREATE TABLE version
(
versionid integer not null
CREATE TABLE Status (
JobStatus CHAR(1) NOT NULL,
- JobStatusLong TEXT,
+ JobStatusLong TEXT,
+ Severity int,
PRIMARY KEY (JobStatus)
);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('C', 'Created, not yet running');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('R', 'Running');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('B', 'Blocked');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('T', 'Completed successfully');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('E', 'Terminated with errors');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('e', 'Non-fatal error');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('f', 'Fatal error');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('D', 'Verify found differences');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('A', 'Canceled by user');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('F', 'Waiting for Client');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('S', 'Waiting for Storage daemon');
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('C', 'Created, not yet running',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('R', 'Running',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('B', 'Blocked',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('T', 'Completed successfully', 10);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('E', 'Terminated with errors', 25);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('e', 'Non-fatal error',20);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('f', 'Fatal error',100);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('D', 'Verify found differences',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('A', 'Canceled by user',90);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('F', 'Waiting for Client',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('S', 'Waiting for Storage daemon',15);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
-   ('m', 'Waiting for new media');
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+   ('m', 'Waiting for new media',15);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('M', 'Waiting for media mount');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('s', 'Waiting for storage resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('j', 'Waiting for job resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('c', 'Waiting for client resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('d', 'Waiting on maximum jobs');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('t', 'Waiting on start time');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('p', 'Waiting on higher priority jobs');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('a', 'SD despooling attributes');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('i', 'Doing batch insert file records');
-
-INSERT INTO Version (VersionId) VALUES (11);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('M', 'Waiting for media mount',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('s', 'Waiting for storage resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('j', 'Waiting for job resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('c', 'Waiting for client resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('d', 'Waiting on maximum jobs',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('t', 'Waiting on start time',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('p', 'Waiting on higher priority jobs',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('a', 'SD despooling attributes',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('i', 'Doing batch insert file records',15);
+
+INSERT INTO Version (VersionId) VALUES (12);
-- Make sure we have appropriate permissions
# shell script to create Bacula SQLite tables
bindir=@SQL_BINDIR@
+PATH="$bindir:$PATH"
cd @working_dir@
sqlite=@DB_TYPE@
db_name=@db_name@
-${bindir}/${sqlite} $* ${db_name}.db <<END-OF-DATA
+${sqlite} $* ${db_name}.db <<END-OF-DATA
CREATE TABLE Filename (
FilenameId INTEGER,
Name TEXT DEFAULT '',
PriorJobId INTEGER UNSIGNED REFERENCES Job DEFAULT 0,
PurgedFiles TINYINT DEFAULT 0,
HasBase TINYINT DEFAULT 0,
+ HasCache TINYINT DEFAULT 0,
+ Comment TEXT,
PRIMARY KEY(JobId)
);
CREATE INDEX inx6 ON Job (Name);
FileSetId INTEGER UNSIGNED DEFAULT 0,
PriorJobId INTEGER UNSIGNED DEFAULT 0,
PurgedFiles TINYINT DEFAULT 0,
- HasBase TINYINT DEFAULT 0
+ HasBase TINYINT DEFAULT 0,
+ HasCache TINYINT DEFAULT 0,
+ Comment TEXT
);
CREATE INDEX inx61 ON JobHisto (StartTime);
StartBlock INTEGER UNSIGNED DEFAULT 0,
EndBlock INTEGER UNSIGNED DEFAULT 0,
VolIndex INTEGER UNSIGNED DEFAULT 0,
- Copy INTEGER UNSIGNED DEFAULT 0,
PRIMARY KEY(JobMediaId)
);
PRIMARY KEY(BaseId)
);
+CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId );
+
CREATE TABLE UnsavedFiles (
UnsavedId INTEGER,
JobId INTEGER UNSIGNED REFERENCES Job NOT NULL,
PRIMARY KEY (MediaId)
);
+CREATE TABLE PathHierarchy
+(
+ PathId integer NOT NULL,
+ PPathId integer NOT NULL,
+ CONSTRAINT pathhierarchy_pkey PRIMARY KEY (PathId)
+);
+
+CREATE INDEX pathhierarchy_ppathid
+ ON PathHierarchy (PPathId);
+
+CREATE TABLE PathVisibility
+(
+ PathId integer NOT NULL,
+ JobId integer NOT NULL,
+ Size int8 DEFAULT 0,
+ Files int4 DEFAULT 0,
+ CONSTRAINT pathvisibility_pkey PRIMARY KEY (JobId, PathId)
+);
+
+CREATE INDEX pathvisibility_jobid
+ ON PathVisibility (JobId);
CREATE TABLE Status (
JobStatus CHAR(1) NOT NULL,
JobStatusLong BLOB,
+ Severity INT,
PRIMARY KEY (JobStatus)
);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('C', 'Created, not yet running');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('R', 'Running');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('B', 'Blocked');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('T', 'Completed successfully');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('E', 'Terminated with errors');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('e', 'Non-fatal error');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('f', 'Fatal error');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('D', 'Verify found differences');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('A', 'Canceled by user');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('F', 'Waiting for Client');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('S', 'Waiting for Storage daemon');
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('C', 'Created, not yet running',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('R', 'Running',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('B', 'Blocked',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('T', 'Completed successfully', 10);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('E', 'Terminated with errors', 25);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('e', 'Non-fatal error',20);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('f', 'Fatal error',100);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('D', 'Verify found differences',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('A', 'Canceled by user',90);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('F', 'Waiting for Client',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('S', 'Waiting for Storage daemon',15);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
-   ('m', 'Waiting for new media');
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+   ('m', 'Waiting for new media',15);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('M', 'Waiting for media mount');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('s', 'Waiting for storage resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('j', 'Waiting for job resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('c', 'Waiting for client resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('d', 'Waiting on maximum jobs');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('t', 'Waiting on start time');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('p', 'Waiting on higher priority jobs');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('a', 'SD despooling attributes');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('i', 'Doing batch insert file records');
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('M', 'Waiting for media mount',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('s', 'Waiting for storage resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('j', 'Waiting for job resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('c', 'Waiting for client resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('d', 'Waiting on maximum jobs',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('t', 'Waiting on start time',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('p', 'Waiting on higher priority jobs',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('a', 'SD despooling attributes',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('i', 'Doing batch insert file records',15);
-- Initialize Version
-INSERT INTO Version (VersionId) VALUES (11);
+INSERT INTO Version (VersionId) VALUES (12);
PRAGMA default_cache_size = 100000;
"SchedTime, StartTime, EndTime, RealEndTime, JobTDate, "
"VolSessionId, VolSessionTime, JobFiles, JobBytes, ReadBytes, "
"JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId, "
- "PurgedFiles, HasBase ) "
+ "PurgedFiles, HasBase, Comment ) "
"SELECT "
"JobId, Job, Name, Type, Level, ClientId, JobStatus, "
"SchedTime, StartTime, EndTime, RealEndTime, JobTDate, "
"VolSessionId, VolSessionTime, JobFiles, JobBytes, ReadBytes, "
"JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId, "
- "PurgedFiles, HasBase "
+ "PurgedFiles, HasBase, Comment "
"FROM Job "
"WHERE JobStatus IN ('T','W','f','A','E') "
"AND JobId NOT IN (SELECT JobId FROM JobHisto) "
#!/bin/sh
#
-# Shell script to update MySQL tables from version 2.0 to 3.0
+# Shell script to update MySQL tables from Bacula Enterprise 2.6 to 4.0 or
+# Standard version 3.0 to 5.0
#
echo " "
-echo "This script will update a Bacula MySQL database from version 10 to 11"
-echo " which is needed to convert from Bacula version 2.0.x to 3.0.x or higher"
+echo "This script will update a Bacula MySQL database from version 11 to 12"
+echo " which is needed to convert from Bacula Enterprise 2.6 to 4.0 or "
+echo " Standard version 3.0 to 5.0"
echo " "
bindir=@SQL_BINDIR@
+PATH="$bindir:$PATH"
db_name=@db_name@
-if $bindir/mysql $* -f <<END-OF-DATA
+if mysql $* -f <<END-OF-DATA
USE ${db_name};
--- Fix bad index on Media table
-DROP INDEX inx8 ON Media;
-CREATE UNIQUE INDEX inx8 ON Media (VolumeName(128));
-ALTER TABLE File CHANGE FileId FileId BIGINT UNSIGNED NOT NULL AUTO_INCREMENT;
-ALTER TABLE BaseFiles CHANGE FileId FileId BIGINT UNSIGNED NOT NULL;
-ALTER TABLE Job ADD ReadBytes BIGINT UNSIGNED DEFAULT 0 AFTER JobBytes;
-ALTER TABLE Media ADD ActionOnPurge TINYINT DEFAULT 0 AFTER Recycle;
-ALTER TABLE Pool ADD ActionOnPurge TINYINT DEFAULT 0 AFTER Recycle;
+ALTER TABLE JobMedia DROP Stripe ;
+ALTER TABLE JobMedia DROP Copy ;
+ALTER TABLE Job ADD COLUMN HasCache TINYINT DEFAULT 0 AFTER HasBase;
+ALTER TABLE Job ADD COLUMN Comment BLOB AFTER HasCache;
+ALTER TABLE JobHisto ADD COLUMN HasCache TINYINT DEFAULT 0 AFTER HasBase;
+ALTER TABLE JobHisto ADD COLUMN Comment BLOB AFTER HasCache;
-DELETE FROM Version;
-INSERT INTO Version (VersionId) VALUES (11);
+ALTER TABLE Status ADD COLUMN Severity int;
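+-- Backfill Severity: default every existing status to 15, then adjust the
+-- terminal states (success, errors, canceled, fatal) individually below.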
+UPDATE Status SET Severity = 15;
+UPDATE Status SET Severity = 100 where JobStatus = 'f';
+UPDATE Status SET Severity = 90 where JobStatus = 'A';
+UPDATE Status SET Severity = 10 where JobStatus = 'T';
+UPDATE Status SET Severity = 20 where JobStatus = 'e';
+UPDATE Status SET Severity = 25 where JobStatus = 'E';
+
+CREATE TABLE PathHierarchy
+(
+ PathId integer NOT NULL,
+ PPathId integer NOT NULL,
+ CONSTRAINT pathhierarchy_pkey PRIMARY KEY (PathId)
+);
+
+CREATE INDEX pathhierarchy_ppathid
+ ON PathHierarchy (PPathId);
--- If you have already this table, you can remove it with:
--- DROP TABLE JobHistory;
+CREATE TABLE PathVisibility
+(
+ PathId integer NOT NULL,
+ JobId integer NOT NULL,
+ Size int8 DEFAULT 0,
+ Files int4 DEFAULT 0,
+ CONSTRAINT pathvisibility_pkey PRIMARY KEY (JobId, PathId)
+);
+CREATE INDEX pathvisibility_jobid
+ ON PathVisibility (JobId);
--- Create a table like Job for long term statistics
-CREATE TABLE JobHisto (
- JobId INTEGER UNSIGNED NOT NULL,
- Job TINYBLOB NOT NULL,
- Name TINYBLOB NOT NULL,
- Type BINARY(1) NOT NULL,
- Level BINARY(1) NOT NULL,
- ClientId INTEGER DEFAULT 0,
- JobStatus BINARY(1) NOT NULL,
- SchedTime DATETIME DEFAULT 0,
- StartTime DATETIME DEFAULT 0,
- EndTime DATETIME DEFAULT 0,
- RealEndTime DATETIME DEFAULT 0,
- JobTDate BIGINT UNSIGNED DEFAULT 0,
- VolSessionId INTEGER UNSIGNED DEFAULT 0,
- VolSessionTime INTEGER UNSIGNED DEFAULT 0,
- JobFiles INTEGER UNSIGNED DEFAULT 0,
- JobBytes BIGINT UNSIGNED DEFAULT 0,
- ReadBytes BIGINT UNSIGNED DEFAULT 0,
- JobErrors INTEGER UNSIGNED DEFAULT 0,
- JobMissingFiles INTEGER UNSIGNED DEFAULT 0,
- PoolId INTEGER UNSIGNED DEFAULT 0,
- FileSetId INTEGER UNSIGNED DEFAULT 0,
- PriorJobId INTEGER UNSIGNED DEFAULT 0,
- PurgedFiles TINYINT DEFAULT 0,
- HasBase TINYINT DEFAULT 0,
- INDEX (StartTime)
- );
+CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId );
+
+DELETE FROM Version;
+INSERT INTO Version (VersionId) VALUES (12);
END-OF-DATA
then
#!/bin/sh
#
-# Shell script to update PostgreSQL tables from version 2.0.0 to 3.0.0 or higher
+# Shell script to update PostgreSQL tables from version 11 to 12
#
echo " "
-echo "This script will update a Bacula PostgreSQL database from version 10 to 11"
-echo " which is needed to convert from Bacula version 2.0.0 to 3.0.x or higher"
+echo "This script will update a Bacula PostgreSQL database from version 11 to 12"
+echo " which is needed to convert from Bacula Enterprise 2.6 to 4.0 or "
+echo " Standard version 3.0 to 5.0"
echo " "
+
bindir=@SQL_BINDIR@
+export PATH="$bindir:$PATH"
db_name=@db_name@
-if $bindir/psql -f - -d ${db_name} $* <<END-OF-DATA
+if psql -f - -d ${db_name} $* <<END-OF-DATA
+BEGIN;
+ALTER TABLE JobMedia DROP Copy ;
+ALTER TABLE Job ADD COLUMN HasCache smallint default 0;
+ALTER TABLE Job ADD COLUMN Comment text;
+ALTER TABLE JobHisto ADD COLUMN Comment text;
+ALTER TABLE JobHisto ADD COLUMN HasCache smallint default 0;
--- The alter table operation can be faster with a big maintenance_work_mem
--- Uncomment and adapt this value to your environment
--- SET maintenance_work_mem = '1GB';
+ALTER TABLE Status ADD COLUMN Severity int;
+UPDATE Status SET Severity = 15;
+UPDATE Status SET Severity = 100 where JobStatus = 'f';
+UPDATE Status SET Severity = 90 where JobStatus = 'A';
+UPDATE Status SET Severity = 10 where JobStatus = 'T';
+UPDATE Status SET Severity = 20 where JobStatus = 'e';
+UPDATE Status SET Severity = 25 where JobStatus = 'E';
-BEGIN;
-ALTER TABLE file ALTER fileid TYPE bigint ;
-ALTER TABLE basefiles ALTER fileid TYPE bigint;
-ALTER TABLE job ADD COLUMN readbytes bigint default 0;
-ALTER TABLE media ADD COLUMN ActionOnPurge smallint default 0;
-ALTER TABLE pool ADD COLUMN ActionOnPurge smallint default 0;
+CREATE TABLE PathHierarchy
+(
+ PathId integer NOT NULL,
+ PPathId integer NOT NULL,
+ CONSTRAINT pathhierarchy_pkey PRIMARY KEY (PathId)
+);
+
+CREATE INDEX pathhierarchy_ppathid
+ ON PathHierarchy (PPathId);
+
+CREATE TABLE PathVisibility
+(
+ PathId integer NOT NULL,
+ JobId integer NOT NULL,
+ Size int8 DEFAULT 0,
+ Files int4 DEFAULT 0,
+ CONSTRAINT pathvisibility_pkey PRIMARY KEY (JobId, PathId)
+);
--- Create a table like Job for long term statistics
-CREATE TABLE JobHisto (LIKE Job);
-CREATE INDEX jobhisto_idx ON JobHisto ( starttime );
+CREATE INDEX pathvisibility_jobid
+ ON PathVisibility (JobId);
-UPDATE Version SET VersionId=11;
+
+UPDATE Version SET VersionId=12;
COMMIT;
--- If you have already this table, you can remove it with:
--- DROP TABLE JobHistory;
+CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId );
+
+-- Remove bad PostgreSQL index
+DROP INDEX file_fp_idx;
+
+-- Create the correct one
+-- If you want to create this index during production, you can use
+-- CREATE INDEX CONCURRENTLY file_jpfid_idx ON File (JobId, PathId, FilenameId)
+-- to build it without taking locks (requires PostgreSQL 8.2 or later)
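+-- Note: CREATE INDEX CONCURRENTLY cannot run inside a transaction block, so
+-- it must be issued outside of the BEGIN/COMMIT section above.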
+
+CREATE INDEX file_jpfid_idx on File (JobId, PathId, FilenameId);
+
+ANALYSE;
--- vacuum analyse;
END-OF-DATA
then
echo "Update of Bacula PostgreSQL tables succeeded."
#!/bin/sh
#
-# shell script to update SQLite from version 2.0 to 3.0
+# Shell script to update SQLite tables from Bacula Enterprise 2.6 to 4.0 or
+# Standard version 3.0 to 5.0
#
echo " "
-echo "This script will update a Bacula SQLite database from version 10 to 11"
-echo " which is needed to convert from Bacula version 2.0.x to 3.0.x or higher"
+echo "This script will update a Bacula SQLite database from version 11 to 12"
+echo " which is needed to convert from Bacula Enterprise 2.6 to 4.0 or "
+echo " Standard version 3.0 to 5.0"
echo "Depending on the size of your database,"
echo "this script may take several minutes to run."
echo " "
bindir=@SQL_BINDIR@
+PATH="$bindir:$PATH"
cd @working_dir@
sqlite=@DB_TYPE@
db_name=@db_name@
-${bindir}/${sqlite} $* ${db_name}.db <<END-OF-DATA
--- Can be replaced by
--- ALTER TABLE Job ADD COLUMN (ReadBytes BIGINT UNSIGNED DEFAULT 0);
-
-BEGIN TRANSACTION;
-CREATE TEMPORARY TABLE job_backup AS SELECT * FROM Job;
-DROP TABLE Job;
-
-CREATE TABLE Job
+${sqlite} $* ${db_name}.db <<END-OF-DATA
+BEGIN;
+ALTER TABLE Job ADD COLUMN HasCache TINYINT DEFAULT 0;
+ALTER TABLE Job ADD COLUMN Comment TEXT;
+ALTER TABLE JobHisto ADD COLUMN HasCache TINYINT DEFAULT 0;
+ALTER TABLE JobHisto ADD COLUMN Comment TEXT;
+
+ALTER TABLE Status ADD COLUMN Severity int;
+UPDATE Status SET Severity = 15;
+UPDATE Status SET Severity = 100 where JobStatus = 'f';
+UPDATE Status SET Severity = 90 where JobStatus = 'A';
+UPDATE Status SET Severity = 10 where JobStatus = 'T';
+UPDATE Status SET Severity = 20 where JobStatus = 'e';
+UPDATE Status SET Severity = 25 where JobStatus = 'E';
+
+CREATE TABLE PathHierarchy
(
- JobId INTEGER,
- Job VARCHAR(128) NOT NULL,
- Name VARCHAR(128) NOT NULL,
- Type CHAR NOT NULL,
- Level CHAR NOT NULL,
- ClientId INTEGER REFERENCES Client DEFAULT 0,
- JobStatus CHAR NOT NULL,
- SchedTime DATETIME NOT NULL,
- StartTime DATETIME DEFAULT 0,
- EndTime DATETIME DEFAULT 0,
- RealEndTime DATETIME DEFAULT 0,
- JobTDate BIGINT UNSIGNED DEFAULT 0,
- VolSessionId INTEGER UNSIGNED DEFAULT 0,
- VolSessionTime INTEGER UNSIGNED DEFAULT 0,
- JobFiles INTEGER UNSIGNED DEFAULT 0,
- JobBytes BIGINT UNSIGNED DEFAULT 0,
- ReadBytes BIGINT UNSIGNED DEFAULT 0,
- JobErrors INTEGER UNSIGNED DEFAULT 0,
- JobMissingFiles INTEGER UNSIGNED DEFAULT 0,
- PoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
- FileSetId INTEGER UNSIGNED REFERENCES FileSet DEFAULT 0,
- PriorJobId INTEGER UNSIGNED REFERENCES Job DEFAULT 0,
- PurgedFiles TINYINT DEFAULT 0,
- HasBase TINYINT DEFAULT 0,
- PRIMARY KEY(JobId)
- );
-CREATE INDEX inx6 ON Job (Name);
-
-INSERT INTO Job (JobId, Job, Name, Type, Level, ClientId, JobStatus,
-SchedTime, StartTime, EndTime, RealEndTime, JobTDate, VolSessionId,
-VolSessionTime, JobFiles, JobBytes, JobErrors, JobMissingFiles,
-PoolId, FileSetId, PriorJobId, PurgedFiles, HasBase) SELECT
-JobId, Job, Name, Type, Level, ClientId, JobStatus, SchedTime, StartTime,
-EndTime, RealEndTime, JobTDate, VolSessionId, VolSessionTime, JobFiles,
-JobBytes, JobErrors, JobMissingFiles, PoolId, FileSetId, PriorJobId,
-PurgedFiles, HasBase FROM Job_backup;
-
-DROP TABLE Job_backup;
-
-
--- ----------------------------------------------------------------
--- New ActionOnPurge field
-
-CREATE TEMPORARY TABLE pool_backup AS SELECT * FROM Pool;
-DROP TABLE Pool;
+ PathId integer NOT NULL,
+ PPathId integer NOT NULL,
+ CONSTRAINT pathhierarchy_pkey PRIMARY KEY (PathId)
+);
-CREATE TABLE Pool (
- PoolId INTEGER,
- Name VARCHAR(128) NOT NULL,
- NumVols INTEGER UNSIGNED DEFAULT 0,
- MaxVols INTEGER UNSIGNED DEFAULT 0,
- UseOnce TINYINT DEFAULT 0,
- UseCatalog TINYINT DEFAULT 1,
- AcceptAnyVolume TINYINT DEFAULT 0,
- VolRetention BIGINT UNSIGNED DEFAULT 0,
- VolUseDuration BIGINT UNSIGNED DEFAULT 0,
- MaxVolJobs INTEGER UNSIGNED DEFAULT 0,
- MaxVolFiles INTEGER UNSIGNED DEFAULT 0,
- MaxVolBytes BIGINT UNSIGNED DEFAULT 0,
- AutoPrune TINYINT DEFAULT 0,
- Recycle TINYINT DEFAULT 0,
- ActionOnPurge TINYINT DEFAULT 0,
- PoolType VARCHAR(20) NOT NULL,
- LabelType TINYINT DEFAULT 0,
- LabelFormat VARCHAR(128) NOT NULL,
- Enabled TINYINT DEFAULT 1,
- ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
- RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
- NextPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
- MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
- MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
- MigrationTime BIGINT UNSIGNED DEFAULT 0,
- UNIQUE (Name),
- PRIMARY KEY (PoolId)
- );
+CREATE INDEX pathhierarchy_ppathid
+ ON PathHierarchy (PPathId);
-INSERT INTO Pool (PoolId, Name, NumVols, MaxVols, UseOnce, UseCatalog,
-AcceptAnyVolume, VolRetention, VolUseDuration, MaxVolJobs, MaxVolFiles,
-MaxVolBytes, AutoPrune, Recycle, PoolType, LabelType,
-LabelFormat, Enabled, ScratchPoolId, RecyclePoolId, NextPoolId,
-MigrationHighBytes, MigrationLowBytes, MigrationTime)
-SELECT PoolId, Name, NumVols, MaxVols, UseOnce, UseCatalog, AcceptAnyVolume,
-VolRetention, VolUseDuration, MaxVolJobs, MaxVolFiles, MaxVolBytes, AutoPrune,
-Recycle, PoolType, LabelType, LabelFormat, Enabled,
-ScratchPoolId, RecyclePoolId, NextPoolId, MigrationHighBytes,
-MigrationLowBytes, MigrationTime FROM pool_backup;
-
-DROP TABLE pool_backup;
-
--- ----------------------------------------------------------------
--- New ActionOnPurge field
-
-CREATE TEMPORARY TABLE media_backup AS SELECT * FROM Media;
-DROP TABLE Media;
-
-CREATE TABLE Media (
- MediaId INTEGER,
- VolumeName VARCHAR(128) NOT NULL,
- Slot INTEGER DEFAULT 0,
- PoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
- MediaType VARCHAR(128) NOT NULL,
- MediaTypeId INTEGER UNSIGNED REFERENCES MediaType DEFAULT 0,
- LabelType TINYINT DEFAULT 0,
- FirstWritten DATETIME DEFAULT 0,
- LastWritten DATETIME DEFAULT 0,
- LabelDate DATETIME DEFAULT 0,
- VolJobs INTEGER UNSIGNED DEFAULT 0,
- VolFiles INTEGER UNSIGNED DEFAULT 0,
- VolBlocks INTEGER UNSIGNED DEFAULT 0,
- VolMounts INTEGER UNSIGNED DEFAULT 0,
- VolBytes BIGINT UNSIGNED DEFAULT 0,
- VolParts INTEGER UNSIGNED DEFAULT 0,
- VolErrors INTEGER UNSIGNED DEFAULT 0,
- VolWrites INTEGER UNSIGNED DEFAULT 0,
- VolCapacityBytes BIGINT UNSIGNED DEFAULT 0,
- VolStatus VARCHAR(20) NOT NULL,
- Enabled TINYINT DEFAULT 1,
- Recycle TINYINT DEFAULT 0,
- ActionOnPurge TINYINT DEFAULT 0,
- VolRetention BIGINT UNSIGNED DEFAULT 0,
- VolUseDuration BIGINT UNSIGNED DEFAULT 0,
- MaxVolJobs INTEGER UNSIGNED DEFAULT 0,
- MaxVolFiles INTEGER UNSIGNED DEFAULT 0,
- MaxVolBytes BIGINT UNSIGNED DEFAULT 0,
- InChanger TINYINT DEFAULT 0,
- StorageId INTEGER UNSIGNED REFERENCES Storage DEFAULT 0,
- DeviceId INTEGER UNSIGNED REFERENCES Device DEFAULT 0,
- MediaAddressing TINYINT DEFAULT 0,
- VolReadTime BIGINT UNSIGNED DEFAULT 0,
- VolWriteTime BIGINT UNSIGNED DEFAULT 0,
- EndFile INTEGER UNSIGNED DEFAULT 0,
- EndBlock INTEGER UNSIGNED DEFAULT 0,
- LocationId INTEGER UNSIGNED REFERENCES Location DEFAULT 0,
- RecycleCount INTEGER UNSIGNED DEFAULT 0,
- InitialWrite DATETIME DEFAULT 0,
- ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
- RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
- Comment TEXT,
- PRIMARY KEY(MediaId)
- );
-
-CREATE INDEX inx8 ON Media (PoolId);
+CREATE TABLE PathVisibility
+(
+ PathId integer NOT NULL,
+ JobId integer NOT NULL,
+ Size int8 DEFAULT 0,
+ Files int4 DEFAULT 0,
+ CONSTRAINT pathvisibility_pkey PRIMARY KEY (JobId, PathId)
+);
-INSERT INTO Media (
- MediaId, VolumeName, Slot, PoolId, MediaType, MediaTypeId,
- LabelType, FirstWritten, LastWritten, LabelDate, VolJobs,
- VolFiles, VolBlocks, VolMounts, VolBytes, VolParts, VolErrors,
- VolWrites, VolCapacityBytes, VolStatus, Enabled, Recycle,
- VolRetention, VolUseDuration, MaxVolJobs,
- MaxVolFiles, MaxVolBytes, InChanger, StorageId, DeviceId,
- MediaAddressing, VolReadTime, VolWriteTime, EndFile, EndBlock,
- LocationId, RecycleCount, InitialWrite, ScratchPoolId,
- RecyclePoolId, Comment)
-SELECT MediaId, VolumeName, Slot, PoolId, MediaType, MediaTypeId,
- LabelType, FirstWritten, LastWritten, LabelDate, VolJobs,
- VolFiles, VolBlocks, VolMounts, VolBytes, VolParts, VolErrors,
- VolWrites, VolCapacityBytes, VolStatus, Enabled, Recycle,
- VolRetention, VolUseDuration, MaxVolJobs,
- MaxVolFiles, MaxVolBytes, InChanger, StorageId, DeviceId,
- MediaAddressing, VolReadTime, VolWriteTime, EndFile, EndBlock,
- LocationId, RecycleCount, InitialWrite, ScratchPoolId,
- RecyclePoolId, Comment FROM media_backup;
+CREATE INDEX pathvisibility_jobid
+ ON PathVisibility (JobId);
-DROP TABLE media_backup;
+CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId );
-UPDATE Version SET VersionId=11;
+UPDATE Version SET VersionId=12;
COMMIT;
--- If you have already this table, you can remove it with:
--- DROP TABLE JobHistory;
-
--- Create a table like Job for long term statistics
-CREATE TABLE JobHisto (
- JobId INTEGER,
- Job VARCHAR(128) NOT NULL,
- Name VARCHAR(128) NOT NULL,
- Type CHAR NOT NULL,
- Level CHAR NOT NULL,
- ClientId INTEGER REFERENCES Client DEFAULT 0,
- JobStatus CHAR NOT NULL,
- SchedTime DATETIME NOT NULL,
- StartTime DATETIME DEFAULT 0,
- EndTime DATETIME DEFAULT 0,
- RealEndTime DATETIME DEFAULT 0,
- JobTDate BIGINT UNSIGNED DEFAULT 0,
- VolSessionId INTEGER UNSIGNED DEFAULT 0,
- VolSessionTime INTEGER UNSIGNED DEFAULT 0,
- JobFiles INTEGER UNSIGNED DEFAULT 0,
- JobBytes BIGINT UNSIGNED DEFAULT 0,
- ReadBytes BIGINT UNSIGNED DEFAULT 0,
- JobErrors INTEGER UNSIGNED DEFAULT 0,
- JobMissingFiles INTEGER UNSIGNED DEFAULT 0,
- PoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
- FileSetId INTEGER UNSIGNED REFERENCES FileSet DEFAULT 0,
- PriorJobId INTEGER UNSIGNED REFERENCES Job DEFAULT 0,
- PurgedFiles TINYINT DEFAULT 0,
- HasBase TINYINT DEFAULT 0
- );
-CREATE INDEX inx61 ON JobHisto (StartTime);
+DROP INDEX inx4;
+DROP INDEX inx9; -- may report an error if the index does not exist; it can be ignored
+CREATE INDEX file_jpf_idx ON File (JobId, PathId, FilenameId);
END-OF-DATA
if (mode == UPDATE_AND_FIX) {
db_sql_query(db, cleanup_created_job, NULL, NULL);
db_sql_query(db, cleanup_running_job, NULL, NULL);
- db_sql_query(db, "CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId )" , NULL, NULL);
}
db_close_database(NULL, db);