#!/bin/sh
#
-# shell script to create Bacula PostgreSQL tables
+# shell script to create Bacula Ingres tables
#
bindir=@SQL_BINDIR@
-db_name=${db_name:-@db_name@}
+PATH="$bindir:$PATH"
+#db_name=${db_name:-@db_name@}
+#db_user=${db_user:-@db_user@}
+db_name=@db_name@
+db_user=@db_user@
-sql $* ${db_name} <<END-OF-DATA
+sql "$@" -u"${db_user}" "${db_name}" <<END-OF-DATA
SET AUTOCOMMIT ON\g
primary key (filenameid)
);
-ALTER TABLE filename ALTER COLUMN name SET STATISTICS 1000;
+-- ALTER TABLE filename ALTER COLUMN name SET STATISTICS 1000;
CREATE UNIQUE INDEX filename_name_idx on filename (name);
CREATE SEQUENCE path_seq;
primary key (pathid)
);
-ALTER TABLE path ALTER COLUMN path SET STATISTICS 1000;
+-- ALTER TABLE path ALTER COLUMN path SET STATISTICS 1000;
CREATE UNIQUE INDEX path_name_idx on path (path);
CREATE SEQUENCE file_seq;
primary key (fileid)
);
-CREATE INDEX file_jobid_idx on file (jobid);
-CREATE INDEX file_fp_idx on file (filenameid, pathid);
+CREATE INDEX file_jpfid_idx on file (jobid, pathid, filenameid);
+
+-- If you need performances, you can remove this index
+-- the database engine is able to use the composite index
+-- to find all records with a given JobId
+CREATE INDEX file_jobid_idx on file(jobid);
--
-- Add this if you have a good number of job
--
-- CREATE INDEX file_pathid_idx on file(pathid);
-- CREATE INDEX file_filenameid_idx on file(filenameid);
--- CREATE INDEX file_jpfid_idx on file (jobid, pathid, filenameid);
-CREATE SEQUENCE job_seq;
-CREATE TABLE job
+CREATE SEQUENCE Job_seq;
+CREATE TABLE Job
(
- jobid integer not null default job_seq.nextval,
- job text not null,
- name text not null,
- type char(1) not null,
- level char(1) not null,
- clientid integer default 0,
- jobstatus char(1) not null,
- schedtime timestamp without time zone,
- starttime timestamp without time zone,
- endtime timestamp without time zone,
- realendtime timestamp without time zone,
- jobtdate bigint default 0,
- volsessionid integer default 0,
- volsessiontime integer default 0,
- jobfiles integer default 0,
- jobbytes bigint default 0,
- readbytes bigint default 0,
- joberrors integer default 0,
- jobmissingfiles integer default 0,
- poolid integer default 0,
- filesetid integer default 0,
- purgedfiles smallint default 0,
- hasbase smallint default 0,
- priorjobid integer default 0,
+ JobId integer not null default Job_seq.nextval,
+ Job text not null,
+ Name text not null,
+ Type char(1) not null,
+ Level char(1) not null,
+ ClientId integer default 0,
+ JobStatus char(1) not null,
+ SchedTime timestamp without time zone,
+ StartTime timestamp without time zone,
+ EndTime timestamp without time zone,
+ RealEndTime timestamp without time zone,
+ JobTDate bigint default 0,
+ VolSessionId integer default 0,
+ VolSessionTime integer default 0,
+ JobFiles integer default 0,
+ JobBytes bigint default 0,
+ ReadBytes bigint default 0,
+ JobErrors integer default 0,
+ JobMissingFiles integer default 0,
+ PoolId integer default 0,
+ FilesetId integer default 0,
+ PriorJobid integer default 0,
+ PurgedFiles smallint default 0,
+ HasBase smallint default 0,
+ HasCache smallint default 0,
+ Reviewed smallint default 0,
+ Comment text,
primary key (jobid)
);
CREATE INDEX job_name_idx on job (name);
-- Create a table like Job for long term statistics
-CREATE TABLE JobHisto (LIKE Job);
-CREATE INDEX jobhisto_idx ON jobhisto ( starttime );
+CREATE SEQUENCE JobHisto_seq;
+CREATE TABLE JobHisto
+(
+ JobId integer not null default JobHisto_seq.nextval,
+ Job text not null,
+ Name text not null,
+ Type char(1) not null,
+ Level char(1) not null,
+ ClientId integer default 0,
+ JobStatus char(1) not null,
+ SchedTime timestamp without time zone,
+ StartTime timestamp without time zone,
+ EndTime timestamp without time zone,
+ RealEndTime timestamp without time zone,
+ JobTDate bigint default 0,
+ VolSessionId integer default 0,
+ VolSessionTime integer default 0,
+ JobFiles integer default 0,
+ JobBytes bigint default 0,
+ ReadBytes bigint default 0,
+ JobErrors integer default 0,
+ JobMissingFiles integer default 0,
+ PoolId integer default 0,
+ FilesetId integer default 0,
+ PriorJobid integer default 0,
+ PurgedFiles smallint default 0,
+ HasBase smallint default 0,
+ HasCache smallint default 0,
+ Reviewed smallint default 0,
+ Comment text,
+ primary key (jobid)
+);
+
+CREATE INDEX jobhisto_idx on JobHisto ( StartTime );
CREATE SEQUENCE Location_seq;
primary key (LocationId)
);
-
CREATE SEQUENCE fileset_seq;
CREATE TABLE fileset
(
CREATE SEQUENCE jobmedia_seq;
CREATE TABLE jobmedia
(
- jobmediaid integer not null default jobmedia_seq.nestval,
+ jobmediaid integer not null default jobmedia_seq.nextval,
jobid integer not null,
mediaid integer not null,
firstindex integer default 0,
startblock bigint default 0,
endblock bigint default 0,
volindex integer default 0,
- copy integer default 0,
primary key (jobmediaid)
);
create unique index media_volumename_id on media (volumename);
-
CREATE SEQUENCE MediaType_seq;
CREATE TABLE MediaType (
- MediaTypeId INTEGER DEFAULT MediaType_seq.NEXTVAL,
+ MediaTypeId INTEGER NOT NULL DEFAULT MediaType_seq.nextval,
MediaType TEXT NOT NULL,
- ReadOnly INTEGER DEFAULT 0,
+ ReadOnly INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY(MediaTypeId)
);
CREATE SEQUENCE Storage_seq;
CREATE TABLE Storage (
- StorageId INTEGER DEFAULT Storage_seq.NEXTVAL,
+ StorageId INTEGER NOT NULL DEFAULT Storage_seq.nextval,
Name TEXT NOT NULL,
- AutoChanger INTEGER DEFAULT 0,
+ AutoChanger INTEGER NOT NULL DEFAULT 0,
PRIMARY KEY(StorageId)
);
CREATE SEQUENCE Device_seq;
CREATE TABLE Device (
- DeviceId INTEGER DEFAULT Device_seq.NEXTVAL,
+ DeviceId INTEGER NOT NULL DEFAULT Device_seq.nextval,
Name TEXT NOT NULL,
MediaTypeId INTEGER NOT NULL,
StorageId INTEGER NOT NULL,
CREATE SEQUENCE LocationLog_seq;
CREATE TABLE LocationLog (
- LocLogId INTEGER NOT NULL DEFAULT LocationLog_seq.NEXTVAL,
+ LocLogId INTEGER NOT NULL DEFAULT LocationLog_seq.nextval,
Date timestamp without time zone,
Comment TEXT NOT NULL,
MediaId INTEGER DEFAULT 0,
);
-
CREATE SEQUENCE basefiles_seq;
CREATE TABLE basefiles
(
primary key (baseid)
);
+CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId );
+
CREATE TABLE unsavedfiles
(
UnsavedId integer not null,
);
+CREATE TABLE PathHierarchy
+(
+ PathId integer NOT NULL,
+ PPathId integer NOT NULL,
+ CONSTRAINT pathhierarchy_pkey PRIMARY KEY (PathId)
+);
+
+CREATE INDEX pathhierarchy_ppathid
+ ON PathHierarchy (PPathId);
+
+CREATE TABLE PathVisibility
+(
+ PathId integer NOT NULL,
+ JobId integer NOT NULL,
+ Size int8 DEFAULT 0,
+ Files int4 DEFAULT 0,
+ CONSTRAINT pathvisibility_pkey PRIMARY KEY (JobId, PathId)
+);
+CREATE INDEX pathvisibility_jobid
+ ON PathVisibility (JobId);
+
CREATE TABLE version
(
versionid integer not null
CREATE TABLE Status (
JobStatus CHAR(1) NOT NULL,
- JobStatusLong TEXT,
+ JobStatusLong TEXT,
+ Severity int,
PRIMARY KEY (JobStatus)
);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('C', 'Created, not yet running');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('R', 'Running');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('B', 'Blocked');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('T', 'Completed successfully');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('E', 'Terminated with errors');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('e', 'Non-fatal error');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('f', 'Fatal error');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('D', 'Verify found differences');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('A', 'Canceled by user');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('F', 'Waiting for Client');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('S', 'Waiting for Storage daemon');
+\g
+
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('C', 'Created, not yet running',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('R', 'Running',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('B', 'Blocked',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('T', 'Completed successfully', 10);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('E', 'Terminated with errors', 25);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('e', 'Non-fatal error',20);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('f', 'Fatal error',100);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('D', 'Verify found differences',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('A', 'Canceled by user',90);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('F', 'Waiting for Client',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('S', 'Waiting for Storage daemon',15);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
-   ('m', 'Waiting for new media');
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+   ('m', 'Waiting for new media',20);
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('M', 'Waiting for media mount');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('s', 'Waiting for storage resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('j', 'Waiting for job resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('c', 'Waiting for client resource');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('d', 'Waiting on maximum jobs');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('t', 'Waiting on start time');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('p', 'Waiting on higher priority jobs');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('a', 'SD despooling attributes');
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('i', 'Doing batch insert file records');
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('M', 'Waiting for media mount',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('s', 'Waiting for storage resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('j', 'Waiting for job resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('c', 'Waiting for client resource',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('d', 'Waiting on maximum jobs',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('t', 'Waiting on start time',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('p', 'Waiting on higher priority jobs',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('a', 'SD despooling attributes',15);
+INSERT INTO Status (JobStatus,JobStatusLong,Severity) VALUES
+ ('i', 'Doing batch insert file records',15);
INSERT INTO Version (VersionId) VALUES (11);
* With PostgreSQL, we can use DISTINCT ON(), but with Mysql or Sqlite,
* we need an extra join using JobTDate.
*/
-const char *select_recent_version_with_basejob[4] = {
+const char *select_recent_version_with_basejob[5] = {
/* MySQL */
"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, "
"File.FilenameId AS FilenameId, LStat, MD5 "
"AND T1.JobTDate = Job.JobTDate "
"AND Job.JobId = File.JobId "
"AND T1.PathId = File.PathId "
- "AND T1.FilenameId = File.FilenameId"
+ "AND T1.FilenameId = File.FilenameId",
+
+ /* Ingres (works?) */ /* The DISTINCT ON () permits to avoid extra join */
+ "SELECT DISTINCT ON (FilenameId, PathId) StartTime, JobId, FileId, "
+ "FileIndex, PathId, FilenameId, LStat, MD5 "
+ "FROM "
+ "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5 "
+ "FROM File WHERE JobId IN (%s) "
+ "UNION ALL "
+ "SELECT File.FileId, File.JobId, PathId, FilenameId, "
+ "File.FileIndex, LStat, MD5 "
+ "FROM BaseFiles JOIN File USING (FileId) "
+ "WHERE BaseFiles.JobId IN (%s) "
+ ") AS T JOIN Job USING (JobId) "
+ "ORDER BY FilenameId, PathId, StartTime DESC "
+
};
/* Get the list of the last recent version with a given BaseJob jobid list */
-const char *select_recent_version[4] = {
+const char *select_recent_version[5] = {
/* MySQL */
"SELECT j1.JobId AS JobId, f1.FileId AS FileId, f1.FileIndex AS FileIndex, "
"f1.PathId AS PathId, f1.FilenameId AS FilenameId, "
"AND j1.JobId IN (%s) "
"AND t1.FilenameId = f1.FilenameId "
"AND t1.PathId = f1.PathId "
- "AND j1.JobId = f1.JobId"
+ "AND j1.JobId = f1.JobId",
+
+ /* Ingres */
+ "SELECT DISTINCT ON (FilenameId, PathId) StartTime, JobId, FileId, "
+ "FileIndex, PathId, FilenameId, LStat, MD5 "
+ "FROM File JOIN Job USING (JobId) "
+ "WHERE JobId IN (%s) "
+ "ORDER BY FilenameId, PathId, StartTime DESC "
+
};
/* ====== ua_prune.c */
/* List of SQL commands to create temp table and indicies */
-const char *create_deltabs[4] = {
+const char *create_deltabs[5] = {
/* MySQL */
"CREATE TEMPORARY TABLE DelCandidates ("
"JobId INTEGER UNSIGNED NOT NULL, "
"PurgedFiles TINYINT, "
"FileSetId INTEGER UNSIGNED, "
"JobFiles INTEGER UNSIGNED, "
- "JobStatus CHAR)"};
+ "JobStatus CHAR)",
+ /* Ingres */
+ "DECLARE GLOBAL TEMPORARY TABLE DelCandidates ("
+ "JobId INTEGER NOT NULL, "
+ "PurgedFiles SMALLINT, "
+ "FileSetId INTEGER, "
+ "JobFiles INTEGER, "
+ "JobStatus char(1))"
+};
/* ======= ua_restore.c */
/* List Jobs where a particular file is saved */
-const char *uar_file[4] = {
+const char *uar_file[5] = {
/* Mysql */
"SELECT Job.JobId as JobId,"
"CONCAT(Path.Path,Filename.Name) as Name, "
"AND Client.ClientId=Job.ClientId "
"AND Job.JobId=File.JobId AND File.FileIndex > 0 "
"AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
- "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20"};
+ "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",
+ /* Ingres */
+ "SELECT Job.JobId as JobId,"
+ "Path.Path||Filename.Name as Name, "
+ "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
+ "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
+ "AND Client.ClientId=Job.ClientId "
+ "AND Job.JobId=File.JobId AND File.FileIndex > 0 "
+ "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
+ "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20"
+ };
-const char *uar_create_temp[4] = {
+const char *uar_create_temp[5] = {
/* Mysql */
"CREATE TEMPORARY TABLE temp ("
"JobId INTEGER UNSIGNED NOT NULL,"
"VolumeName TEXT,"
"StartFile INTEGER UNSIGNED,"
"VolSessionId INTEGER UNSIGNED,"
- "VolSessionTime INTEGER UNSIGNED)"};
+ "VolSessionTime INTEGER UNSIGNED)",
+ /* Ingres */
+ "DECLARE GLOBAL TEMPORARY TABLE temp ("
+ "JobId INTEGER NOT NULL,"
+ "JobTDate BIGINT,"
+ "ClientId INTEGER,"
+ "Level CHAR,"
+ "JobFiles INTEGER,"
+ "JobBytes BIGINT,"
+ "StartTime TEXT,"
+ "VolumeName TEXT,"
+ "StartFile INTEGER,"
+ "VolSessionId INTEGER,"
+ "VolSessionTime INTEGER)"
+ };
-const char *uar_create_temp1[4] = {
+const char *uar_create_temp1[5] = {
/* Mysql */
"CREATE TEMPORARY TABLE temp1 ("
"JobId INTEGER UNSIGNED NOT NULL,"
/* SQLite3 */
"CREATE TEMPORARY TABLE temp1 ("
"JobId INTEGER UNSIGNED NOT NULL,"
- "JobTDate BIGINT UNSIGNED)"};
+ "JobTDate BIGINT UNSIGNED)",
+ /* Ingres */
+ "DECLARE GLOBAL TEMPORARY TABLE temp1 ("
+ "JobId INTEGER NOT NULL,"
+ "JobTDate BIGINT)"
+ };
/* Query to get all files in a directory -- no recursing
* Note, for PostgreSQL since it respects the "Single Value
* for each time it was backed up.
*/
-const char *uar_jobid_fileindex_from_dir[4] = {
+const char *uar_jobid_fileindex_from_dir[5] = {
/* Mysql */
"SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId IN (%s) "
"AND Job.ClientId=Client.ClientId "
"AND Path.PathId=File.Pathid "
"AND Filename.FilenameId=File.FilenameId "
- "GROUP BY File.FileIndex "};
+ "GROUP BY File.FileIndex ",
+ /* Ingres */
+ "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
+ "WHERE Job.JobId IN (%s) "
+ "AND Job.JobId=File.JobId "
+ "AND Path.Path='%s' "
+ "AND Client.Name='%s' "
+ "AND Job.ClientId=Client.ClientId "
+ "AND Path.PathId=File.Pathid "
+ "AND Filename.FilenameId=File.FilenameId"
+ };
-const char *sql_get_max_connections[4] = {
+const char *sql_get_max_connections[5] = {
/* Mysql */
"SHOW VARIABLES LIKE 'max_connections'",
/* Postgresql */
/* SQLite */
"SELECT 0",
/* SQLite3 */
+ "SELECT 0",
+ /* Ingres (TODO) */
"SELECT 0"
};
/* Row number of the max_connections setting */
-const uint32_t sql_get_max_connections_index[4] = {
+const uint32_t sql_get_max_connections_index[5] = {
/* Mysql */
1,
/* Postgresql */
/* SQLite */
0,
/* SQLite3 */
+ 0,
+ /* Ingres (TODO) */
0
};