#define OSDependentInit() InitWinAPIWrapper()
#undef ENABLE_NLS
+
#if defined(BUILDING_DLL)
# define DLL_IMP_EXP _declspec(dllexport)
#elif defined(USING_DLL)
# define DLL_IMP_EXP
#endif
+#if defined(USING_CATS)
+# define CATS_IMP_EXP _declspec(dllimport)
+#else
+# define CATS_IMP_EXP
+#endif
+
#else
#define DLL_IMP_EXP
+#define CATS_IMP_EXP
#define OSDependentInit()
#define tape_open open
LIBSRCS = mysql.c bdb.c bdb_create.c bdb_get.c bdb_update.c \
bdb_delete.c bdb_find.c bdb_list.c \
- sql.c sql_create.c sql_delete.c sql_find.c \
+ sql.c sql_cmds.c sql_create.c sql_delete.c sql_find.c \
sql_get.c sql_list.c sql_update.c sqlite.c \
postgresql.c
LIBOBJS = mysql.o bdb.o bdb_create.o bdb_get.o bdb_update.o \
bdb_delete.o bdb_find.o bdb_list.o \
- sql.o sql_create.o sql_delete.o sql_find.o \
+ sql.o sql_cmds.o sql_create.o sql_delete.o sql_find.o \
sql_get.o sql_list.o sql_update.o sqlite.o \
postgresql.o
uint32_t bacula_db_version = 0;
-/* Forward referenced functions */
-
-extern const char *working_directory;
-
/* List of open databases */
static BQUEUE db_list = {&db_list, &db_list};
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
#ifdef HAVE_BACULA_DB
-/* Forward referenced functions */
-
/* -----------------------------------------------------------------------
*
* Bacula specific defines and subroutines
return 1;
}
+void db_make_inchanger_unique(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
+{
+ return;
+}
#endif /* HAVE_BACULA_DB */
#include "protos.h"
#include "jcr.h"
+#include "sql_cmds.h"
/*
* Some functions exported by sql.c for use within the
* cats directory.
*/
-void list_result(B_DB *mdb, DB_LIST_HANDLER *send, void *ctx, e_list_type type);
+void list_result(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *send, void *ctx, e_list_type type);
void list_dashes(B_DB *mdb, DB_LIST_HANDLER *send, void *ctx);
int get_sql_record_max(JCR *jcr, B_DB *mdb);
bool check_tables_version(JCR *jcr, B_DB *mdb);
void _db_unlock(const char *file, int line, B_DB *mdb);
void _db_lock(const char *file, int line, B_DB *mdb);
+void print_dashes(B_DB *mdb);
+void print_result(B_DB *mdb);
+int QueryDB(const char *file, int line, JCR *jcr, B_DB *db, char *select_cmd);
+int InsertDB(const char *file, int line, JCR *jcr, B_DB *db, char *select_cmd);
+int DeleteDB(const char *file, int line, JCR *jcr, B_DB *db, char *delete_cmd);
+int UpdateDB(const char *file, int line, JCR *jcr, B_DB *db, char *update_cmd);
+void split_path_and_file(JCR *jcr, B_DB *mdb, const char *fname);
#endif /* __SQL_H_ */
--- /dev/null
+/*
+ *
+ * This file contains all the SQL commands issued by the Director
+ *
+ * Kern Sibbald, July MMII
+ *
+ * Version $Id$
+ */
+/*
+ Copyright (C) 2002-2006 Kern Sibbald
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ version 2 as amended with additional clauses defined in the
+ file LICENSE in the main source directory.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   file LICENSE for additional details.
+
+ */
+
+#include "bacula.h"
+#include "cats.h"
+
+/* For ua_cmds.c */
+const char *list_pool = "SELECT * FROM Pool WHERE PoolId=%s";
+
+/* For ua_dotcmds.c */
+const char *client_backups =
+ "SELECT DISTINCT Job.JobId,Client.Name as Client,Level,StartTime,"
+ "JobFiles,JobBytes,VolumeName,MediaType,FileSet"
+ " FROM Client,Job,JobMedia,Media,FileSet"
+ " WHERE Client.Name='%s'"
+ " AND FileSet='%s'"
+ " AND Client.ClientId=Job.ClientId"
+ " AND JobStatus='T' AND Type='B'"
+ " AND JobMedia.JobId=Job.JobId AND JobMedia.MediaId=Media.MediaId"
+ " AND Job.FileSetId=FileSet.FileSetId"
+ " ORDER BY Job.StartTime";
+
+
+/* ====== ua_prune.c */
+
+const char *del_File = "DELETE FROM File WHERE JobId=%s";
+const char *upd_Purged = "UPDATE Job Set PurgedFiles=1 WHERE JobId=%s";
+const char *cnt_DelCand = "SELECT count(*) FROM DelCandidates";
+const char *del_Job = "DELETE FROM Job WHERE JobId=%s";
+const char *del_MAC = "DELETE FROM MAC WHERE JobId=%s";
+const char *del_JobMedia = "DELETE FROM JobMedia WHERE JobId=%s";
+const char *cnt_JobMedia = "SELECT count(*) FROM JobMedia WHERE MediaId=%s";
+const char *sel_JobMedia = "SELECT JobId FROM JobMedia WHERE MediaId=%s";
+
+/* Select JobIds for File deletion. */
+const char *select_job =
+ "SELECT JobId from Job "
+ "WHERE JobTDate<%s "
+ "AND ClientId=%s "
+ "AND PurgedFiles=0";
+
+/* Delete temp tables and indexes */
+const char *drop_deltabs[] = {
+ "DROP TABLE DelCandidates",
+ "DROP INDEX DelInx1",
+ NULL};
+
+
+/* List of SQL commands to create temp table and indices */
+const char *create_deltabs[] = {
+ "CREATE TEMPORARY TABLE DelCandidates ("
+#if defined(HAVE_MYSQL)
+ "JobId INTEGER UNSIGNED NOT NULL, "
+ "PurgedFiles TINYINT, "
+ "FileSetId INTEGER UNSIGNED, "
+ "JobFiles INTEGER UNSIGNED, "
+ "JobStatus BINARY(1))",
+#elif defined(HAVE_POSTGRESQL)
+ "JobId INTEGER NOT NULL, "
+ "PurgedFiles SMALLINT, "
+ "FileSetId INTEGER, "
+ "JobFiles INTEGER, "
+ "JobStatus char(1))",
+#else
+ "JobId INTEGER UNSIGNED NOT NULL, "
+ "PurgedFiles TINYINT, "
+ "FileSetId INTEGER UNSIGNED, "
+ "JobFiles INTEGER UNSIGNED, "
+ "JobStatus CHAR)",
+#endif
+ "CREATE INDEX DelInx1 ON DelCandidates (JobId)",
+ NULL};
+
+/* Fill candidates table with all Jobs subject to being deleted.
+ * This is used for pruning Jobs (first the files, then the Jobs).
+ */
+const char *insert_delcand =
+ "INSERT INTO DelCandidates "
+ "SELECT JobId,PurgedFiles,FileSetId,JobFiles,JobStatus FROM Job "
+ "WHERE Type='%c' "
+ "AND JobTDate<%s "
+ "AND ClientId=%s";
+
+/* Select Jobs from the DelCandidates table that have a
+ * more recent backup -- i.e. are not the only backup.
+ * This is the list of Jobs to delete for a Backup Job.
+ * At the same time, we select "orphaned" jobs
+ * (i.e. no files, ...) for deletion.
+ */
+const char *select_backup_del =
+ "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
+ "FROM Job,DelCandidates "
+ "WHERE (Job.JobTDate<%s AND ((DelCandidates.JobFiles=0) OR "
+ "(DelCandidates.JobStatus!='T'))) OR "
+ "(Job.JobTDate>%s "
+ "AND Job.ClientId=%s "
+ "AND Job.Level='F' AND Job.JobStatus='T' AND Job.Type='B' "
+ "AND Job.FileSetId=DelCandidates.FileSetId)";
+
+/* Select Jobs from the DelCandidates table that have a
+ * more recent InitCatalog -- i.e. are not the only InitCatalog
+ * This is the list of Jobs to delete for a Verify Job.
+ */
+const char *select_verify_del =
+ "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
+ "FROM Job,DelCandidates "
+ "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
+ "(Job.JobTDate>%s "
+ "AND Job.ClientId=%s "
+ "AND Job.Type='V' AND Job.Level='V' AND Job.JobStatus='T' "
+ "AND Job.FileSetId=DelCandidates.FileSetId)";
+
+
+/* Select Jobs from the DelCandidates table.
+ * This is the list of Jobs to delete for a Restore Job.
+ */
+const char *select_restore_del =
+ "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
+ "FROM Job,DelCandidates "
+ "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
+ "(Job.JobTDate>%s "
+ "AND Job.ClientId=%s "
+ "AND Job.Type='R')";
+
+/* Select Jobs from the DelCandidates table.
+ * This is the list of Jobs to delete for an Admin Job.
+ */
+const char *select_admin_del =
+ "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
+ "FROM Job,DelCandidates "
+ "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
+ "(Job.JobTDate>%s "
+ "AND Job.ClientId=%s "
+ "AND Job.Type='D')";
+
+
+/* ======= ua_restore.c */
+const char *uar_count_files =
+ "SELECT JobFiles FROM Job WHERE JobId=%s";
+
+/* List last 20 Jobs */
+const char *uar_list_jobs =
+ "SELECT JobId,Client.Name as Client,StartTime,Level as "
+ "JobLevel,JobFiles,JobBytes "
+ "FROM Client,Job WHERE Client.ClientId=Job.ClientId AND JobStatus='T' "
+ "AND Type='B' ORDER BY StartTime DESC LIMIT 20";
+
+#ifdef HAVE_MYSQL
+/* MYSQL IS NOT STANDARD SQL !!!!! */
+/* List Jobs where a particular file is saved */
+const char *uar_file =
+ "SELECT Job.JobId as JobId,"
+ "CONCAT(Path.Path,Filename.Name) as Name, "
+ "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
+ "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
+ "AND Client.ClientId=Job.ClientId "
+ "AND Job.JobId=File.JobId "
+ "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
+ "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20";
+#else
+/* List Jobs where a particular file is saved */
+const char *uar_file =
+ "SELECT Job.JobId as JobId,"
+ "Path.Path||Filename.Name as Name, "
+ "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
+ "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
+ "AND Client.ClientId=Job.ClientId "
+ "AND Job.JobId=File.JobId "
+ "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
+ "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20";
+#endif
+
+
+/*
+ * Find all files for a particular JobId and insert them into
+ * the tree during a restore.
+ */
+const char *uar_sel_files =
+ "SELECT Path.Path,Filename.Name,FileIndex,JobId,LStat "
+ "FROM File,Filename,Path "
+ "WHERE File.JobId=%s AND Filename.FilenameId=File.FilenameId "
+ "AND Path.PathId=File.PathId";
+
+const char *uar_del_temp = "DROP TABLE temp";
+const char *uar_del_temp1 = "DROP TABLE temp1";
+
+const char *uar_create_temp =
+ "CREATE TEMPORARY TABLE temp ("
+#ifdef HAVE_POSTGRESQL
+ "JobId INTEGER NOT NULL,"
+ "JobTDate BIGINT,"
+ "ClientId INTEGER,"
+ "Level CHAR,"
+ "JobFiles INTEGER,"
+ "JobBytes BIGINT,"
+ "StartTime TEXT,"
+ "VolumeName TEXT,"
+ "StartFile INTEGER,"
+ "VolSessionId INTEGER,"
+ "VolSessionTime INTEGER)";
+#else
+ "JobId INTEGER UNSIGNED NOT NULL,"
+ "JobTDate BIGINT UNSIGNED,"
+ "ClientId INTEGER UNSIGNED,"
+ "Level CHAR,"
+ "JobFiles INTEGER UNSIGNED,"
+ "JobBytes BIGINT UNSIGNED,"
+ "StartTime TEXT,"
+ "VolumeName TEXT,"
+ "StartFile INTEGER UNSIGNED,"
+ "VolSessionId INTEGER UNSIGNED,"
+ "VolSessionTime INTEGER UNSIGNED)";
+#endif
+
+const char *uar_create_temp1 =
+ "CREATE TEMPORARY TABLE temp1 ("
+#ifdef HAVE_POSTGRESQL
+ "JobId INTEGER NOT NULL,"
+ "JobTDate BIGINT)";
+#else
+ "JobId INTEGER UNSIGNED NOT NULL,"
+ "JobTDate BIGINT UNSIGNED)";
+#endif
+
+const char *uar_last_full =
+ "INSERT INTO temp1 SELECT Job.JobId,JobTdate "
+ "FROM Client,Job,JobMedia,Media,FileSet WHERE Client.ClientId=%s "
+ "AND Job.ClientId=%s "
+ "AND Job.StartTime<'%s' "
+ "AND Level='F' AND JobStatus='T' AND Type='B' "
+ "AND JobMedia.JobId=Job.JobId "
+ "AND JobMedia.MediaId=Media.MediaId "
+ "AND Job.FileSetId=FileSet.FileSetId "
+ "AND FileSet.FileSet='%s' "
+ "%s"
+ "ORDER BY Job.JobTDate DESC LIMIT 1";
+
+const char *uar_full =
+ "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,"
+ "Job.ClientId,Job.Level,Job.JobFiles,Job.JobBytes,"
+ "StartTime,VolumeName,JobMedia.StartFile,VolSessionId,VolSessionTime "
+ "FROM temp1,Job,JobMedia,Media WHERE temp1.JobId=Job.JobId "
+ "AND Level='F' AND JobStatus='T' AND Type='B' "
+ "AND JobMedia.JobId=Job.JobId "
+ "AND JobMedia.MediaId=Media.MediaId";
+
+const char *uar_dif =
+ "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,Job.ClientId,"
+ "Job.Level,Job.JobFiles,Job.JobBytes,"
+ "Job.StartTime,Media.VolumeName,JobMedia.StartFile,"
+ "Job.VolSessionId,Job.VolSessionTime "
+ "FROM Job,JobMedia,Media,FileSet "
+ "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' "
+ "AND Job.ClientId=%s "
+ "AND JobMedia.JobId=Job.JobId "
+ "AND JobMedia.MediaId=Media.MediaId "
+ "AND Job.Level='D' AND JobStatus='T' AND Type='B' "
+ "AND Job.FileSetId=FileSet.FileSetId "
+ "AND FileSet.FileSet='%s' "
+ "%s"
+ "ORDER BY Job.JobTDate DESC LIMIT 1";
+
+const char *uar_inc =
+ "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,Job.ClientId,"
+ "Job.Level,Job.JobFiles,Job.JobBytes,"
+ "Job.StartTime,Media.VolumeName,JobMedia.StartFile,"
+ "Job.VolSessionId,Job.VolSessionTime "
+ "FROM Job,JobMedia,Media,FileSet "
+ "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' "
+ "AND Job.ClientId=%s "
+ "AND JobMedia.JobId=Job.JobId "
+ "AND JobMedia.MediaId=Media.MediaId "
+ "AND Job.Level='I' AND JobStatus='T' AND Type='B' "
+ "AND Job.FileSetId=FileSet.FileSetId "
+ "AND FileSet.FileSet='%s' "
+ "%s";
+
+#ifdef HAVE_POSTGRESQL
+/* Note, PostgreSQL will have a much uglier looking
+ * list since it cannot do GROUP BY of different values.
+ */
+const char *uar_list_temp =
+ "SELECT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName,StartFile"
+ " FROM temp"
+ " ORDER BY StartTime,StartFile ASC";
+#else
+const char *uar_list_temp =
+ "SELECT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName,StartFile"
+ " FROM temp"
+ " GROUP BY JobId ORDER BY StartTime,StartFile ASC";
+#endif
+
+
+const char *uar_sel_jobid_temp = "SELECT JobId FROM temp ORDER BY StartTime ASC";
+
+const char *uar_sel_all_temp1 = "SELECT * FROM temp1";
+
+const char *uar_sel_all_temp = "SELECT * FROM temp";
+
+
+
+/* Select FileSet names for this Client */
+const char *uar_sel_fileset =
+ "SELECT DISTINCT FileSet.FileSet FROM Job,"
+ "Client,FileSet WHERE Job.FileSetId=FileSet.FileSetId "
+ "AND Job.ClientId=%s AND Client.ClientId=%s "
+ "ORDER BY FileSet.FileSet";
+
+/* Find MediaType used by this Job */
+const char *uar_mediatype =
+ "SELECT MediaType FROM JobMedia,Media WHERE JobMedia.JobId=%s "
+ "AND JobMedia.MediaId=Media.MediaId";
+
+/*
+ * Find JobId, FileIndex for a given path/file and date
+ * for use when inserting individual files into the tree.
+ */
+const char *uar_jobid_fileindex =
+ "SELECT Job.JobId, File.FileIndex FROM Job,File,Path,Filename,Client "
+ "WHERE Job.JobId=File.JobId "
+ "AND Job.StartTime<'%s' "
+ "AND Path.Path='%s' "
+ "AND Filename.Name='%s' "
+ "AND Client.Name='%s' "
+ "AND Job.ClientId=Client.ClientId "
+ "AND Path.PathId=File.PathId "
+ "AND Filename.FilenameId=File.FilenameId "
+ "ORDER BY Job.StartTime DESC LIMIT 1";
+
+const char *uar_jobids_fileindex =
+ "SELECT Job.JobId, File.FileIndex FROM Job,File,Path,Filename,Client "
+ "WHERE Job.JobId IN (%s) "
+ "AND Job.JobId=File.JobId "
+ "AND Job.StartTime<'%s' "
+ "AND Path.Path='%s' "
+ "AND Filename.Name='%s' "
+ "AND Client.Name='%s' "
+ "AND Job.ClientId=Client.ClientId "
+ "AND Path.PathId=File.PathId "
+ "AND Filename.FilenameId=File.FilenameId "
+ "ORDER BY Job.StartTime DESC LIMIT 1";
+
+/* Query to get all files in a directory -- no recursing
+ * Note, for PostgreSQL since it respects the "Single Value
+ * rule", the results of the SELECT will be unoptimized.
+ * I.e. the same file will be restored multiple times, once
+ * for each time it was backed up.
+ */
+
+#ifdef HAVE_POSTGRESQL
+const char *uar_jobid_fileindex_from_dir =
+ "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
+ "WHERE Job.JobId IN (%s) "
+ "AND Job.JobId=File.JobId "
+ "AND Path.Path='%s' "
+ "AND Client.Name='%s' "
+ "AND Job.ClientId=Client.ClientId "
+ "AND Path.PathId=File.Pathid "
+ "AND Filename.FilenameId=File.FilenameId";
+#else
+const char *uar_jobid_fileindex_from_dir =
+ "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
+ "WHERE Job.JobId IN (%s) "
+ "AND Job.JobId=File.JobId "
+ "AND Path.Path='%s' "
+ "AND Client.Name='%s' "
+ "AND Job.ClientId=Client.ClientId "
+ "AND Path.PathId=File.Pathid "
+ "AND Filename.FilenameId=File.FilenameId "
+ "GROUP BY File.FileIndex ";
+#endif
+
+/* Query to get list of files from table -- presumably built by an external program */
+const char *uar_jobid_fileindex_from_table =
+ "SELECT JobId, FileIndex from %s";
--- /dev/null
+/*
+ Copyright (C) 2000-2006 Kern Sibbald
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ version 2 as amended with additional clauses defined in the
+ file LICENSE in the main source directory.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   file LICENSE for additional details.
+
+ */
+
+extern const char CATS_IMP_EXP *client_backups;
+extern const char CATS_IMP_EXP *list_pool;
+extern const char CATS_IMP_EXP *drop_deltabs[];
+extern const char CATS_IMP_EXP *create_deltabs[];
+extern const char CATS_IMP_EXP *insert_delcand;
+extern const char CATS_IMP_EXP *select_backup_del;
+extern const char CATS_IMP_EXP *select_verify_del;
+extern const char CATS_IMP_EXP *select_restore_del;
+extern const char CATS_IMP_EXP *select_admin_del;
+extern const char CATS_IMP_EXP *select_job;
+extern const char CATS_IMP_EXP *del_File;
+extern const char CATS_IMP_EXP *cnt_File;
+extern const char CATS_IMP_EXP *cnt_DelCand;
+extern const char CATS_IMP_EXP *del_Job;
+extern const char CATS_IMP_EXP *del_MAC;
+extern const char CATS_IMP_EXP *del_JobMedia;
+extern const char CATS_IMP_EXP *cnt_JobMedia;
+extern const char CATS_IMP_EXP *sel_JobMedia;
+extern const char CATS_IMP_EXP *upd_Purged;
+
+extern const char CATS_IMP_EXP *uar_list_jobs;
+extern const char CATS_IMP_EXP *uar_file;
+extern const char CATS_IMP_EXP *uar_count_files;
+extern const char CATS_IMP_EXP *uar_sel_files;
+extern const char CATS_IMP_EXP *uar_del_temp;
+extern const char CATS_IMP_EXP *uar_del_temp1;
+extern const char CATS_IMP_EXP *uar_create_temp;
+extern const char CATS_IMP_EXP *uar_create_temp1;
+extern const char CATS_IMP_EXP *uar_last_full;
+extern const char CATS_IMP_EXP *uar_full;
+extern const char CATS_IMP_EXP *uar_inc;
+extern const char CATS_IMP_EXP *uar_list_temp;
+extern const char CATS_IMP_EXP *uar_sel_all_temp1;
+extern const char CATS_IMP_EXP *uar_sel_fileset;
+extern const char CATS_IMP_EXP *uar_mediatype;
+extern const char CATS_IMP_EXP *uar_jobid_fileindex;
+extern const char CATS_IMP_EXP *uar_dif;
+extern const char CATS_IMP_EXP *uar_sel_all_temp;
+extern const char CATS_IMP_EXP *uar_count_files;
+extern const char CATS_IMP_EXP *uar_jobids_fileindex;
+extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_dir;
+extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_table;
+extern const char CATS_IMP_EXP *uar_sel_jobid_temp;
static int db_create_path_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
-/* Imported subroutines */
-extern void print_dashes(B_DB *mdb);
-extern void print_result(B_DB *mdb);
-extern int QueryDB(const char *file, int line, JCR *jcr, B_DB *db, char *select_cmd);
-extern int InsertDB(const char *file, int line, JCR *jcr, B_DB *db, char *select_cmd);
-extern int UpdateDB(const char *file, int line, JCR *jcr, B_DB *db, char *update_cmd);
-extern void split_path_and_file(JCR *jcr, B_DB *mdb, const char *fname);
-
-
/* Create a new record for the Job
* Returns: false on failure
* true on success
* -----------------------------------------------------------------------
*/
-/* Imported subroutines */
-extern void print_dashes(B_DB *mdb);
-extern void print_result(B_DB *mdb);
-extern int QueryDB(const char *file, int line, JCR *jcr, B_DB *db, char *select_cmd);
-extern int DeleteDB(const char *file, int line, JCR *jcr, B_DB *db, char *delete_cmd);
-
/*
* Delete Pool record, must also delete all associated
* Media records.
* -----------------------------------------------------------------------
*/
-/* Imported subroutines */
-extern void print_result(B_DB *mdb);
-extern int QueryDB(const char *file, int line, JCR *jcr, B_DB *db, char *select_cmd);
-
/*
* Find job start time if JobId specified, otherwise
* find last full save for Incremental and Differential saves.
static int db_get_path_record(JCR *jcr, B_DB *mdb);
-/* Imported subroutines */
-extern void print_result(B_DB *mdb);
-extern int QueryDB(const char *file, int line, JCR *jcr, B_DB *db, char *select_cmd);
-extern void split_path_and_file(JCR *jcr, B_DB *mdb, const char *fname);
-
-
-
/*
* Given a full filename (with path), look up the File record
* (with attributes) in the database.
* -----------------------------------------------------------------------
*/
-/* Imported subroutines */
-extern void list_result(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type);
-extern int QueryDB(const char *file, int line, JCR *jcr, B_DB *db, char *select_cmd);
-
-
/*
* Submit general SQL query
*/
* -----------------------------------------------------------------------
*/
-/* Imported subroutines */
-extern void print_result(B_DB *mdb);
-extern int UpdateDB(const char *file, int line, JCR *jcr, B_DB *db, char *update_cmd);
-
/* -----------------------------------------------------------------------
*
* Generic Routines (or almost generic)
}
}
-#else
-
-void
-db_make_inchanger_unique(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- /* DUMMY func for Bacula_DB */
- return;
-}
-
#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL*/
* -----------------------------------------------------------------------
*/
-extern const char *working_directory;
-
/* List of open databases */
static BQUEUE db_list = {&db_list, &db_list};
mountreq.c msgchan.c next_vol.c newvol.c \
pythondir.c \
recycle.c restore.c run_conf.c \
- scheduler.c sql_cmds.c \
+ scheduler.c \
ua_acl.c ua_cmds.c ua_dotcmds.c \
ua_query.c \
ua_input.c ua_label.c ua_output.c ua_prune.c \
mountreq.o msgchan.o next_vol.o newvol.o \
pythondir.o \
recycle.o restore.o run_conf.o \
- scheduler.o sql_cmds.o \
+ scheduler.o \
ua_acl.o ua_cmds.o ua_dotcmds.o \
ua_query.o \
ua_input.o ua_label.o ua_output.o ua_prune.o \
+++ /dev/null
-/*
- *
- * This file contains all the SQL commands issued by the Director
- *
- * Kern Sibbald, July MMII
- *
- * Version $Id$
- */
-/*
- Copyright (C) 2002-2006 Kern Sibbald
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
-
- */
-
-#include "bacula.h"
-#include "dird.h"
-
-/* For ua_cmds.c */
-const char *list_pool = "SELECT * FROM Pool WHERE PoolId=%s";
-
-/* For ua_dotcmds.c */
-const char *client_backups =
- "SELECT DISTINCT Job.JobId,Client.Name as Client,Level,StartTime,"
- "JobFiles,JobBytes,VolumeName,MediaType,FileSet"
- " FROM Client,Job,JobMedia,Media,FileSet"
- " WHERE Client.Name='%s'"
- " AND FileSet='%s'"
- " AND Client.ClientId=Job.ClientId"
- " AND JobStatus='T' AND Type='B'"
- " AND JobMedia.JobId=Job.JobId AND JobMedia.MediaId=Media.MediaId"
- " AND Job.FileSetId=FileSet.FileSetId"
- " ORDER BY Job.StartTime";
-
-
-/* ====== ua_prune.c */
-
-const char *del_File = "DELETE FROM File WHERE JobId=%s";
-const char *upd_Purged = "UPDATE Job Set PurgedFiles=1 WHERE JobId=%s";
-const char *cnt_DelCand = "SELECT count(*) FROM DelCandidates";
-const char *del_Job = "DELETE FROM Job WHERE JobId=%s";
-const char *del_MAC = "DELETE FROM MAC WHERE JobId=%s";
-const char *del_JobMedia = "DELETE FROM JobMedia WHERE JobId=%s";
-const char *cnt_JobMedia = "SELECT count(*) FROM JobMedia WHERE MediaId=%s";
-const char *sel_JobMedia = "SELECT JobId FROM JobMedia WHERE MediaId=%s";
-
-/* Select JobIds for File deletion. */
-const char *select_job =
- "SELECT JobId from Job "
- "WHERE JobTDate<%s "
- "AND ClientId=%s "
- "AND PurgedFiles=0";
-
-/* Delete temp tables and indexes */
-const char *drop_deltabs[] = {
- "DROP TABLE DelCandidates",
- "DROP INDEX DelInx1",
- NULL};
-
-
-/* List of SQL commands to create temp table and indicies */
-const char *create_deltabs[] = {
- "CREATE TEMPORARY TABLE DelCandidates ("
-#if defined(HAVE_MYSQL)
- "JobId INTEGER UNSIGNED NOT NULL, "
- "PurgedFiles TINYINT, "
- "FileSetId INTEGER UNSIGNED, "
- "JobFiles INTEGER UNSIGNED, "
- "JobStatus BINARY(1))",
-#elif defined(HAVE_POSTGRESQL)
- "JobId INTEGER NOT NULL, "
- "PurgedFiles SMALLINT, "
- "FileSetId INTEGER, "
- "JobFiles INTEGER, "
- "JobStatus char(1))",
-#else
- "JobId INTEGER UNSIGNED NOT NULL, "
- "PurgedFiles TINYINT, "
- "FileSetId INTEGER UNSIGNED, "
- "JobFiles INTEGER UNSIGNED, "
- "JobStatus CHAR)",
-#endif
- "CREATE INDEX DelInx1 ON DelCandidates (JobId)",
- NULL};
-
-/* Fill candidates table with all Jobs subject to being deleted.
- * This is used for pruning Jobs (first the files, then the Jobs).
- */
-const char *insert_delcand =
- "INSERT INTO DelCandidates "
- "SELECT JobId,PurgedFiles,FileSetId,JobFiles,JobStatus FROM Job "
- "WHERE Type='%c' "
- "AND JobTDate<%s "
- "AND ClientId=%s";
-
-/* Select Jobs from the DelCandidates table that have a
- * more recent backup -- i.e. are not the only backup.
- * This is the list of Jobs to delete for a Backup Job.
- * At the same time, we select "orphanned" jobs
- * (i.e. no files, ...) for deletion.
- */
-const char *select_backup_del =
- "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
- "FROM Job,DelCandidates "
- "WHERE (Job.JobTDate<%s AND ((DelCandidates.JobFiles=0) OR "
- "(DelCandidates.JobStatus!='T'))) OR "
- "(Job.JobTDate>%s "
- "AND Job.ClientId=%s "
- "AND Job.Level='F' AND Job.JobStatus='T' AND Job.Type='B' "
- "AND Job.FileSetId=DelCandidates.FileSetId)";
-
-/* Select Jobs from the DelCandidates table that have a
- * more recent InitCatalog -- i.e. are not the only InitCatalog
- * This is the list of Jobs to delete for a Verify Job.
- */
-const char *select_verify_del =
- "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
- "FROM Job,DelCandidates "
- "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
- "(Job.JobTDate>%s "
- "AND Job.ClientId=%s "
- "AND Job.Type='V' AND Job.Level='V' AND Job.JobStatus='T' "
- "AND Job.FileSetId=DelCandidates.FileSetId)";
-
-
-/* Select Jobs from the DelCandidates table.
- * This is the list of Jobs to delete for a Restore Job.
- */
-const char *select_restore_del =
- "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
- "FROM Job,DelCandidates "
- "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
- "(Job.JobTDate>%s "
- "AND Job.ClientId=%s "
- "AND Job.Type='R')";
-
-/* Select Jobs from the DelCandidates table.
- * This is the list of Jobs to delete for an Admin Job.
- */
-const char *select_admin_del =
- "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
- "FROM Job,DelCandidates "
- "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
- "(Job.JobTDate>%s "
- "AND Job.ClientId=%s "
- "AND Job.Type='D')";
-
-
-/* ======= ua_restore.c */
-const char *uar_count_files =
- "SELECT JobFiles FROM Job WHERE JobId=%s";
-
-/* List last 20 Jobs */
-const char *uar_list_jobs =
- "SELECT JobId,Client.Name as Client,StartTime,Level as "
- "JobLevel,JobFiles,JobBytes "
- "FROM Client,Job WHERE Client.ClientId=Job.ClientId AND JobStatus='T' "
- "AND Type='B' ORDER BY StartTime DESC LIMIT 20";
-
-#ifdef HAVE_MYSQL
-/* MYSQL IS NOT STANDARD SQL !!!!! */
-/* List Jobs where a particular file is saved */
-const char *uar_file =
- "SELECT Job.JobId as JobId,"
- "CONCAT(Path.Path,Filename.Name) as Name, "
- "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
- "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
- "AND Client.ClientId=Job.ClientId "
- "AND Job.JobId=File.JobId "
- "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
- "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20";
-#else
-/* List Jobs where a particular file is saved */
-const char *uar_file =
- "SELECT Job.JobId as JobId,"
- "Path.Path||Filename.Name as Name, "
- "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
- "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
- "AND Client.ClientId=Job.ClientId "
- "AND Job.JobId=File.JobId "
- "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
- "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20";
-#endif
-
-
-/*
- * Find all files for a particular JobId and insert them into
- * the tree during a restore.
- */
-const char *uar_sel_files =
- "SELECT Path.Path,Filename.Name,FileIndex,JobId,LStat "
- "FROM File,Filename,Path "
- "WHERE File.JobId=%s AND Filename.FilenameId=File.FilenameId "
- "AND Path.PathId=File.PathId";
-
-const char *uar_del_temp = "DROP TABLE temp";
-const char *uar_del_temp1 = "DROP TABLE temp1";
-
-const char *uar_create_temp =
- "CREATE TEMPORARY TABLE temp ("
-#ifdef HAVE_POSTGRESQL
- "JobId INTEGER NOT NULL,"
- "JobTDate BIGINT,"
- "ClientId INTEGER,"
- "Level CHAR,"
- "JobFiles INTEGER,"
- "JobBytes BIGINT,"
- "StartTime TEXT,"
- "VolumeName TEXT,"
- "StartFile INTEGER,"
- "VolSessionId INTEGER,"
- "VolSessionTime INTEGER)";
-#else
- "JobId INTEGER UNSIGNED NOT NULL,"
- "JobTDate BIGINT UNSIGNED,"
- "ClientId INTEGER UNSIGNED,"
- "Level CHAR,"
- "JobFiles INTEGER UNSIGNED,"
- "JobBytes BIGINT UNSIGNED,"
- "StartTime TEXT,"
- "VolumeName TEXT,"
- "StartFile INTEGER UNSIGNED,"
- "VolSessionId INTEGER UNSIGNED,"
- "VolSessionTime INTEGER UNSIGNED)";
-#endif
-
-const char *uar_create_temp1 =
- "CREATE TEMPORARY TABLE temp1 ("
-#ifdef HAVE_POSTGRESQL
- "JobId INTEGER NOT NULL,"
- "JobTDate BIGINT)";
-#else
- "JobId INTEGER UNSIGNED NOT NULL,"
- "JobTDate BIGINT UNSIGNED)";
-#endif
-
-const char *uar_last_full =
- "INSERT INTO temp1 SELECT Job.JobId,JobTdate "
- "FROM Client,Job,JobMedia,Media,FileSet WHERE Client.ClientId=%s "
- "AND Job.ClientId=%s "
- "AND Job.StartTime<'%s' "
- "AND Level='F' AND JobStatus='T' AND Type='B' "
- "AND JobMedia.JobId=Job.JobId "
- "AND JobMedia.MediaId=Media.MediaId "
- "AND Job.FileSetId=FileSet.FileSetId "
- "AND FileSet.FileSet='%s' "
- "%s"
- "ORDER BY Job.JobTDate DESC LIMIT 1";
-
-const char *uar_full =
- "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,"
- "Job.ClientId,Job.Level,Job.JobFiles,Job.JobBytes,"
- "StartTime,VolumeName,JobMedia.StartFile,VolSessionId,VolSessionTime "
- "FROM temp1,Job,JobMedia,Media WHERE temp1.JobId=Job.JobId "
- "AND Level='F' AND JobStatus='T' AND Type='B' "
- "AND JobMedia.JobId=Job.JobId "
- "AND JobMedia.MediaId=Media.MediaId";
-
-const char *uar_dif =
- "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,Job.ClientId,"
- "Job.Level,Job.JobFiles,Job.JobBytes,"
- "Job.StartTime,Media.VolumeName,JobMedia.StartFile,"
- "Job.VolSessionId,Job.VolSessionTime "
- "FROM Job,JobMedia,Media,FileSet "
- "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' "
- "AND Job.ClientId=%s "
- "AND JobMedia.JobId=Job.JobId "
- "AND JobMedia.MediaId=Media.MediaId "
- "AND Job.Level='D' AND JobStatus='T' AND Type='B' "
- "AND Job.FileSetId=FileSet.FileSetId "
- "AND FileSet.FileSet='%s' "
- "%s"
- "ORDER BY Job.JobTDate DESC LIMIT 1";
-
-const char *uar_inc =
- "INSERT INTO temp SELECT Job.JobId,Job.JobTDate,Job.ClientId,"
- "Job.Level,Job.JobFiles,Job.JobBytes,"
- "Job.StartTime,Media.VolumeName,JobMedia.StartFile,"
- "Job.VolSessionId,Job.VolSessionTime "
- "FROM Job,JobMedia,Media,FileSet "
- "WHERE Job.JobTDate>%s AND Job.StartTime<'%s' "
- "AND Job.ClientId=%s "
- "AND JobMedia.JobId=Job.JobId "
- "AND JobMedia.MediaId=Media.MediaId "
- "AND Job.Level='I' AND JobStatus='T' AND Type='B' "
- "AND Job.FileSetId=FileSet.FileSetId "
- "AND FileSet.FileSet='%s' "
- "%s";
-
-#ifdef HAVE_POSTGRESQL
-/* Note, the PostgreSQL will have a much uglier looking
- * list since it cannot do GROUP BY of different values.
- */
-const char *uar_list_temp =
- "SELECT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName,StartFile"
- " FROM temp"
- " ORDER BY StartTime,StartFile ASC";
-#else
-const char *uar_list_temp =
- "SELECT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName,StartFile"
- " FROM temp"
- " GROUP BY JobId ORDER BY StartTime,StartFile ASC";
-#endif
-
-
-const char *uar_sel_jobid_temp = "SELECT JobId FROM temp ORDER BY StartTime ASC";
-
-const char *uar_sel_all_temp1 = "SELECT * FROM temp1";
-
-const char *uar_sel_all_temp = "SELECT * FROM temp";
-
-
-
-/* Select FileSet names for this Client */
-const char *uar_sel_fileset =
- "SELECT DISTINCT FileSet.FileSet FROM Job,"
- "Client,FileSet WHERE Job.FileSetId=FileSet.FileSetId "
- "AND Job.ClientId=%s AND Client.ClientId=%s "
- "ORDER BY FileSet.FileSet";
-
-/* Find MediaType used by this Job */
-const char *uar_mediatype =
- "SELECT MediaType FROM JobMedia,Media WHERE JobMedia.JobId=%s "
- "AND JobMedia.MediaId=Media.MediaId";
-
-/*
- * Find JobId, FileIndex for a given path/file and date
- * for use when inserting individual files into the tree.
- */
-const char *uar_jobid_fileindex =
- "SELECT Job.JobId, File.FileIndex FROM Job,File,Path,Filename,Client "
- "WHERE Job.JobId=File.JobId "
- "AND Job.StartTime<'%s' "
- "AND Path.Path='%s' "
- "AND Filename.Name='%s' "
- "AND Client.Name='%s' "
- "AND Job.ClientId=Client.ClientId "
- "AND Path.PathId=File.PathId "
- "AND Filename.FilenameId=File.FilenameId "
- "ORDER BY Job.StartTime DESC LIMIT 1";
-
-const char *uar_jobids_fileindex =
- "SELECT Job.JobId, File.FileIndex FROM Job,File,Path,Filename,Client "
- "WHERE Job.JobId IN (%s) "
- "AND Job.JobId=File.JobId "
- "AND Job.StartTime<'%s' "
- "AND Path.Path='%s' "
- "AND Filename.Name='%s' "
- "AND Client.Name='%s' "
- "AND Job.ClientId=Client.ClientId "
- "AND Path.PathId=File.PathId "
- "AND Filename.FilenameId=File.FilenameId "
- "ORDER BY Job.StartTime DESC LIMIT 1";
-
-/* Query to get all files in a directory -- no recursing
- * Note, for PostgreSQL since it respects the "Single Value
- * rule", the results of the SELECT will be unoptimized.
- * I.e. the same file will be restored multiple times, once
- * for each time it was backed up.
- */
-
-#ifdef HAVE_POSTGRESQL
-const char *uar_jobid_fileindex_from_dir =
- "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
- "WHERE Job.JobId IN (%s) "
- "AND Job.JobId=File.JobId "
- "AND Path.Path='%s' "
- "AND Client.Name='%s' "
- "AND Job.ClientId=Client.ClientId "
- "AND Path.PathId=File.Pathid "
- "AND Filename.FilenameId=File.FilenameId";
-#else
-const char *uar_jobid_fileindex_from_dir =
- "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
- "WHERE Job.JobId IN (%s) "
- "AND Job.JobId=File.JobId "
- "AND Path.Path='%s' "
- "AND Client.Name='%s' "
- "AND Job.ClientId=Client.ClientId "
- "AND Path.PathId=File.Pathid "
- "AND Filename.FilenameId=File.FilenameId "
- "GROUP BY File.FileIndex ";
-#endif
-
-/* Query to get list of files from table -- presuably built by an external program */
-const char *uar_jobid_fileindex_from_table =
- "SELECT JobId, FileIndex from %s";
extern int r_first;
extern int r_last;
extern struct s_res resources[];
-extern const char *client_backups;
/* Imported functions */
extern void do_messages(UAContext *ua, const char *cmd);
#define MAX_DEL_LIST_LEN 2000000
-/* Imported variables */
-extern const char *select_job;
-extern const char *drop_deltabs[];
-extern const char *create_deltabs[];
-extern const char *insert_delcand;
-extern const char *select_backup_del;
-extern const char *select_verify_del;
-extern const char *select_restore_del;
-extern const char *select_admin_del;
-extern const char *cnt_File;
-extern const char *cnt_DelCand;
-extern const char *del_Job;
-extern const char *del_MAC;
-extern const char *del_JobMedia;
-extern const char *cnt_JobMedia;
-extern const char *sel_JobMedia;
-
-
/* In memory list of JobIds */
struct s_file_del_ctx {
JobId_t *JobId;
#include "bacula.h"
#include "dird.h"
-extern const char *del_File;
-extern const char *upd_Purged;
-
/* Forward referenced functions */
static int purge_files_from_client(UAContext *ua, CLIENT *client);
static int purge_jobs_from_client(UAContext *ua, CLIENT *client);
/* Imported functions */
extern void print_bsr(UAContext *ua, RBSR *bsr);
-/* Imported variables */
-extern const char *uar_list_jobs, *uar_file, *uar_sel_files;
-extern const char *uar_del_temp, *uar_del_temp1, *uar_create_temp;
-extern const char *uar_create_temp1, *uar_last_full, *uar_full;
-extern const char *uar_inc, *uar_list_temp, *uar_sel_jobid_temp;
-extern const char *uar_sel_all_temp1, *uar_sel_fileset, *uar_mediatype;
-extern const char *uar_jobid_fileindex, *uar_dif, *uar_sel_all_temp;
-extern const char *uar_count_files, *uar_jobids_fileindex;
-extern const char *uar_jobid_fileindex_from_dir;
-extern const char *uar_jobid_fileindex_from_table;
-
/* Forward referenced functions */
#include "bacula.h"
#include "dird.h"
-/* External variables */
-extern const char *list_pool; /* in sql_cmds.c */
-
/* Imported functions */
void update_slots(UAContext *ua);
ECHO_CMD=@
DIRS= dll \
+ cats \
filed \
dird \
stored \
AR := $(MINGW_BIN)/mingw32-ar
RANLIB := $(MINGW_BIN)/mingw32-ranlib
WINDRES := $(MINGW_BIN)/mingw32-windres
+DLLTOOL := $(MINGW_BIN)/../mingw32/bin/dlltool
OBJCPY := $(MINGW_BIN)/mingw32-objcopy
NSIS_DIR := $(DEPKGS)/nsis
LIBS_ZLIB := \
$(DEPKGS)/lib/libz.a
-LIBS_SQL := \
+LIBS_MYSQL := \
$(DEPKGS)/lib/libmysql.a
+LIBS_POSTGRESQL := \
+ $(DEPKGS)/lib/libpq.a
+
LIBS_SSL := \
$(DEPKGS)/lib/libssl.dll.a
$(DEPKGS)/lib/wx_dll/libwxmsw26_core.a \
$(DEPKGS)/lib/wx_dll/libwxbase26.a
+LIBS_CATS := \
+ $(LIBDIR)/libcats.a
+
LIBS_BACULA := \
$(LIBDIR)/libbacula.a
-DHAVE_MINGW \
-DHAVE_ZLIB_H \
-DHAVE_LIBZ \
- -DHAVE_MYSQL \
-DHAVE_CRYPTO \
- -DWIN32_VSS \
-DHAVE_OPENSSL \
-DHAVE_TLS
--- /dev/null
+*.o
+*.d
+libcats.exp
+bdb
+mysql
+pgsql
#
# Makefile for win32 bacula executables
# Using MinGW cross-compiler on GNU/Linux
-#
-# Written for Bacula by Howard Thomson, April 2006
-#
+#
+# Written by Robert Nelson, June 2006
+#
include ../Makefile.inc
INCLUDES = \
- $(INCLUDE_GCC) \
- $(INCLUDE_MINGW) \
$(INCLUDE_PTHREADS) \
$(INCLUDE_BACULA) \
$(INCLUDE_ZLIB) \
- $(INCLUDE_VSS) \
- $(INCLUDE_ICONS) \
$(INCLUDE_OPENSSL) \
$(INCLUDE_MYSQL)
DEFINES = \
- -DWIN32 \
$(HAVES)
+VPATH = ../../cats
+
######################################################################
-# Files in src/cats
-
-LIB_OBJS = \
- $(OBJDIR)/bdb.o \
- $(OBJDIR)/bdb_create.o \
- $(OBJDIR)/bdb_delete.o \
- $(OBJDIR)/bdb_find.o \
- $(OBJDIR)/bdb_get.o \
- $(OBJDIR)/bdb_list.o \
- $(OBJDIR)/bdb_update.o \
- $(OBJDIR)/mysql.o \
- $(OBJDIR)/postgresql.o \
- $(OBJDIR)/sql.o \
- $(OBJDIR)/sql_create.o \
- $(OBJDIR)/sql_delete.o \
- $(OBJDIR)/sql_find.o \
- $(OBJDIR)/sql_get.o \
- $(OBJDIR)/sql_list.o \
- $(OBJDIR)/sql_update.o \
- $(OBJDIR)/sqlite.o
+# Files in src/cats
+
+BDB_OBJS = \
+ bdb.o \
+ bdb_create.o \
+ bdb_delete.o \
+ bdb_find.o \
+ bdb_get.o \
+ bdb_list.o \
+ bdb_update.o \
+
+SQL_OBJS = \
+ sql.o \
+ sql_cmds.o \
+ sql_create.o \
+ sql_delete.o \
+ sql_find.o \
+ sql_get.o \
+ sql_list.o \
+ sql_update.o \
+
+LIBS_DLL = \
+ $(LIBS_BACULA)
######################################################################
.PHONY: all clean
-all: $(LIBDIR)/libcats.a
+all: $(BINDIR)/cats_mysql.dll $(BINDIR)/cats_pgsql.dll $(BINDIR)/cats_bdb.dll
clean:
@echo "Cleaning `pwd`"
- $(ECHO_CMD)rm -f $(OBJDIR)/*.[od] $(LIBDIR)/libcats.a
+ $(call clean_obj,$(addprefix $(OBJDIR)/mysql/,mysql.o $(SQL_OBJS)))
+ $(call clean_obj,$(addprefix $(OBJDIR)/pgsql/,postgresql.o $(SQL_OBJS)))
+ $(call clean_obj,$(addprefix $(OBJDIR)/bdb/,sql_cmds.o $(BDB_OBJS)))
+ $(call clean_exe,$(BINDIR)/cats_mysql.dll)
+ $(call clean_exe,$(BINDIR)/cats_pgsql.dll)
+ $(call clean_exe,$(BINDIR)/cats_bdb.dll)
+ $(ECHO_CMD)rm -f $(OBJDIR)/libcats.exp $(LIBDIR)/libcats.a
-#
-# Rules
-#
+$(LIBDIR)/libcats.a $(OBJDIR)/libcats.exp: bacula_cats.def
+ $(DLLTOOL) --dllname bacula_cats.dll --no-export-all-symbols --input-def bacula_cats.def --output-exp $(OBJDIR)/libcats.exp --output-lib $(LIBDIR)/libcats.a $^
+
+$(BINDIR)/cats_mysql.dll: $(addprefix $(OBJDIR)/mysql/,mysql.o $(SQL_OBJS)) $(OBJDIR)/libcats.exp
+ @echo "Linking $@"
+ $(call checkdir,$@)
+ $(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows $^ $(LIBS_MYSQL) $(LIBS_DLL) -o $@
+
+$(BINDIR)/cats_pgsql.dll: $(addprefix $(OBJDIR)/pgsql/,postgresql.o $(SQL_OBJS)) $(OBJDIR)/libcats.exp
+ @echo "Linking $@"
+ $(call checkdir,$@)
+ $(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows $^ $(LIBS_POSTGRESQL) $(LIBS_DLL) -o $@
-$(LIBDIR)/libcats.a: $(LIB_OBJS)
- @echo "Updating archive $@"
+$(BINDIR)/cats_bdb.dll: $(addprefix $(OBJDIR)/bdb/,sql_cmds.o $(BDB_OBJS)) $(OBJDIR)/libcats.exp
+ @echo "Linking $@"
$(call checkdir,$@)
- $(ECHO_CMD)$(AR) rs $@ $^ $(LIB_OBJS)
+ $(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows $^ $(LIBS_DLL) -o $@
+
+#
+# Rules for generating from ../cats
+#
include ../Makefile.rules
-ifneq ($(MAKECMDGOALS),clean)
-include $(patsubst %.o,%.d,$(filter-out %.res,$(LIB_OBJS)))
-endif
+define Link_Dll
+$(OBJDIR)/$(1)/%.o: %.c
+ @echo "Compiling $$<"
+ $$(call checkdir,$$@)
+ $(ECHO_CMD)$(CXX) -DBUILDING_CATS -DUSING_DLL -DHAVE_$(2) $(CFLAGS) -c $$< -o $$@
+
+$(OBJDIR)/$(1)/%.o: %.cpp
+ @echo "Compiling $$<"
+ $$(call checkdir,$$@)
+ $(ECHO_CMD)$(CXX) -DBUILDING_CATS -DUSING_DLL -DHAVE_$(2) $(CFLAGS) -c $$< -o $$@
+
+endef
+
+$(eval $(call Link_Dll,mysql,MYSQL))
+
+$(eval $(call Link_Dll,pgsql,POSTGRESQL))
+
+$(eval $(call Link_Dll,bdb,BACULA_DB))
+
--- /dev/null
+LIBRARY bacula_cats.dll
+EXPORTS
+
+; bdb.c:
+; mysql.c:
+; postgresql.c:
+; sqlite.c:
+_Z12db_sql_queryP4B_DBPKcPFiPviPPcES3_
+;_Z13db_next_indexP3JCRP4B_DBPcS3_
+_Z16db_escape_stringPcS_i
+_Z16db_init_databaseP3JCRPKcS2_S2_S2_iS2_i
+_Z16db_open_databaseP3JCRP4B_DB
+_Z17db_close_databaseP3JCRP4B_DB
+
+; sql.c:
+_Z10_db_unlockPKciP4B_DB
+_Z11db_strerrorP4B_DB
+;_Z11list_dashesP4B_DBPFvPvPKcES1_
+;_Z11list_resultP3JCRP4B_DBPFvPvPKcES3_11e_list_type
+_Z16db_int64_handlerPviPPc
+_Z18db_end_transactionP3JCRP4B_DB
+;_Z18get_sql_record_maxP3JCRP4B_DB
+;_Z19split_path_and_fileP3JCRP4B_DBPKc
+;_Z20check_tables_versionP3JCRP4B_DB
+_Z20db_start_transactionP3JCRP4B_DB
+;_Z7QueryDBPKciP3JCRP4B_DBPc
+_Z8_db_lockPKciP4B_DB
+;_Z8DeleteDBPKciP3JCRP4B_DBPc
+;_Z8InsertDBPKciP3JCRP4B_DBPc
+;_Z8UpdateDBPKciP3JCRP4B_DBPc
+
+; bdb_create.c:
+; sql_create.c:
+_Z20db_create_job_recordP3JCRP4B_DBP7JOB_DBR
+_Z21db_create_pool_recordP3JCRP4B_DBP8POOL_DBR
+_Z22db_create_media_recordP3JCRP4B_DBP9MEDIA_DBR
+_Z23db_create_client_recordP3JCRP4B_DBP10CLIENT_DBR
+;_Z23db_create_device_recordP3JCRP4B_DBP10DEVICE_DBR
+_Z24db_create_counter_recordP3JCRP4B_DBP11COUNTER_DBR
+_Z24db_create_fileset_recordP3JCRP4B_DBP11FILESET_DBR
+_Z24db_create_storage_recordP3JCRP4B_DBP11STORAGE_DBR
+_Z25db_create_jobmedia_recordP3JCRP4B_DBP12JOBMEDIA_DBR
+_Z26db_create_mediatype_recordP3JCRP4B_DBP13MEDIATYPE_DBR
+_Z32db_create_file_attributes_recordP3JCRP4B_DBP8ATTR_DBR
+
+; bdb_delete.c:
+; sql_delete.c:
+_Z21db_delete_pool_recordP3JCRP4B_DBP8POOL_DBR
+;_Z21db_purge_media_recordP3JCRP4B_DBP9MEDIA_DBR
+_Z22db_delete_media_recordP3JCRP4B_DBP9MEDIA_DBR
+
+; bdb_find.c:
+; sql_find.c:
+_Z18db_find_last_jobidP3JCRP4B_DBPKcP7JOB_DBR
+_Z19db_find_next_volumeP3JCRP4B_DBibP9MEDIA_DBR
+_Z22db_find_job_start_timeP3JCRP4B_DBP7JOB_DBRPPc
+_Z24db_find_failed_job_sinceP3JCRP4B_DBP7JOB_DBRPcRi
+
+; bdb_get.c:
+; sql_get.c:
+_Z15db_get_pool_idsP3JCRP4B_DBPiPPj
+_Z16db_get_media_idsP3JCRP4B_DBjPiPPj
+_Z17db_get_client_idsP3JCRP4B_DBPiPPj
+_Z17db_get_job_recordP3JCRP4B_DBP7JOB_DBR
+_Z18db_get_pool_recordP3JCRP4B_DBP8POOL_DBR
+_Z19db_get_media_recordP3JCRP4B_DBP9MEDIA_DBR
+_Z20db_get_client_recordP3JCRP4B_DBP10CLIENT_DBR
+;_Z21db_get_counter_recordP3JCRP4B_DBP11COUNTER_DBR
+_Z21db_get_fileset_recordP3JCRP4B_DBP11FILESET_DBR
+_Z23db_get_job_volume_namesP3JCRP4B_DBjPPc
+;_Z23db_get_num_pool_recordsP3JCRP4B_DB
+;_Z24db_get_num_media_recordsP3JCRP4B_DB
+_Z28db_get_job_volume_parametersP3JCRP4B_DBjPP10VOL_PARAMS
+_Z29db_get_file_attributes_recordP3JCRP4B_DBPcP7JOB_DBRP8FILE_DBR
+
+; bdb_list.c:
+; sql_list.c:
+_Z17db_list_sql_queryP3JCRP4B_DBPKcPFvPvS4_ES5_i11e_list_type
+_Z18db_list_job_totalsP3JCRP4B_DBP7JOB_DBRPFvPvPKcES5_
+_Z19db_list_job_recordsP3JCRP4B_DBP7JOB_DBRPFvPvPKcES5_11e_list_type
+_Z20db_list_pool_recordsP3JCRP4B_DBP8POOL_DBRPFvPvPKcES5_11e_list_type
+_Z21db_list_files_for_jobP3JCRP4B_DBjPFvPvPKcES3_
+_Z21db_list_media_recordsP3JCRP4B_DBP9MEDIA_DBRPFvPvPKcES5_11e_list_type
+_Z22db_list_client_recordsP3JCRP4B_DBPFvPvPKcES3_11e_list_type
+_Z24db_list_jobmedia_recordsP3JCRP4B_DBjPFvPvPKcES3_11e_list_type
+
+; bdb_update.c:
+; sql_update.c:
+_Z19db_mark_file_recordP3JCRP4B_DBjj
+_Z21db_update_pool_recordP3JCRP4B_DBP8POOL_DBR
+_Z22db_update_media_recordP3JCRP4B_DBP9MEDIA_DBR
+_Z23db_update_client_recordP3JCRP4B_DBP10CLIENT_DBR
+_Z24db_make_inchanger_uniqueP3JCRP4B_DBP9MEDIA_DBR
+_Z24db_update_counter_recordP3JCRP4B_DBP11COUNTER_DBR
+_Z24db_update_job_end_recordP3JCRP4B_DBP7JOB_DBR
+_Z24db_update_media_defaultsP3JCRP4B_DBP9MEDIA_DBR
+_Z24db_update_storage_recordP3JCRP4B_DBP11STORAGE_DBR
+_Z26db_update_job_start_recordP3JCRP4B_DBP7JOB_DBR
+_Z28db_add_digest_to_file_recordP3JCRP4B_DBjPci
+
+client_backups DATA
+list_pool DATA
+drop_deltabs DATA
+create_deltabs DATA
+insert_delcand DATA
+select_backup_del DATA
+select_verify_del DATA
+select_restore_del DATA
+select_admin_del DATA
+select_job DATA
+del_File DATA
+;cnt_File DATA
+cnt_DelCand DATA
+del_Job DATA
+del_MAC DATA
+del_JobMedia DATA
+cnt_JobMedia DATA
+sel_JobMedia DATA
+upd_Purged DATA
+
+uar_list_jobs DATA
+uar_file DATA
+uar_count_files DATA
+uar_sel_files DATA
+uar_del_temp DATA
+uar_del_temp1 DATA
+uar_create_temp DATA
+uar_create_temp1 DATA
+uar_last_full DATA
+uar_full DATA
+uar_inc DATA
+uar_list_temp DATA
+uar_sel_all_temp1 DATA
+uar_sel_fileset DATA
+uar_mediatype DATA
+uar_jobid_fileindex DATA
+uar_dif DATA
+uar_sel_all_temp DATA
+;uar_count_files DATA (duplicate of entry above)
+uar_jobids_fileindex DATA
+uar_jobid_fileindex_from_dir DATA
+uar_jobid_fileindex_from_table DATA
+uar_sel_jobid_temp DATA
--- /dev/null
+rem \r
+rem shell script to create Bacula database(s)\r
+rem \r
+rem Nothing to do \r
-rem
-rem Script to create Bacula database(s)
-rem
-
-%SQL_BINDIR%\mysql $* -e "CREATE DATABASE bacula;"
-set RESULT=%ERRORLEVEL%
-if %RESULT% GTR 0 goto :ERROR
-echo "Creation of bacula database succeeded."
-exit /b 0
-
-:ERROR
-echo "Creation of bacula database failed."
-exit /b %RESULT%
+rem\r
+rem Script to create Bacula database(s)\r
+rem\r
+\r
+%SQL_BINDIR%\mysql $* -e "CREATE DATABASE bacula;"\r
+set RESULT=%ERRORLEVEL%\r
+if %RESULT% GTR 0 goto :ERROR\r
+echo "Creation of bacula database succeeded."\r
+exit /b 0\r
+\r
+:ERROR\r
+echo "Creation of bacula database failed."\r
+exit /b %RESULT%\r
--- /dev/null
+rem \r
+rem shell script to create Bacula database(s)\r
+rem \r
+\r
+bindir=@SQL_BINDIR@\r
+\r
+rem use SQL_ASCII to be able to put any filename into\r
+rem the database even those created with unusual character sets\r
+ENCODING="ENCODING 'SQL_ASCII'"\r
+rem use UTF8 if you are using standard Unix/Linux LANG specifications\r
+rem that use UTF8 -- this is normally the default and *should* be\r
+rem your standard. Bacula consoles work correctly *only* with UTF8.\r
+rem ENCODING="ENCODING 'UTF8'"\r
+ \r
+$bindir/psql -f create_postgresql_database.sql -d template1 $*\r
+if ERRORLEVEL 1 GOTO :ERROR\r
+echo "Creation of bacula database succeeded."\r
+EXIT /b 0\r
+GOTO :EOF\r
+\r
+:ERROR\r
+echo "Creation of bacula database failed."\r
+EXIT /b 1\r
--- /dev/null
+CREATE DATABASE bacula $ENCODING;\r
+ALTER DATABASE bacula SET datestyle TO 'ISO, YMD';\r
-rem
-rem This script deletes a catalog dump
-rem
-del /f %WORKING_DIR%/bacula.sql
+rem\r
+rem This script deletes a catalog dump\r
+rem\r
+del /f %WORKING_DIR%/bacula.sql\r
--- /dev/null
+rem\r
+rem \r
+rem shell script to drop Bacula database(s)\r
+rem \r
+rem Nothing to do \r
--- /dev/null
+rem \r
+rem shell script to Delete the Bacula database (same as deleting \r
+rem the tables)\r
+rem \r
+\r
+del /f @working_dir@/control.db\r
+del /f @working_dir@/jobs.db\r
+del /f @working_dir@/pools.db\r
+del /f @working_dir@/media.db\r
+del /f @working_dir@/jobmedia.db\r
+del /f @working_dir@/client.db\r
+del /f @working_dir@/fileset.db\r
-rem
-rem shell script to drop Bacula database(s)
-rem
-
-%SQL_BINDIR%/mysql $* -f -e "DROP DATABASE bacula;"
-set RESULT=%ERRORLEVEL%
-if %RESULT% GTR 0 goto :ERROR
-echo "Drop of bacula database succeeded."
-exit /b 0
-
-:ERROR
-echo "Drop of bacula database failed."
-exit /b %RESULT%
+rem\r
+rem shell script to drop Bacula database(s)\r
+rem\r
+\r
+%SQL_BINDIR%/mysql $* -f -e "DROP DATABASE bacula;"\r
+set RESULT=%ERRORLEVEL%\r
+if %RESULT% GTR 0 goto :ERROR\r
+echo "Drop of bacula database succeeded."\r
+exit /b 0\r
+\r
+:ERROR\r
+echo "Drop of bacula database failed."\r
+exit /b %RESULT%\r
-rem
-rem Script to delete Bacula tables for MySQL
-rem
-
-if %SQL_BINDIR%/mysql $* < drop_mysql_tables.sql
-set RESULT=%ERRORLEVEL%
-if %RESULT% GTR 0 goto :ERROR
-echo "Deletion of Bacula MySQL tables succeeded."
-exit /b 0
-
-:ERROR
-echo "Deletion of Bacula MySQL tables failed."
-exit /b %RESULT%
+rem\r
+rem Script to delete Bacula tables for MySQL\r
+rem\r
+\r
+if %SQL_BINDIR%/mysql $* < drop_mysql_tables.sql\r
+set RESULT=%ERRORLEVEL%\r
+if %RESULT% GTR 0 goto :ERROR\r
+echo "Deletion of Bacula MySQL tables succeeded."\r
+exit /b 0\r
+\r
+:ERROR\r
+echo "Deletion of Bacula MySQL tables failed."\r
+exit /b %RESULT%\r
-USE bacula;
-DROP TABLE IF EXISTS Filename;
-DROP TABLE IF EXISTS Path;
-DROP TABLE IF EXISTS LongName;
-DROP TABLE IF EXISTS Device;
-DROP TABLE IF EXISTS Storage;
-DROP TABLE IF EXISTS MediaType;
-DROP TABLE IF EXISTS File;
-DROP TABLE IF EXISTS Client;
-DROP TABLE IF EXISTS Job;
-DROP TABLE IF EXISTS Media;
-DROP TABLE IF EXISTS MAC;
-DROP TABLE IF EXISTS JobMedia;
-DROP TABLE IF EXISTS Pool;
-DROP TABLE IF EXISTS MultiVolume;
-DROP TABLE IF EXISTS FileSave;
-DROP TABLE IF EXISTS FileSet;
-DROP TABLE IF EXISTS Version;
-DROP TABLE IF EXISTS Counters;
-DROP TABLE IF EXISTS BaseFiles;
-DROP TABLE IF EXISTS UnsavedFiles;
-DROP TABLE IF EXISTS CDImages;
-DROP TABLE IF EXISTS Status;
-DROP TABLE IF EXISTS MAC;
-DROP TABLE IF EXISTS Location;
+USE bacula;\r
+DROP TABLE IF EXISTS Filename;\r
+DROP TABLE IF EXISTS Path;\r
+DROP TABLE IF EXISTS LongName;\r
+DROP TABLE IF EXISTS Device;\r
+DROP TABLE IF EXISTS Storage;\r
+DROP TABLE IF EXISTS MediaType;\r
+DROP TABLE IF EXISTS File;\r
+DROP TABLE IF EXISTS Client;\r
+DROP TABLE IF EXISTS Job;\r
+DROP TABLE IF EXISTS Media;\r
+DROP TABLE IF EXISTS MAC;\r
+DROP TABLE IF EXISTS JobMedia;\r
+DROP TABLE IF EXISTS Pool;\r
+DROP TABLE IF EXISTS MultiVolume;\r
+DROP TABLE IF EXISTS FileSave; \r
+DROP TABLE IF EXISTS FileSet; \r
+DROP TABLE IF EXISTS Version;\r
+DROP TABLE IF EXISTS Counters;\r
+DROP TABLE IF EXISTS BaseFiles;\r
+DROP TABLE IF EXISTS UnsavedFiles;\r
+DROP TABLE IF EXISTS CDImages;\r
+DROP TABLE IF EXISTS Status;\r
+DROP TABLE IF EXISTS MAC;\r
+DROP TABLE IF EXISTS Location;\r
--- /dev/null
+rem \r
+rem shell script to drop Bacula database(s)\r
+rem \r
+\r
+bindir=@SQL_BINDIR@\r
+\r
+$bindir/dropdb bacula\r
+if ERRORLEVEL 1 GOTO :ERROR\r
+echo "Drop of bacula database succeeded."\r
+EXIT /b 0\r
+GOTO :EOF\r
+\r
+:ERROR\r
+echo "Drop of bacula database failed."\r
+EXIT /b 1\r
--- /dev/null
+rem \r
+rem shell script to delete Bacula tables for PostgreSQL\r
+rem\r
+\r
+bindir=@SQL_BINDIR@\r
+\r
+$bindir/psql -f drop_postgresql_tables.sql -d bacula $*\r
+if ERRORLEVEL 1 GOTO :ERROR\r
+echo "Deletion of Bacula PostgreSQL tables succeeded."\r
+EXIT /b 0\r
+GOTO :EOF\r
+\r
+:ERROR\r
+echo "Deletion of Bacula PostgreSQL tables failed."\r
+EXIT /b 1\r
--- /dev/null
+drop table unsavedfiles;\r
+drop table basefiles;\r
+drop table jobmedia;\r
+drop table file;\r
+drop table job;\r
+drop table media;\r
+drop table client;\r
+drop table pool;\r
+drop table fileset;\r
+drop table path;\r
+drop table filename;\r
+drop table counters;\r
+drop table version;\r
+drop table CDImages;\r
+drop table Device;\r
+drop table Storage;\r
+drop table MediaType;\r
+drop table Status;\r
+drop table MAC;\r
+drop table log;\r
+drop table Location;\r
+drop table locationlog;\r
--- /dev/null
+#!/bin/sh
+#
+# Shell script to fix PostgreSQL tables in version 8
+#
+echo " "
+echo "This script will fix a Bacula PostgreSQL database version 8"
+echo "Depending on the size of your database,"
+echo "this script may take several minutes to run."
+echo " "
+#
+# Set the following to the path to psql.
+bindir=****EDIT-ME to be the path to psql****
+
+if $bindir/psql $* -f - <<END-OF-DATA
+\c bacula
+
+begin;
+
+alter table media rename column endblock to endblock_old;
+alter table media add column endblock bigint;
+update media set endblock = endblock_old;
+alter table media alter column endblock set not null;
+alter table media drop column endblock_old;
+
+commit;
+
+vacuum;
+
+END-OF-DATA
+then
+ echo "Update of Bacula PostgreSQL tables succeeded."
+else
+ echo "Update of Bacula PostgreSQL tables failed."
+fi
+exit 0
--- /dev/null
+rem \r
+rem Shell script to fix PostgreSQL tables in version 8\r
+rem \r
+\r
+echo " "\r
+echo "This script will fix a Bacula PostgreSQL database version 8"\r
+echo "Depending on the size of your database,"\r
+echo "this script may take several minutes to run."\r
+echo " "\r
+#\r
+# Set the following to the path to psql.\r
+bindir=****EDIT-ME to be the path to psql****\r
+\r
+$bindir/psql $* -f fix_postgresql_tables.sql\r
+if ERRORLEVEL 1 GOTO :ERROR\r
+echo "Update of Bacula PostgreSQL tables succeeded."\r
+EXIT /b 0\r
+GOTO :EOF\r
+\r
+:ERROR\r
+echo "Update of Bacula PostgreSQL tables failed."\r
+EXIT /b 1\r
--- /dev/null
+\c bacula\r
+\r
+begin;\r
+\r
+alter table media rename column endblock to endblock_old;\r
+alter table media add column endblock bigint;\r
+update media set endblock = endblock_old;\r
+alter table media alter column endblock set not null;\r
+alter table media drop column endblock_old;\r
+\r
+commit;\r
+\r
+vacuum;\r
--- /dev/null
+rem !/bin/sh\r
+rem \r
+rem shell script to grant privileges to the bdb database\r
+rem \r
+rem nothing to do here\r
-rem
-rem Script to grant privileges to the bacula database
-rem
-
-%SQL_BINDIR%\mysql $* -u root -f < grant_mysql_privileges.sql
-set RESULT=%ERRORLEVEL%
-if %RESULT% GTR 0 goto :ERROR
-echo "Privileges for bacula granted."
-exit /b 0
-
-:ERROR
-echo "Error creating privileges."
-exit /b %RESULT%
+rem\r
+rem Script to grant privileges to the bacula database\r
+rem\r
+\r
+%SQL_BINDIR%\mysql $* -u root -f < grant_mysql_privileges.sql\r
+set RESULT=%ERRORLEVEL%\r
+if %RESULT% GTR 0 goto :ERROR\r
+echo "Privileges for bacula granted."\r
+exit /b 0\r
+\r
+:ERROR\r
+echo "Error creating privileges."\r
+exit /b %RESULT%\r
-use mysql
-grant all privileges on bacula.* to bacula@localhost;
-grant all privileges on bacula.* to bacula@"%";
-select * from user;
-flush privileges;
+use mysql\r
+grant all privileges on bacula.* to bacula@localhost;\r
+grant all privileges on bacula.* to bacula@"%";\r
+select * from user;\r
+flush privileges;\r
--- /dev/null
+rem \r
+rem shell script to grant privileges to the bacula database\r
+rem \r
+USER=bacula\r
+bindir=@SQL_BINDIR@\r
+\r
+$bindir/psql -f grant_postgresql_privileges.sql -d bacula $*\r
+if ERRORLEVEL 1 GOTO :ERROR\r
+echo "Privileges for bacula granted."\r
+EXIT /b 0\r
+GOTO :EOF\r
+\r
+:ERROR\r
+echo "Error creating privileges."\r
+EXIT /b 1\r
--- /dev/null
+create user ${USER};\r
+\r
+-- for tables\r
+grant all on unsavedfiles to ${USER};\r
+grant all on basefiles to ${USER};\r
+grant all on jobmedia to ${USER};\r
+grant all on file to ${USER};\r
+grant all on job to ${USER};\r
+grant all on media to ${USER};\r
+grant all on client to ${USER};\r
+grant all on pool to ${USER};\r
+grant all on fileset to ${USER};\r
+grant all on path to ${USER};\r
+grant all on filename to ${USER};\r
+grant all on counters to ${USER};\r
+grant all on version to ${USER};\r
+grant all on cdimages to ${USER};\r
+grant all on mediatype to ${USER};\r
+grant all on storage to ${USER};\r
+grant all on device to ${USER};\r
+grant all on status to ${USER};\r
+\r
+-- for sequences on those tables\r
+\r
+grant select, update on filename_filenameid_seq to ${USER};\r
+grant select, update on path_pathid_seq to ${USER};\r
+grant select, update on fileset_filesetid_seq to ${USER};\r
+grant select, update on pool_poolid_seq to ${USER};\r
+grant select, update on client_clientid_seq to ${USER};\r
+grant select, update on media_mediaid_seq to ${USER};\r
+grant select, update on job_jobid_seq to ${USER};\r
+grant select, update on file_fileid_seq to ${USER};\r
+grant select, update on jobmedia_jobmediaid_seq to ${USER};\r
+grant select, update on basefiles_baseid_seq to ${USER};\r
+grant select, update on storage_storageid_seq to ${USER};\r
+grant select, update on mediatype_mediatypeid_seq to ${USER};\r
+grant select, update on device_deviceid_seq to ${USER};\r
--- /dev/null
+rem \r
+rem shell script to create Bacula tables\r
+rem \r
+rem Nothing to do -- created by Bacula\r
+rem \r
-#!/bin/sh
-#
-# This script dumps your Bacula catalog in ASCII format
-# It works for MySQL, SQLite, and PostgreSQL
-#
-# $1 is the name of the database to be backed up and the name
-# of the output file (default = bacula
-# $2 is the user name with which to access the database
-# (default = bacula).
-# $3 is the password with which to access the database or "" if no password
-# (default "")
-#
-#
-cd %WORKING_DIR%
-del /f bacula.sql
-
-set MYSQLPASSWORD=
-
-if "%3"!="" set MYSQLPASSWORD=" --password=%3"
-%SQL_BINDIR%/mysqldump -u %2 %MYSQLPASSWORD% -f --opt %1 >%1.sql
-
-#
-# To read back a MySQL database use:
-# cd @working_dir@
-# rm -f @SQL_BINDIR@/../var/bacula/*
-# mysql <bacula.sql
-#
-# To read back a SQLite database use:
-# cd @working_dir@
-# rm -f bacula.db
-# sqlite bacula.db <bacula.sql
-#
-# To read back a PostgreSQL database use:
-# cd @working_dir@
-# dropdb bacula
-# createdb bacula
-# psql bacula <bacula.sql
-#
+#!/bin/sh\r
+#\r
+# This script dumps your Bacula catalog in ASCII format\r
+# It works for MySQL, SQLite, and PostgreSQL\r
+#\r
+# $1 is the name of the database to be backed up and the name\r
+# of the output file (default = bacula)\r
+# $2 is the user name with which to access the database\r
+# (default = bacula).\r
+# $3 is the password with which to access the database or "" if no password\r
+# (default "")\r
+#\r
+#\r
+cd %WORKING_DIR%\r
+del /f bacula.sql\r
+\r
+set MYSQLPASSWORD=\r
+\r
+if NOT "%3"=="" set MYSQLPASSWORD=" --password=%3"\r
+%SQL_BINDIR%/mysqldump -u %2 %MYSQLPASSWORD% -f --opt %1 >%1.sql\r
+\r
+#\r
+# To read back a MySQL database use: \r
+# cd @working_dir@\r
+# rm -f @SQL_BINDIR@/../var/bacula/*\r
+# mysql <bacula.sql\r
+#\r
+# To read back a SQLite database use:\r
+# cd @working_dir@\r
+# rm -f bacula.db\r
+# sqlite bacula.db <bacula.sql\r
+#\r
+# To read back a PostgreSQL database use:\r
+# cd @working_dir@\r
+# dropdb bacula\r
+# createdb bacula\r
+# psql bacula <bacula.sql\r
+#\r
-rem
-rem Script to create Bacula MySQL tables
-rem
-
-%SQL_BINDIR%\mysql -f < make_mysql_tables.sql
-set RESULT=%ERRORLEVEL%
-if %RESULT% gt 0 goto :ERROR
-echo "Creation of Bacula MySQL tables succeeded."
-exit /b 0
-
-:ERROR
-echo "Creation of Bacula MySQL tables failed."
-exit /b %RESULT%
+rem\r
+rem Script to create Bacula MySQL tables\r
+rem\r
+\r
+%SQL_BINDIR%\mysql -f < make_mysql_tables.sql\r
+set RESULT=%ERRORLEVEL%\r
+if %RESULT% gt 0 goto :ERROR\r
+echo "Creation of Bacula MySQL tables succeeded."\r
+exit /b 0\r
+\r
+:ERROR\r
+echo "Creation of Bacula MySQL tables failed."\r
+exit /b %RESULT%\r
-USE bacula;
---
--- Note, we use BLOB rather than TEXT because in MySQL,
--- BLOBs are identical to TEXT except that BLOB is case
--- sensitive in sorts, which is what we want, and TEXT
--- is case insensitive.
---
-CREATE TABLE Filename (
- FilenameId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Name BLOB NOT NULL,
- PRIMARY KEY(FilenameId),
- INDEX (Name(255))
- );
-
-CREATE TABLE Path (
- PathId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Path BLOB NOT NULL,
- PRIMARY KEY(PathId),
- INDEX (Path(255))
- );
-
-
-CREATE TABLE File (
- FileId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- FileIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,
- JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- PathId INTEGER UNSIGNED NOT NULL REFERENCES Path,
- FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename,
- MarkId INTEGER UNSIGNED NOT NULL DEFAULT 0,
- LStat TINYBLOB NOT NULL,
- MD5 TINYBLOB NOT NULL,
- PRIMARY KEY(FileId),
- INDEX (JobId),
- INDEX (JobId, PathId, FilenameId)
- );
-
-#
-# Possibly add one or more of the following indexes
-# to the above File table if your Verifies are
-# too slow.
-#
-# INDEX (PathId),
-# INDEX (FilenameId),
-# INDEX (FilenameId, PathId)
-# INDEX (JobId),
-#
-
-CREATE TABLE MediaType (
- MediaTypeId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- MediaType TINYBLOB NOT NULL,
- ReadOnly TINYINT DEFAULT 0,
- PRIMARY KEY(MediaTypeId)
- );
-
-CREATE TABLE Storage (
- StorageId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Name TINYBLOB NOT NULL,
- AutoChanger TINYINT DEFAULT 0,
- PRIMARY KEY(StorageId)
- );
-
-CREATE TABLE Device (
- DeviceId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Name TINYBLOB NOT NULL,
- MediaTypeId INTEGER UNSIGNED NOT NULL REFERENCES MediaType,
- StorageId INTEGER UNSIGNED NOT NULL REFERENCES Storage,
- DevMounts INTEGER UNSIGNED DEFAULT 0,
- DevReadBytes BIGINT UNSIGNED DEFAULT 0,
- DevWriteBytes BIGINT UNSIGNED DEFAULT 0,
- DevReadBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0,
- DevWriteBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0,
- DevReadTime BIGINT UNSIGNED DEFAULT 0,
- DevWriteTime BIGINT UNSIGNED DEFAULT 0,
- DevReadTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0,
- DevWriteTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0,
- CleaningDate DATETIME DEFAULT 0,
- CleaningPeriod BIGINT UNSIGNED DEFAULT 0,
- PRIMARY KEY(DeviceId)
- );
-
-
-CREATE TABLE Job (
- JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Job TINYBLOB NOT NULL,
- Name TINYBLOB NOT NULL,
- Type BINARY(1) NOT NULL,
- Level BINARY(1) NOT NULL,
- ClientId INTEGER NULL REFERENCES Client,
- JobStatus BINARY(1) NOT NULL,
- SchedTime DATETIME NOT NULL,
- StartTime DATETIME NULL,
- EndTime DATETIME NULL,
- JobTDate BIGINT UNSIGNED NOT NULL,
- VolSessionId INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolSessionTime INTEGER UNSIGNED NOT NULL DEFAULT 0,
- JobFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- JobBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,
- JobErrors INTEGER UNSIGNED NOT NULL DEFAULT 0,
- JobMissingFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- PoolId INTEGER UNSIGNED NULL REFERENCES Pool,
- FileSetId INTEGER UNSIGNED NULL REFERENCES FileSet,
- PurgedFiles TINYINT NOT NULL DEFAULT 0,
- HasBase TINYINT NOT NULL DEFAULT 0,
- PRIMARY KEY(JobId),
- INDEX (Name(128))
- );
-
-CREATE TABLE MAC (
- JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- OriginalJobId INTEGER UNSIGNED NOT NULL,
- JobType BINARY(1) NOT NULL,
- JobLevel BINARY(1) NOT NULL,
- SchedTime DATETIME NOT NULL,
- StartTime DATETIME NOT NULL,
- EndTime DATETIME NOT NULL,
- JobTDate BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY(JobId)
- );
-
-CREATE TABLE Location (
- LocationId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Location TINYBLOB NOT NULL,
- PRIMARY KEY(LocationId)
- );
-
-#
-CREATE TABLE FileSet (
- FileSetId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- FileSet TINYBLOB NOT NULL,
- MD5 TINYBLOB NOT NULL,
- CreateTime DATETIME NOT NULL,
- PRIMARY KEY(FileSetId)
- );
-
-CREATE TABLE JobMedia (
- JobMediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- MediaId INTEGER UNSIGNED NOT NULL REFERENCES Media,
- FirstIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,
- LastIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,
- StartFile INTEGER UNSIGNED NOT NULL DEFAULT 0,
- EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0,
- StartBlock INTEGER UNSIGNED NOT NULL DEFAULT 0,
- EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,
- Copy INTEGER UNSIGNED NOT NULL DEFAULT 0,
- Stripe INTEGER UNSIGNED NOT NULL DEFAULT 0,
- PRIMARY KEY(JobMediaId),
- INDEX (JobId, MediaId)
- );
-
-
-CREATE TABLE Media (
- MediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- VolumeName TINYBLOB NOT NULL,
- Slot INTEGER NOT NULL DEFAULT 0,
- PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool,
- MediaType TINYBLOB NOT NULL,
- MediaTypeId INTEGER UNSIGNED NOT NULL REFERENCES MediaType,
- LabelType TINYINT NOT NULL DEFAULT 0,
- FirstWritten DATETIME NULL,
- LastWritten DATETIME NULL,
- LabelDate DATETIME NULL,
- VolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolBlocks INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolMounts INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,
- VolParts INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolErrors INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolWrites INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolCapacityBytes BIGINT UNSIGNED NOT NULL,
- VolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged',
- 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning') NOT NULL,
- Recycle TINYINT NOT NULL DEFAULT 0,
- VolRetention BIGINT UNSIGNED NOT NULL DEFAULT 0,
- VolUseDuration BIGINT UNSIGNED NOT NULL DEFAULT 0,
- MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,
- InChanger TINYINT NOT NULL DEFAULT 0,
- StorageId INTEGER UNSIGNED NOT NULL REFERENCES Storage,
- DeviceId INTEGER UNSIGNED NOT NULL REFERENCES Device,
- MediaAddressing TINYINT NOT NULL DEFAULT 0,
- VolReadTime BIGINT UNSIGNED NOT NULL DEFAULT 0,
- VolWriteTime BIGINT UNSIGNED NOT NULL DEFAULT 0,
- EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0,
- EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0,
- LocationId INTEGER UNSIGNED NOT NULL REFERENCES Location,
- RecycleCount INTEGER UNSIGNED DEFAULT 0,
- InitialWrite DATETIME NULL,
- ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
- RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
- PRIMARY KEY(MediaId),
- INDEX (PoolId)
- );
-
-CREATE INDEX inx8 ON Media (PoolId);
-
-
-
-CREATE TABLE Pool (
- PoolId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Name TINYBLOB NOT NULL,
- NumVols INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVols INTEGER UNSIGNED NOT NULL DEFAULT 0,
- UseOnce TINYINT NOT NULL,
- UseCatalog TINYINT NOT NULL,
- AcceptAnyVolume TINYINT DEFAULT 0,
- VolRetention BIGINT UNSIGNED NOT NULL,
- VolUseDuration BIGINT UNSIGNED NOT NULL,
- MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVolBytes BIGINT UNSIGNED NOT NULL,
- AutoPrune TINYINT DEFAULT 0,
- Recycle TINYINT DEFAULT 0,
- PoolType ENUM('Backup', 'Copy', 'Cloned', 'Archive', 'Migration', 'Scratch') NOT NULL,
- LabelType TINYINT NOT NULL DEFAULT 0,
- LabelFormat TINYBLOB,
- Enabled TINYINT DEFAULT 1,
- ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
- RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
- NextPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
- MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
- MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
- MigrationTime BIGINT UNSIGNED DEFAULT 0,
- UNIQUE (Name(128)),
- PRIMARY KEY (PoolId)
- );
-
-
-CREATE TABLE Client (
- ClientId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Name TINYBLOB NOT NULL,
- Uname TINYBLOB NOT NULL, /* full uname -a of client */
- AutoPrune TINYINT DEFAULT 0,
- FileRetention BIGINT UNSIGNED NOT NULL,
- JobRetention BIGINT UNSIGNED NOT NULL,
- UNIQUE (Name(128)),
- PRIMARY KEY(ClientId)
- );
-
-CREATE TABLE BaseFiles (
- BaseId INTEGER UNSIGNED AUTO_INCREMENT,
- BaseJobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- FileId INTEGER UNSIGNED NOT NULL REFERENCES File,
- FileIndex INTEGER UNSIGNED,
- PRIMARY KEY(BaseId)
- );
-
-CREATE TABLE UnsavedFiles (
- UnsavedId INTEGER UNSIGNED AUTO_INCREMENT,
- JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- PathId INTEGER UNSIGNED NOT NULL REFERENCES Path,
- FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename,
- PRIMARY KEY (UnsavedId)
- );
-
-
-
-CREATE TABLE Counters (
- Counter TINYBLOB NOT NULL,
- MinValue INTEGER,
- MaxValue INTEGER,
- CurrentValue INTEGER,
- WrapCounter TINYBLOB NOT NULL,
- PRIMARY KEY (Counter(128))
- );
-
-CREATE TABLE CDImages (
- MediaId INTEGER UNSIGNED NOT NULL,
- LastBurn DATETIME NOT NULL,
- PRIMARY KEY (MediaId)
- );
-
-CREATE TABLE Status (
- JobStatus CHAR(1) BINARY NOT NULL,
- JobStatusLong BLOB,
- PRIMARY KEY (JobStatus)
- );
-
-INSERT INTO Status (JobStatus,JobStatusLong) VALUES
- ('C', 'Created, not yet running'),
- ('R', 'Running'),
- ('B', 'Blocked'),
- ('T', 'Completed successfully'),
- ('E', 'Terminated with errors'),
- ('e', 'Non-fatal error'),
- ('f', 'Fatal error'),
- ('D', 'Verify found differences'),
- ('A', 'Canceled by user'),
- ('F', 'Waiting for Client'),
- ('S', 'Waiting for Storage daemon'),
- ('m', 'Waiting for new media'),
- ('M', 'Waiting for media mount'),
- ('s', 'Waiting for storage resource'),
- ('j', 'Waiting for job resource'),
- ('c', 'Waiting for client resource'),
- ('d', 'Waiting on maximum jobs'),
- ('t', 'Waiting on start time'),
- ('p', 'Waiting on higher priority jobs');
-
-CREATE TABLE Version (
- VersionId INTEGER UNSIGNED NOT NULL
- );
-
--- Initialize Version
-INSERT INTO Version (VersionId) VALUES (9);
+USE bacula;\r
+--\r
+-- Note, we use BLOB rather than TEXT because in MySQL,\r
+-- BLOBs are identical to TEXT except that BLOB is case\r
+-- sensitive in sorts, which is what we want, and TEXT\r
+-- is case insensitive.\r
+--\r
+CREATE TABLE Filename (\r
+ FilenameId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Name BLOB NOT NULL,\r
+ PRIMARY KEY(FilenameId),\r
+ INDEX (Name(255))\r
+ );\r
+\r
+CREATE TABLE Path (\r
+ PathId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Path BLOB NOT NULL,\r
+ PRIMARY KEY(PathId),\r
+ INDEX (Path(255))\r
+ );\r
+\r
+\r
+CREATE TABLE File (\r
+ FileId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ FileIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,\r
+ PathId INTEGER UNSIGNED NOT NULL REFERENCES Path,\r
+ FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename,\r
+ MarkId INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ LStat TINYBLOB NOT NULL,\r
+ MD5 TINYBLOB NOT NULL,\r
+ PRIMARY KEY(FileId),\r
+ INDEX (JobId),\r
+ INDEX (JobId, PathId, FilenameId)\r
+ );\r
+\r
+#\r
+# Possibly add one or more of the following indexes\r
+# to the above File table if your Verifies are\r
+# too slow.\r
+#\r
+# INDEX (PathId),\r
+# INDEX (FilenameId),\r
+# INDEX (FilenameId, PathId)\r
+# INDEX (JobId),\r
+#\r
+\r
+CREATE TABLE MediaType (\r
+ MediaTypeId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ MediaType TINYBLOB NOT NULL,\r
+ ReadOnly TINYINT DEFAULT 0,\r
+ PRIMARY KEY(MediaTypeId)\r
+ );\r
+\r
+CREATE TABLE Storage (\r
+ StorageId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Name TINYBLOB NOT NULL,\r
+ AutoChanger TINYINT DEFAULT 0,\r
+ PRIMARY KEY(StorageId)\r
+ );\r
+\r
+CREATE TABLE Device (\r
+ DeviceId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Name TINYBLOB NOT NULL,\r
+ MediaTypeId INTEGER UNSIGNED NOT NULL REFERENCES MediaType,\r
+ StorageId INTEGER UNSIGNED NOT NULL REFERENCES Storage,\r
+ DevMounts INTEGER UNSIGNED DEFAULT 0,\r
+ DevReadBytes BIGINT UNSIGNED DEFAULT 0,\r
+ DevWriteBytes BIGINT UNSIGNED DEFAULT 0,\r
+ DevReadBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0,\r
+ DevWriteBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0,\r
+ DevReadTime BIGINT UNSIGNED DEFAULT 0,\r
+ DevWriteTime BIGINT UNSIGNED DEFAULT 0,\r
+ DevReadTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0,\r
+ DevWriteTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0,\r
+ CleaningDate DATETIME DEFAULT 0,\r
+ CleaningPeriod BIGINT UNSIGNED DEFAULT 0,\r
+ PRIMARY KEY(DeviceId)\r
+ );\r
+\r
+\r
+CREATE TABLE Job (\r
+ JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Job TINYBLOB NOT NULL,\r
+ Name TINYBLOB NOT NULL,\r
+ Type BINARY(1) NOT NULL,\r
+ Level BINARY(1) NOT NULL,\r
+ ClientId INTEGER NULL REFERENCES Client,\r
+ JobStatus BINARY(1) NOT NULL,\r
+ SchedTime DATETIME NOT NULL,\r
+ StartTime DATETIME NULL,\r
+ EndTime DATETIME NULL,\r
+ JobTDate BIGINT UNSIGNED NOT NULL,\r
+ VolSessionId INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolSessionTime INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ JobFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ JobBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,\r
+ JobErrors INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ JobMissingFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ PoolId INTEGER UNSIGNED NULL REFERENCES Pool,\r
+ FileSetId INTEGER UNSIGNED NULL REFERENCES FileSet,\r
+ PurgedFiles TINYINT NOT NULL DEFAULT 0,\r
+ HasBase TINYINT NOT NULL DEFAULT 0,\r
+ PRIMARY KEY(JobId),\r
+ INDEX (Name(128))\r
+ );\r
+\r
+CREATE TABLE MAC (\r
+ JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ OriginalJobId INTEGER UNSIGNED NOT NULL,\r
+ JobType BINARY(1) NOT NULL,\r
+ JobLevel BINARY(1) NOT NULL,\r
+ SchedTime DATETIME NOT NULL,\r
+ StartTime DATETIME NOT NULL,\r
+ EndTime DATETIME NOT NULL,\r
+ JobTDate BIGINT UNSIGNED NOT NULL,\r
+ PRIMARY KEY(JobId)\r
+ );\r
+\r
+CREATE TABLE Location (\r
+ LocationId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Location TINYBLOB NOT NULL,\r
+ PRIMARY KEY(LocationId)\r
+ );\r
+\r
+# \r
+CREATE TABLE FileSet (\r
+ FileSetId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ FileSet TINYBLOB NOT NULL,\r
+ MD5 TINYBLOB NOT NULL,\r
+ CreateTime DATETIME NOT NULL,\r
+ PRIMARY KEY(FileSetId)\r
+ );\r
+\r
+CREATE TABLE JobMedia (\r
+ JobMediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,\r
+ MediaId INTEGER UNSIGNED NOT NULL REFERENCES Media,\r
+ FirstIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ LastIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ StartFile INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ StartBlock INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ Copy INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ Stripe INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ PRIMARY KEY(JobMediaId),\r
+ INDEX (JobId, MediaId)\r
+ );\r
+\r
+\r
+CREATE TABLE Media (\r
+ MediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ VolumeName TINYBLOB NOT NULL,\r
+ Slot INTEGER NOT NULL DEFAULT 0,\r
+ PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool,\r
+ MediaType TINYBLOB NOT NULL,\r
+ MediaTypeId INTEGER UNSIGNED NOT NULL REFERENCES MediaType,\r
+ LabelType TINYINT NOT NULL DEFAULT 0,\r
+ FirstWritten DATETIME NULL,\r
+ LastWritten DATETIME NULL,\r
+ LabelDate DATETIME NULL,\r
+ VolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolBlocks INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolMounts INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,\r
+ VolParts INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolErrors INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolWrites INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ VolCapacityBytes BIGINT UNSIGNED NOT NULL,\r
+ VolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged',\r
+ 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning') NOT NULL,\r
+ Recycle TINYINT NOT NULL DEFAULT 0,\r
+ VolRetention BIGINT UNSIGNED NOT NULL DEFAULT 0,\r
+ VolUseDuration BIGINT UNSIGNED NOT NULL DEFAULT 0,\r
+ MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ MaxVolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,\r
+ InChanger TINYINT NOT NULL DEFAULT 0,\r
+ StorageId INTEGER UNSIGNED NOT NULL REFERENCES Storage,\r
+ DeviceId INTEGER UNSIGNED NOT NULL REFERENCES Device,\r
+ MediaAddressing TINYINT NOT NULL DEFAULT 0,\r
+ VolReadTime BIGINT UNSIGNED NOT NULL DEFAULT 0,\r
+ VolWriteTime BIGINT UNSIGNED NOT NULL DEFAULT 0,\r
+ EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ LocationId INTEGER UNSIGNED NOT NULL REFERENCES Location,\r
+ RecycleCount INTEGER UNSIGNED DEFAULT 0,\r
+ InitialWrite DATETIME NULL,\r
+ ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,\r
+ RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,\r
+ PRIMARY KEY(MediaId),\r
+ INDEX (PoolId)\r
+ );\r
+\r
+CREATE INDEX inx8 ON Media (PoolId);\r
+\r
+\r
+\r
+CREATE TABLE Pool (\r
+ PoolId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Name TINYBLOB NOT NULL,\r
+ NumVols INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ MaxVols INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ UseOnce TINYINT NOT NULL,\r
+ UseCatalog TINYINT NOT NULL,\r
+ AcceptAnyVolume TINYINT DEFAULT 0,\r
+ VolRetention BIGINT UNSIGNED NOT NULL,\r
+ VolUseDuration BIGINT UNSIGNED NOT NULL,\r
+ MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,\r
+ MaxVolBytes BIGINT UNSIGNED NOT NULL,\r
+ AutoPrune TINYINT DEFAULT 0,\r
+ Recycle TINYINT DEFAULT 0,\r
+ PoolType ENUM('Backup', 'Copy', 'Cloned', 'Archive', 'Migration', 'Scratch') NOT NULL,\r
+ LabelType TINYINT NOT NULL DEFAULT 0,\r
+ LabelFormat TINYBLOB,\r
+ Enabled TINYINT DEFAULT 1,\r
+ ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,\r
+ RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,\r
+ NextPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,\r
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,\r
+ MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,\r
+ MigrationTime BIGINT UNSIGNED DEFAULT 0,\r
+ UNIQUE (Name(128)),\r
+ PRIMARY KEY (PoolId)\r
+ );\r
+\r
+\r
+CREATE TABLE Client (\r
+ ClientId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Name TINYBLOB NOT NULL,\r
+ Uname TINYBLOB NOT NULL, /* full uname -a of client */\r
+ AutoPrune TINYINT DEFAULT 0,\r
+ FileRetention BIGINT UNSIGNED NOT NULL,\r
+ JobRetention BIGINT UNSIGNED NOT NULL,\r
+ UNIQUE (Name(128)),\r
+ PRIMARY KEY(ClientId)\r
+ );\r
+\r
+CREATE TABLE BaseFiles (\r
+ BaseId INTEGER UNSIGNED AUTO_INCREMENT,\r
+ BaseJobId INTEGER UNSIGNED NOT NULL REFERENCES Job,\r
+ JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,\r
+ FileId INTEGER UNSIGNED NOT NULL REFERENCES File,\r
+ FileIndex INTEGER UNSIGNED,\r
+ PRIMARY KEY(BaseId)\r
+ );\r
+\r
+CREATE TABLE UnsavedFiles (\r
+ UnsavedId INTEGER UNSIGNED AUTO_INCREMENT,\r
+ JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,\r
+ PathId INTEGER UNSIGNED NOT NULL REFERENCES Path,\r
+ FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename,\r
+ PRIMARY KEY (UnsavedId)\r
+ );\r
+\r
+\r
+\r
+CREATE TABLE Counters (\r
+ Counter TINYBLOB NOT NULL,\r
+ MinValue INTEGER,\r
+ MaxValue INTEGER,\r
+ CurrentValue INTEGER,\r
+ WrapCounter TINYBLOB NOT NULL,\r
+ PRIMARY KEY (Counter(128))\r
+ );\r
+\r
+CREATE TABLE CDImages (\r
+ MediaId INTEGER UNSIGNED NOT NULL,\r
+ LastBurn DATETIME NOT NULL,\r
+ PRIMARY KEY (MediaId)\r
+ );\r
+\r
+CREATE TABLE Status (\r
+ JobStatus CHAR(1) BINARY NOT NULL,\r
+ JobStatusLong BLOB, \r
+ PRIMARY KEY (JobStatus)\r
+ );\r
+\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('C', 'Created, not yet running'),\r
+ ('R', 'Running'),\r
+ ('B', 'Blocked'),\r
+ ('T', 'Completed successfully'),\r
+ ('E', 'Terminated with errors'),\r
+ ('e', 'Non-fatal error'),\r
+ ('f', 'Fatal error'),\r
+ ('D', 'Verify found differences'),\r
+ ('A', 'Canceled by user'),\r
+ ('F', 'Waiting for Client'),\r
+ ('S', 'Waiting for Storage daemon'),\r
+ ('m', 'Waiting for new media'),\r
+ ('M', 'Waiting for media mount'),\r
+ ('s', 'Waiting for storage resource'),\r
+ ('j', 'Waiting for job resource'),\r
+ ('c', 'Waiting for client resource'),\r
+ ('d', 'Waiting on maximum jobs'),\r
+ ('t', 'Waiting on start time'),\r
+ ('p', 'Waiting on higher priority jobs');\r
+\r
+CREATE TABLE Version (\r
+ VersionId INTEGER UNSIGNED NOT NULL \r
+ );\r
+\r
+-- Initialize Version \r
+INSERT INTO Version (VersionId) VALUES (9);\r
--- /dev/null
+rem \r
+rem Batch script to create Bacula PostgreSQL tables\r
+rem \r
+bindir=@SQL_BINDIR@\r
+\r
+$bindir/psql -f make_postgresql_tables.sql -d bacula $*\r
+if ERRORLEVEL 1 GOTO :ERROR\r
+echo "Creation of Bacula PostgreSQL tables succeeded."\r
+EXIT /b 0\r
+GOTO :EOF\r
+\r
+:ERROR\r
+echo "Creation of Bacula PostgreSQL tables failed."\r
+EXIT /b 1\r
--- /dev/null
+CREATE TABLE filename\r
+(\r
+ filenameid serial not null,\r
+ name text not null,\r
+ primary key (filenameid)\r
+);\r
+\r
+CREATE INDEX filename_name_idx on filename (name);\r
+\r
+CREATE TABLE path\r
+(\r
+ pathid serial not null,\r
+ path text not null,\r
+ primary key (pathid)\r
+);\r
+\r
+CREATE INDEX path_name_idx on path (path);\r
+\r
+CREATE TABLE file\r
+(\r
+ fileid serial not null,\r
+ fileindex integer not null default 0,\r
+ jobid integer not null,\r
+ pathid integer not null,\r
+ filenameid integer not null,\r
+ markid integer not null default 0,\r
+ lstat text not null,\r
+ md5 text not null,\r
+ primary key (fileid)\r
+);\r
+\r
+CREATE INDEX file_jobid_idx on file (jobid);\r
+CREATE INDEX file_fp_idx on file (filenameid, pathid);\r
+\r
+--\r
+-- Possibly add one or more of the following indexes\r
+-- if your Verifies are too slow.\r
+--\r
+-- CREATE INDEX file_pathid_idx on file(pathid);\r
+-- CREATE INDEX file_filenameid_idx on file(filenameid);\r
+-- CREATE INDEX file_jpfid_idx on file (jobid, pathid, filenameid);\r
+\r
+CREATE TABLE job\r
+(\r
+ jobid serial not null,\r
+ job text not null,\r
+ name text not null,\r
+ type char(1) not null,\r
+ level char(1) not null,\r
+ clientid integer default 0,\r
+ jobstatus char(1) not null,\r
+ schedtime timestamp without time zone,\r
+ starttime timestamp without time zone,\r
+ endtime timestamp without time zone,\r
+ realendtime timestamp without time zone,\r
+ jobtdate bigint default 0,\r
+ volsessionid integer default 0,\r
+ volsessiontime integer default 0,\r
+ jobfiles integer default 0,\r
+ jobbytes bigint default 0,\r
+ joberrors integer default 0,\r
+ jobmissingfiles integer default 0,\r
+ poolid integer default 0,\r
+ filesetid integer default 0,\r
+ purgedfiles smallint default 0,\r
+ hasbase smallint default 0,\r
+ priorjobid integer default 0,\r
+ primary key (jobid)\r
+);\r
+\r
+CREATE INDEX job_name_idx on job (name);\r
+\r
+CREATE TABLE Location (\r
+ LocationId serial not null,\r
+ Location text not null,\r
+ Cost integer default 0,\r
+ Enabled smallint,\r
+ primary key (LocationId)\r
+);\r
+\r
+\r
+CREATE TABLE fileset\r
+(\r
+ filesetid serial not null,\r
+ fileset text not null,\r
+ md5 text not null,\r
+ createtime timestamp without time zone not null,\r
+ primary key (filesetid)\r
+);\r
+\r
+CREATE INDEX fileset_name_idx on fileset (fileset);\r
+\r
+CREATE TABLE jobmedia\r
+(\r
+ jobmediaid serial not null,\r
+ jobid integer not null,\r
+ mediaid integer not null,\r
+ firstindex integer default 0,\r
+ lastindex integer default 0,\r
+ startfile integer default 0,\r
+ endfile integer default 0,\r
+ startblock bigint default 0,\r
+ endblock bigint default 0,\r
+ volindex integer default 0,\r
+ copy integer default 0,\r
+ primary key (jobmediaid)\r
+);\r
+\r
+CREATE INDEX job_media_job_id_media_id_idx on jobmedia (jobid, mediaid);\r
+\r
+CREATE TABLE media\r
+(\r
+ mediaid serial not null,\r
+ volumename text not null,\r
+ slot integer default 0,\r
+ poolid integer default 0,\r
+ mediatype text not null,\r
+ mediatypeid integer default 0,\r
+ labeltype integer default 0,\r
+ firstwritten timestamp without time zone,\r
+ lastwritten timestamp without time zone,\r
+ labeldate timestamp without time zone,\r
+ voljobs integer default 0,\r
+ volfiles integer default 0,\r
+ volblocks integer default 0,\r
+ volmounts integer default 0,\r
+ volbytes bigint default 0,\r
+ volparts integer default 0,\r
+ volerrors integer default 0,\r
+ volwrites integer default 0,\r
+ volcapacitybytes bigint default 0,\r
+ volstatus text not null\r
+ check (volstatus in ('Full','Archive','Append',\r
+ 'Recycle','Purged','Read-Only','Disabled',\r
+ 'Error','Busy','Used','Cleaning','Scratch')),\r
+ enabled smallint default 1,\r
+ recycle smallint default 0,\r
+ volretention bigint default 0,\r
+ voluseduration bigint default 0,\r
+ maxvoljobs integer default 0,\r
+ maxvolfiles integer default 0,\r
+ maxvolbytes bigint default 0,\r
+ inchanger smallint default 0,\r
+ StorageId integer default 0,\r
+ DeviceId integer default 0,\r
+ mediaaddressing smallint default 0,\r
+ volreadtime bigint default 0,\r
+ volwritetime bigint default 0,\r
+ endfile integer default 0,\r
+ endblock bigint default 0,\r
+ LocationId integer default 0,\r
+ recyclecount integer default 0,\r
+ initialwrite timestamp without time zone,\r
+ scratchpoolid integer default 0,\r
+ recyclepoolid integer default 0,\r
+ comment text,\r
+ primary key (mediaid)\r
+);\r
+\r
+create unique index media_volumename_id on media (volumename);\r
+\r
+ \r
+CREATE TABLE MediaType (\r
+ MediaTypeId SERIAL,\r
+ MediaType TEXT NOT NULL,\r
+ ReadOnly INTEGER DEFAULT 0,\r
+ PRIMARY KEY(MediaTypeId)\r
+ );\r
+\r
+CREATE TABLE Storage (\r
+ StorageId SERIAL,\r
+ Name TEXT NOT NULL,\r
+ AutoChanger INTEGER DEFAULT 0,\r
+ PRIMARY KEY(StorageId)\r
+ );\r
+\r
+CREATE TABLE Device (\r
+ DeviceId SERIAL,\r
+ Name TEXT NOT NULL,\r
+ MediaTypeId INTEGER NOT NULL,\r
+ StorageId INTEGER NOT NULL,\r
+ DevMounts INTEGER NOT NULL DEFAULT 0,\r
+ DevReadBytes BIGINT NOT NULL DEFAULT 0,\r
+ DevWriteBytes BIGINT NOT NULL DEFAULT 0,\r
+ DevReadBytesSinceCleaning BIGINT NOT NULL DEFAULT 0,\r
+ DevWriteBytesSinceCleaning BIGINT NOT NULL DEFAULT 0,\r
+ DevReadTime BIGINT NOT NULL DEFAULT 0,\r
+ DevWriteTime BIGINT NOT NULL DEFAULT 0,\r
+ DevReadTimeSinceCleaning BIGINT NOT NULL DEFAULT 0,\r
+ DevWriteTimeSinceCleaning BIGINT NOT NULL DEFAULT 0,\r
+ CleaningDate timestamp without time zone,\r
+ CleaningPeriod BIGINT NOT NULL DEFAULT 0,\r
+ PRIMARY KEY(DeviceId)\r
+ );\r
+\r
+\r
+CREATE TABLE pool\r
+(\r
+ poolid serial not null,\r
+ name text not null,\r
+ numvols integer default 0,\r
+ maxvols integer default 0,\r
+ useonce smallint default 0,\r
+ usecatalog smallint default 0,\r
+ acceptanyvolume smallint default 0,\r
+ volretention bigint default 0,\r
+ voluseduration bigint default 0,\r
+ maxvoljobs integer default 0,\r
+ maxvolfiles integer default 0,\r
+ maxvolbytes bigint default 0,\r
+ autoprune smallint default 0,\r
+ recycle smallint default 0,\r
+ pooltype text \r
+ check (pooltype in ('Backup','Copy','Cloned','Archive','Migration','Scratch')),\r
+ labeltype integer default 0,\r
+ labelformat text not null,\r
+ enabled smallint default 1,\r
+ scratchpoolid integer default 0,\r
+ recyclepoolid integer default 0,\r
+ NextPoolId integer default 0,\r
+ MigrationHighBytes BIGINT DEFAULT 0,\r
+ MigrationLowBytes BIGINT DEFAULT 0,\r
+ MigrationTime BIGINT DEFAULT 0,\r
+ primary key (poolid)\r
+);\r
+\r
+CREATE INDEX pool_name_idx on pool (name);\r
+\r
+CREATE TABLE client\r
+(\r
+ clientid serial not null,\r
+ name text not null,\r
+ uname text not null,\r
+ autoprune smallint default 0,\r
+ fileretention bigint default 0,\r
+ jobretention bigint default 0,\r
+ primary key (clientid)\r
+);\r
+\r
+create unique index client_name_idx on client (name);\r
+\r
+CREATE TABLE Log\r
+(\r
+ LogId serial not null,\r
+ JobId integer not null,\r
+ Time timestamp without time zone,\r
+ LogText text not null,\r
+ primary key (LogId)\r
+);\r
+create index log_name_idx on Log (JobId);\r
+\r
+CREATE TABLE LocationLog (\r
+ LocLogId SERIAL NOT NULL,\r
+ Date timestamp without time zone,\r
+ Comment TEXT NOT NULL,\r
+ MediaId INTEGER DEFAULT 0,\r
+ LocationId INTEGER DEFAULT 0,\r
+ newvolstatus text not null\r
+ check (newvolstatus in ('Full','Archive','Append',\r
+ 'Recycle','Purged','Read-Only','Disabled',\r
+ 'Error','Busy','Used','Cleaning','Scratch')),\r
+ newenabled smallint,\r
+ PRIMARY KEY(LocLogId)\r
+);\r
+\r
+\r
+\r
+CREATE TABLE counters\r
+(\r
+ counter text not null,\r
+ minvalue integer default 0,\r
+ maxvalue integer default 0,\r
+ currentvalue integer default 0,\r
+ wrapcounter text not null,\r
+ primary key (counter)\r
+);\r
+\r
+\r
+\r
+CREATE TABLE basefiles\r
+(\r
+ baseid serial not null,\r
+ jobid integer not null,\r
+ fileid integer not null,\r
+ fileindex integer ,\r
+ basejobid integer ,\r
+ primary key (baseid)\r
+);\r
+\r
+CREATE TABLE unsavedfiles\r
+(\r
+ UnsavedId integer not null,\r
+ jobid integer not null,\r
+ pathid integer not null,\r
+ filenameid integer not null,\r
+ primary key (UnsavedId)\r
+);\r
+\r
+CREATE TABLE CDImages \r
+(\r
+ MediaId integer not null,\r
+ LastBurn timestamp without time zone not null,\r
+ primary key (MediaId)\r
+);\r
+\r
+\r
+CREATE TABLE version\r
+(\r
+ versionid integer not null\r
+);\r
+\r
+CREATE TABLE Status (\r
+ JobStatus CHAR(1) NOT NULL,\r
+ JobStatusLong TEXT, \r
+ PRIMARY KEY (JobStatus)\r
+ );\r
+\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('C', 'Created, not yet running');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('R', 'Running');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('B', 'Blocked');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('T', 'Completed successfully');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('E', 'Terminated with errors');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('e', 'Non-fatal error');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('f', 'Fatal error');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('D', 'Verify found differences');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('A', 'Canceled by user');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('F', 'Waiting for Client');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('S', 'Waiting for Storage daemon');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('m', 'Waiting for new media');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('M', 'Waiting for media mount');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('s', 'Waiting for storage resource');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('j', 'Waiting for job resource');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('c', 'Waiting for client resource');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('d', 'Waiting on maximum jobs');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('t', 'Waiting on start time');\r
+INSERT INTO Status (JobStatus,JobStatusLong) VALUES\r
+ ('p', 'Waiting on higher priority jobs');\r
+\r
+\r
+INSERT INTO Version (VersionId) VALUES (10);\r
+\r
+-- Make sure we have appropriate permissions\r
--- /dev/null
+rem \r
+rem Batch script to update bdb tables\r
+rem Nothing to do here.\r
+rem \r
-rem
-rem Script to update MySQL tables from version 1.38 to 1.39
-rem
-echo " "
-echo "This script will update a Bacula MySQL database from version 9 to 9"
-echo "Depending on the size of your database,"
-echo "this script may take several minutes to run."
-echo " "
-
-if %SQL_BINDIR%/mysql $* -f < update_mysql_tables.sql
-set RESULT=%ERRORLEVEL%
-if %RESULT% GTR 0 goto :ERROR
-echo "Update of Bacula MySQL tables succeeded."
-exit /b 0
-
-:ERROR
-echo "Update of Bacula MySQL tables failed."
-exit /b %RESULT%
+rem\r
+rem Script to update MySQL tables from version 1.38 to 1.39\r
+rem\r
+echo " "\r
+echo "This script will update a Bacula MySQL database from version 9 to 9"\r
+echo "Depending on the size of your database,"\r
+echo "this script may take several minutes to run."\r
+echo " "\r
+\r
+if %SQL_BINDIR%/mysql $* -f < update_mysql_tables.sql\r
+set RESULT=%ERRORLEVEL%\r
+if %RESULT% GTR 0 goto :ERROR\r
+echo "Update of Bacula MySQL tables succeeded."\r
+exit /b 0\r
+\r
+:ERROR\r
+echo "Update of Bacula MySQL tables failed."\r
+exit /b %RESULT%\r
-USE bacula;
-
-ALTER TABLE Media ADD COLUMN MediaTypeId INTEGER UNSIGNED DEFAULT 0 REFERENCES MediaType;
-ALTER TABLE Media ADD COLUMN DeviceId INTEGER UNSIGNED DEFAULT 0 REFERENCES Device;
-ALTER TABLE Media ADD COLUMN LocationId INTEGER UNSIGNED DEFAULT 0 REFERENCES Location;
-ALTER TABLE Media ADD COLUMN RecycleCount INTEGER UNSIGNED DEFAULT 0;
-ALTER TABLE Media ADD COLUMN InitialWrite DATETIME DEFAULT 0;
-ALTER TABLE Media ADD COLUMN ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool;
-ALTER TABLE Media ADD COLUMN RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool;
-
-
-CREATE TABLE MAC (
- JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- OriginalJobId INTEGER UNSIGNED NOT NULL,
- JobType BINARY(1) NOT NULL,
- JobLevel BINARY(1) NOT NULL,
- SchedTime DATETIME NOT NULL,
- StartTime DATETIME NOT NULL,
- EndTime DATETIME NOT NULL,
- JobTDate BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY(JobId)
- );
-
-CREATE TABLE Location (
- LocationId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Location TINYBLOB NOT NULL,
- PRIMARY KEY(LocationId)
- );
-
-
-DELETE FROM Version;
-INSERT INTO Version (VersionId) VALUES (9);
+USE bacula;\r
+\r
+ALTER TABLE Media ADD COLUMN MediaTypeId INTEGER UNSIGNED DEFAULT 0 REFERENCES MediaType;\r
+ALTER TABLE Media ADD COLUMN DeviceId INTEGER UNSIGNED DEFAULT 0 REFERENCES Device;\r
+ALTER TABLE Media ADD COLUMN LocationId INTEGER UNSIGNED DEFAULT 0 REFERENCES Location;\r
+ALTER TABLE Media ADD COLUMN RecycleCount INTEGER UNSIGNED DEFAULT 0;\r
+ALTER TABLE Media ADD COLUMN InitialWrite DATETIME DEFAULT 0;\r
+ALTER TABLE Media ADD COLUMN ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool;\r
+ALTER TABLE Media ADD COLUMN RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool;\r
+\r
+\r
+CREATE TABLE MAC (\r
+ JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ OriginalJobId INTEGER UNSIGNED NOT NULL,\r
+ JobType BINARY(1) NOT NULL,\r
+ JobLevel BINARY(1) NOT NULL,\r
+ SchedTime DATETIME NOT NULL,\r
+ StartTime DATETIME NOT NULL,\r
+ EndTime DATETIME NOT NULL,\r
+ JobTDate BIGINT UNSIGNED NOT NULL,\r
+ PRIMARY KEY(JobId)\r
+ );\r
+\r
+CREATE TABLE Location (\r
+ LocationId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,\r
+ Location TINYBLOB NOT NULL,\r
+ PRIMARY KEY(LocationId)\r
+ );\r
+\r
+\r
+DELETE FROM Version;\r
+INSERT INTO Version (VersionId) VALUES (9);\r
--- /dev/null
+rem \r
+rem Batch script to update PostgreSQL tables from version 1.38 to 1.39\r
+rem \r
+\r
+echo " "\r
+echo "This script will update a Bacula PostgreSQL database from version 9 to 10"\r
+echo " which is needed to convert from Bacula version 1.38.x to 1.39.x or higher"\r
+echo "Depending on the size of your database,"\r
+echo "this script may take several minutes to run."\r
+echo " "\r
+bindir=@SQL_BINDIR@\r
+\r
+$bindir/psql -f update_postgresql_tables.sql -d bacula $*\r
+if ERRORLEVEL 1 GOTO :ERROR\r
+echo "Update of Bacula PostgreSQL tables succeeded."\r
+EXIT /b 0\r
+GOTO :EOF\r
+\r
+:ERROR\r
+echo "Update of Bacula PostgreSQL tables failed."\r
+EXIT /b 1\r
--- /dev/null
+ALTER TABLE media ADD COLUMN DeviceId integer;\r
+UPDATE media SET DeviceId=0;\r
+ALTER TABLE media ADD COLUMN MediaTypeId integer;\r
+UPDATE media SET MediaTypeId=0;\r
+ALTER TABLE media ADD COLUMN LocationId integer;\r
+UPDATE media SET LocationId=0;\r
+ALTER TABLE media ADD COLUMN RecycleCount integer;\r
+UPDATE media SET RecycleCount=0;\r
+ALTER TABLE media ADD COLUMN InitialWrite timestamp without time zone;\r
+ALTER TABLE media ADD COLUMN scratchpoolid integer;\r
+UPDATE media SET scratchpoolid=0;\r
+ALTER TABLE media ADD COLUMN recyclepoolid integer;\r
+UPDATE media SET recyclepoolid=0;\r
+ALTER TABLE media ADD COLUMN enabled integer;\r
+UPDATE media SET enabled=1;\r
+ALTER TABLE media ADD COLUMN Comment TEXT;\r
+\r
+ALTER TABLE job ADD COLUMN RealEndTime timestamp without time zone;\r
+ALTER TABLE job ADD COLUMN PriorJobId integer;\r
+UPDATE job SET PriorJobId=0;\r
+\r
+ALTER TABLE jobmedia DROP COLUMN Stripe;\r
+\r
+CREATE TABLE Location (\r
+ LocationId SERIAL NOT NULL,\r
+ Location TEXT NOT NULL,\r
+ Cost integer default 0,\r
+ Enabled integer,\r
+ PRIMARY KEY (LocationId)\r
+);\r
+\r
+CREATE TABLE LocationLog (\r
+ LocLogId SERIAL NOT NULL,\r
+ Date timestamp without time zone,\r
+ Comment TEXT NOT NULL,\r
+ MediaId INTEGER DEFAULT 0,\r
+ LocationId INTEGER DEFAULT 0,\r
+ newvolstatus text not null\r
+ check (newvolstatus in ('Full','Archive','Append',\r
+ 'Recycle','Purged','Read-Only','Disabled',\r
+ 'Error','Busy','Used','Cleaning','Scratch')),\r
+ newenabled smallint,\r
+ PRIMARY KEY(LocLogId)\r
+);\r
+\r
+\r
+CREATE TABLE Log\r
+(\r
+ LogId serial not null,\r
+ JobId integer not null,\r
+ Time timestamp without time zone,\r
+ LogText text not null,\r
+ primary key (LogId)\r
+);\r
+create index log_name_idx on Log (JobId);\r
+\r
+\r
+DELETE FROM version;\r
+INSERT INTO version (versionId) VALUES (10);\r
+\r
+vacuum;\r
*win32_name++ = '\\';
name += 4;
+ } else if (g_platform_id != VER_PLATFORM_WIN32_WINDOWS &&
+ g_pVSSPathConvert == NULL) {
+ /* allow path to be 32767 bytes */
+ *win32_name++ = '\\';
+ *win32_name++ = '\\';
+ *win32_name++ = '?';
+ *win32_name++ = '\\';
}
while (*name) {
g_pVSSPathConvert(pszBuf, tname, dwSize);
free_pool_memory(pszBuf);
}
+
Dmsg1(100, "Leave cvt_u_to_win32_path path=%s\n", tname);
}
char *tspec = (char *)malloc(max_len);
if (tspec == NULL) return NULL;
- if (g_platform_id != VER_PLATFORM_WIN32_WINDOWS) {
-#ifdef WIN32_VSS
- /* will append \\?\ at front itself */
- conv_unix_to_win32_path(path, tspec, max_len-4);
- Dmsg1(100, "win32 path=%s\n", tspec);
-#else
- /* allow path to be 32767 bytes */
- tspec[0] = '\\';
- tspec[1] = '\\';
- tspec[2] = '?';
- tspec[3] = '\\';
- tspec[4] = 0;
- conv_unix_to_win32_path(path, tspec+4, max_len-4);
- Dmsg1(100, "win32 path=%s\n", tspec);
-#endif
- } else {
- conv_unix_to_win32_path(path, tspec, max_len);
- Dmsg1(100, "win32 path=%s\n", tspec);
- }
+ conv_unix_to_win32_path(path, tspec, max_len);
+ Dmsg1(100, "win32 path=%s\n", tspec);
// add backslash only if there is none yet (think of c:\)
if (tspec[strlen(tspec)-1] != '\\')
DEFINES = \
-DUSING_DLL \
+ -DUSING_CATS \
$(HAVES)
VPATH = ../../dird
$(OBJDIR)/restore.o \
$(OBJDIR)/run_conf.o \
$(OBJDIR)/scheduler.o \
- $(OBJDIR)/sql_cmds.o \
$(OBJDIR)/ua_acl.o \
$(OBJDIR)/ua_cmds.o \
$(OBJDIR)/ua_dotcmds.o \
# Rules
#
-$(BINDIR)/bacula-dir.exe: $(DIRD_OBJS) $(LIBS_BACULA)
+$(BINDIR)/bacula-dir.exe: $(DIRD_OBJS) $(LIBS_CATS) $(LIBS_BACULA)
$(call link_winapp,$(DIRD_LIBS))
$(OBJDIR)/winres.res: ../libwin32/winres.rc
$(INCLUDE_PTHREADS) \
$(INCLUDE_BACULA) \
$(INCLUDE_ZLIB) \
- $(INCLUDE_OPENSSL) \
- $(INCLUDE_MYSQL)
+ $(INCLUDE_OPENSSL)
DEFINES = \
$(HAVES)
-VPATH = ../../cats ../compat ../../findlib ../../lib
+VPATH = ../compat ../../findlib ../../lib
######################################################################
# Files files in src/lib
-CATS_OBJS = \
- $(OBJDIR)/bdb.o \
- $(OBJDIR)/bdb_create.o \
- $(OBJDIR)/bdb_delete.o \
- $(OBJDIR)/bdb_find.o \
- $(OBJDIR)/bdb_get.o \
- $(OBJDIR)/bdb_list.o \
- $(OBJDIR)/bdb_update.o \
- $(OBJDIR)/mysql.o \
- $(OBJDIR)/postgresql.o \
- $(OBJDIR)/sql.o \
- $(OBJDIR)/sql_create.o \
- $(OBJDIR)/sql_delete.o \
- $(OBJDIR)/sql_find.o \
- $(OBJDIR)/sql_get.o \
- $(OBJDIR)/sql_list.o \
- $(OBJDIR)/sql_update.o \
- $(OBJDIR)/sqlite.o
-
COMPAT_OBJS = \
$(OBJDIR)/compat.o \
$(OBJDIR)/print.o \
$(OBJDIR)/workq.o
DLL_OBJS = \
- $(CATS_OBJS) $(COMPAT_OBJS) $(FIND_OBJS) $(LIB_OBJS)
+ $(COMPAT_OBJS) $(FIND_OBJS) $(LIB_OBJS)
STATIC_OBJS = \
$(OBJDIR)/parse_conf.o \
LIBS_DLL = \
$(LIBS_SSL) \
$(LIBS_CRYPTO) \
- $(LIBS_SQL) \
$(LIBS_PTHREADS) \
$(LIBS_ZLIB) \
-lwsock32 \
@echo "Linking $@"
$(call checkdir,$@)
$(ECHO_CMD)$(CXX) $(LDFLAGS) -mdll -mwindows -Wl,--out-implib,$(OBJDIR)/bacula.a $^ $(LIBS_DLL) -o $@
- $(makedbg)
include ../Makefile.rules
LIBRARY bacula.dll
EXPORTS
-;
-; cats
-;
-; bdb_create.c:
-
-; bdb_delete.c:
-
-; bdb_find.c:
-
-; bdb_get.c:
-
-; bdb_list.c:
-
-; bdb.c:
-
-; bdb_update.c:
-
-; mysql.c:
-_Z12db_sql_queryP4B_DBPKcPFiPviPPcES3_
-;_Z13db_next_indexP3JCRP4B_DBPcS3_
-_Z16db_escape_stringPcS_i
-_Z16db_init_databaseP3JCRPKcS2_S2_S2_iS2_i
-_Z16db_open_databaseP3JCRP4B_DB
-_Z17db_close_databaseP3JCRP4B_DB
-
-; postgresql.c:
-
-; sql.c:
-_Z10_db_unlockPKciP4B_DB
-_Z11db_strerrorP4B_DB
-;_Z11list_dashesP4B_DBPFvPvPKcES1_
-;_Z11list_resultP3JCRP4B_DBPFvPvPKcES3_11e_list_type
-_Z16db_int64_handlerPviPPc
-_Z18db_end_transactionP3JCRP4B_DB
-;_Z18get_sql_record_maxP3JCRP4B_DB
-;_Z19split_path_and_fileP3JCRP4B_DBPKc
-;_Z20check_tables_versionP3JCRP4B_DB
-_Z20db_start_transactionP3JCRP4B_DB
-;_Z7QueryDBPKciP3JCRP4B_DBPc
-_Z8_db_lockPKciP4B_DB
-;_Z8DeleteDBPKciP3JCRP4B_DBPc
-;_Z8InsertDBPKciP3JCRP4B_DBPc
-;_Z8UpdateDBPKciP3JCRP4B_DBPc
-
-; sql_create.c:
-_Z20db_create_job_recordP3JCRP4B_DBP7JOB_DBR
-_Z21db_create_pool_recordP3JCRP4B_DBP8POOL_DBR
-_Z22db_create_media_recordP3JCRP4B_DBP9MEDIA_DBR
-_Z23db_create_client_recordP3JCRP4B_DBP10CLIENT_DBR
-;_Z23db_create_device_recordP3JCRP4B_DBP10DEVICE_DBR
-_Z24db_create_counter_recordP3JCRP4B_DBP11COUNTER_DBR
-_Z24db_create_fileset_recordP3JCRP4B_DBP11FILESET_DBR
-_Z24db_create_storage_recordP3JCRP4B_DBP11STORAGE_DBR
-_Z25db_create_jobmedia_recordP3JCRP4B_DBP12JOBMEDIA_DBR
-_Z26db_create_mediatype_recordP3JCRP4B_DBP13MEDIATYPE_DBR
-_Z32db_create_file_attributes_recordP3JCRP4B_DBP8ATTR_DBR
-
-; sql_delete.c:
-_Z21db_delete_pool_recordP3JCRP4B_DBP8POOL_DBR
-;_Z21db_purge_media_recordP3JCRP4B_DBP9MEDIA_DBR
-_Z22db_delete_media_recordP3JCRP4B_DBP9MEDIA_DBR
-
-; sql_find.c:
-_Z18db_find_last_jobidP3JCRP4B_DBPKcP7JOB_DBR
-_Z19db_find_next_volumeP3JCRP4B_DBibP9MEDIA_DBR
-_Z22db_find_job_start_timeP3JCRP4B_DBP7JOB_DBRPPc
-_Z24db_find_failed_job_sinceP3JCRP4B_DBP7JOB_DBRPcRi
-
-; sql_get.c:
-_Z15db_get_pool_idsP3JCRP4B_DBPiPPj
-_Z16db_get_media_idsP3JCRP4B_DBjPiPPj
-_Z17db_get_client_idsP3JCRP4B_DBPiPPj
-_Z17db_get_job_recordP3JCRP4B_DBP7JOB_DBR
-_Z18db_get_pool_recordP3JCRP4B_DBP8POOL_DBR
-_Z19db_get_media_recordP3JCRP4B_DBP9MEDIA_DBR
-_Z20db_get_client_recordP3JCRP4B_DBP10CLIENT_DBR
-;_Z21db_get_counter_recordP3JCRP4B_DBP11COUNTER_DBR
-_Z21db_get_fileset_recordP3JCRP4B_DBP11FILESET_DBR
-_Z23db_get_job_volume_namesP3JCRP4B_DBjPPc
-;_Z23db_get_num_pool_recordsP3JCRP4B_DB
-;_Z24db_get_num_media_recordsP3JCRP4B_DB
-_Z28db_get_job_volume_parametersP3JCRP4B_DBjPP10VOL_PARAMS
-_Z29db_get_file_attributes_recordP3JCRP4B_DBPcP7JOB_DBRP8FILE_DBR
-
-; sqlite.c:
-
-; sql_list.c:
-_Z17db_list_sql_queryP3JCRP4B_DBPKcPFvPvS4_ES5_i11e_list_type
-_Z18db_list_job_totalsP3JCRP4B_DBP7JOB_DBRPFvPvPKcES5_
-_Z19db_list_job_recordsP3JCRP4B_DBP7JOB_DBRPFvPvPKcES5_11e_list_type
-_Z20db_list_pool_recordsP3JCRP4B_DBP8POOL_DBRPFvPvPKcES5_11e_list_type
-_Z21db_list_files_for_jobP3JCRP4B_DBjPFvPvPKcES3_
-_Z21db_list_media_recordsP3JCRP4B_DBP9MEDIA_DBRPFvPvPKcES5_11e_list_type
-_Z22db_list_client_recordsP3JCRP4B_DBPFvPvPKcES3_11e_list_type
-_Z24db_list_jobmedia_recordsP3JCRP4B_DBjPFvPvPKcES3_11e_list_type
-
-; sql_update.c:
-_Z19db_mark_file_recordP3JCRP4B_DBjj
-_Z21db_update_pool_recordP3JCRP4B_DBP8POOL_DBR
-_Z22db_update_media_recordP3JCRP4B_DBP9MEDIA_DBR
-_Z23db_update_client_recordP3JCRP4B_DBP10CLIENT_DBR
-_Z24db_make_inchanger_uniqueP3JCRP4B_DBP9MEDIA_DBR
-_Z24db_update_counter_recordP3JCRP4B_DBP11COUNTER_DBR
-_Z24db_update_job_end_recordP3JCRP4B_DBP7JOB_DBR
-_Z24db_update_media_defaultsP3JCRP4B_DBP9MEDIA_DBR
-_Z24db_update_storage_recordP3JCRP4B_DBP11STORAGE_DBR
-_Z26db_update_job_start_recordP3JCRP4B_DBP7JOB_DBR
-_Z28db_add_digest_to_file_recordP3JCRP4B_DBjPci
-
;
; compat
;
_Z4dropPcS_
_Z6bfgetsPciP6_iobuf
;_Z7bcallocjj
-;_Z7bstrcmpPKcS0_
-;_Z7cstrlenPKc
+_Z7bstrcmpPKcS0_
+_Z7cstrlenPKc
_Z8b_mallocPKcij
_Z8breallocPvj
_Z8bstrncatPcPKci
;_ZNK5dlist4prevEPKv
; edit.c:
-;_Z10add_commasPcS_
+_Z10add_commasPcS_
_Z10edit_int64xPc
_Z10edit_utimexPci
_Z11edit_uint64yPc
_Z23term_python_interpreterv
; queue.c:
-;_Z5qnextP7b_queueS0_
-;_Z7qdchainP7b_queue
+_Z5qnextP7b_queueS0_
+_Z7qdchainP7b_queue
_Z7qinsertP7b_queueS0_
;_Z7qremoveP7b_queue
BACULA_BINARIES := \
bacula.dll \
+ cats_mysql.dll \
+ cats_pgsql.dll \
+ cats_bdb.dll \
bacula-dir.exe \
bacula-fd.exe \
bacula-sd.exe \
NONGCC_BINARIES := \
libmysql.dll
+NONGCC_LIBRARIES := \
+ libpq.dll
+
SCRIPTS := \
- ../scripts/mtx-changer.cmd.in \
+ ../scripts/mtx-changer.cmd \
+ ../cats/create_bdb_database.cmd \
+ ../cats/drop_bdb_database.cmd \
+ ../cats/make_bdb_tables.cmd \
+ ../cats/drop_bdb_tables.cmd \
+ ../cats/update_bdb_tables.cmd \
+ ../cats/grant_bdb_privileges.cmd \
../cats/create_mysql_database.cmd \
../cats/drop_mysql_database.cmd \
../cats/make_mysql_tables.cmd \
../cats/update_mysql_tables.sql \
../cats/grant_mysql_privileges.cmd \
../cats/grant_mysql_privileges.sql \
+ ../cats/create_postgresql_database.cmd \
+ ../cats/drop_postgresql_database.cmd \
+ ../cats/make_postgresql_tables.cmd \
+ ../cats/make_postgresql_tables.sql \
+ ../cats/drop_postgresql_tables.cmd \
+ ../cats/drop_postgresql_tables.sql \
+ ../cats/update_postgresql_tables.cmd \
+ ../cats/update_postgresql_tables.sql \
+ ../cats/grant_postgresql_privileges.cmd \
+ ../cats/grant_postgresql_privileges.sql \
../cats/make_catalog_backup.cmd \
../cats/delete_catalog_backup.cmd
clean:
@echo "Cleaning `pwd`"
- $(CMD_ECHO)-rm -f $(INSTALL_EXE) $(BACULA_BINARIES) $(DEPKGS_BINARIES) $(NONGCC_BINARIES)
+ $(CMD_ECHO)-rm -f $(INSTALL_EXE) $(BACULA_BINARIES) $(DEPKGS_BINARIES) $(NONGCC_BINARIES) $(NONGCC_LIBRARIES)
$(CMD_ECHO)-rm -f $(BACULA_BINARIES) $(addsuffix .dbg,$(basename $(BACULA_BINARIES)))
$(CMD_ECHO)-rm -f $(DEPKGS_BINARIES) $(addsuffix .dbg,$(basename $(DEPKGS_BINARIES)))
$(foreach file,$(addprefix $(DEPKGS)/bin/, $(NONGCC_BINARIES)),$(eval $(call Copy_Binary,$(file))))
+$(foreach file,$(addprefix $(DEPKGS)/lib/, $(NONGCC_LIBRARIES)),$(eval $(call Copy_Binary,$(file))))
+
$(foreach file,$(addprefix $(BINDIR)/, $(BACULA_BINARIES)),$(eval $(call Strip_Binary,$(file))))
-$(INSTALL_EXE): winbacula.nsi $(BACULA_BINARIES) $(SCRIPTS) $(DEPKGS_BINARIES) $(NONGCC_BINARIES)
+$(INSTALL_EXE): winbacula.nsi $(BACULA_BINARIES) $(SCRIPTS) $(DEPKGS_BINARIES) $(NONGCC_BINARIES) $(NONGCC_LIBRARIES)
NSISDIR=$(NSIS_DIR) \
$(NSIS_DIR)/makensis -V3 $(DEFINES) winbacula.nsi
Var OptSilent
Var DependenciesDone
+Var DatabaseDone
Var OsIsNT
StrCpy $OptStart 0
StrCpy $OptSilent 0
StrCpy $DependenciesDone 0
+ StrCpy $DatabaseDone 0
StrCpy $OsIsNT 0
${GetParameters} $R0
File "${MINGW_BIN}\..\mingw32\bin\mingwm10.dll"
!endif
File "libeay32.dll"
- File "libmysql.dll"
File "pthreadGCE.dll"
File "ssleay32.dll"
File "zlib1.dll"
${EndIf}
FunctionEnd
+Function InstallDatabase
+ ${If} $DatabaseDone = 0
+ ${If} $OptionsDirectorDB = 1
+ File /oname=bacula_cats.dll "cats_mysql.dll"
+ File "libmysql.dll"
+ ${ElseIf} $OptionsDirectorDB = 2
+ File /oname=bacula_cats.dll "cats_pgsql.dll"
+ File "libpq.dll"
+ ${ElseIf} $OptionsDirectorDB = 3
+ File /oname=bacula_cats.dll "cats_bdb.dll"
+ ${EndIf}
+
+ StrCpy $DatabaseDone 1
+ ${EndIf}
+FunctionEnd
+
Section "-Initialize"
; Create Start Menu Directory
SectionIn 2 3
SetOutPath "$INSTDIR\bin"
+ Call InstallDatabase
File "loaderinfo.exe"
File "mt.exe"
File "mtx.exe"
SectionIn 2 3
SetOutPath "$INSTDIR\bin"
+ Call InstallDatabase
File "bacula-dir.exe"
File "dbcheck.exe"
- File ..\cats\create_mysql_database.cmd
- File ..\cats\drop_mysql_database.cmd
- File ..\cats\make_mysql_tables.cmd
- File ..\cats\make_mysql_tables.sql
- File ..\cats\drop_mysql_tables.cmd
- File ..\cats\drop_mysql_tables.sql
- File ..\cats\update_mysql_tables.cmd
- File ..\cats\update_mysql_tables.sql
- File ..\cats\grant_mysql_privileges.cmd
- File ..\cats\grant_mysql_privileges.sql
+
+ ${If} $OptionsDirectorDB = 1
+ File /oname=create_database.cmd ..\cats\create_mysql_database.cmd
+ File /oname=drop_database.cmd ..\cats\drop_mysql_database.cmd
+ File /oname=make_tables.cmd ..\cats\make_mysql_tables.cmd
+ File ..\cats\make_mysql_tables.sql
+ File /oname=drop_tables.cmd ..\cats\drop_mysql_tables.cmd
+ File ..\cats\drop_mysql_tables.sql
+ File /oname=update_tables.cmd ..\cats\update_mysql_tables.cmd
+ File ..\cats\update_mysql_tables.sql
+ File /oname=grant_privileges.cmd ..\cats\grant_mysql_privileges.cmd
+ File ..\cats\grant_mysql_privileges.sql
+ ${ElseIf} $OptionsDirectorDB = 2
+ File /oname=create_database.cmd ..\cats\create_postgresql_database.cmd
+ File /oname=drop_database.cmd ..\cats\drop_postgresql_database.cmd
+ File /oname=make_tables.cmd ..\cats\make_postgresql_tables.cmd
+ File ..\cats\make_postgresql_tables.sql
+ File /oname=drop_tables.cmd ..\cats\drop_postgresql_tables.cmd
+ File ..\cats\drop_postgresql_tables.sql
+ File /oname=update_tables.cmd ..\cats\update_postgresql_tables.cmd
+ File ..\cats\update_postgresql_tables.sql
+ File /oname=grant_privileges.cmd ..\cats\grant_postgresql_privileges.cmd
+ File ..\cats\grant_postgresql_privileges.sql
+ ${ElseIf} $OptionsDirectorDB = 3
+ File /oname=create_database.cmd ../cats/create_bdb_database.cmd
+ File /oname=drop_database.cmd ../cats/drop_bdb_database.cmd
+ File /oname=make_tables.cmd ../cats/make_bdb_tables.cmd
+ File /oname=drop_tables.cmd ../cats/drop_bdb_tables.cmd
+ File /oname=update_tables.cmd ../cats/update_bdb_tables.cmd
+ File /oname=grant_privileges.cmd ../cats/grant_bdb_privileges.cmd
+ ${EndIf}
File ..\cats\make_catalog_backup.cmd
File ..\cats\delete_catalog_backup.cmd
FileWrite $R3 '[Field $R4]$\r$\nType="Checkbox"$\r$\nState=$OptStart$\r$\nText="Start after install"$\r$\nLeft=6$\r$\nTop=$R5$\r$\nRight=280$\r$\nBottom=$R6$\r$\n'
+ IntOp $R4 $R4 + 1
+ IntOp $R5 $R6 + 8
+ ${Endif}
+
+ ${If} $R1 = 1
+ ${OrIf} $R2 = 1
IntOp $R4 $R4 + 1
IntOp $R5 $R6 + 2
IntOp $R6 $R5 + 8
FileWrite $R3 '[Field $R4]$\r$\nType="RadioButton"$\r$\nState=0$\r$\nText="Builtin"$\r$\nFlags="NOTABSTOP"$\r$\nLeft=142$\r$\nTop=$R5$\r$\nRight=182$\r$\nBottom=$R6$\r$\n'
- IntOp $R4 $R4 + 1
- IntOp $R5 $R6 + 8
${Endif}
-
+
IntOp $R4 $R4 - 1
FileWrite $R3 "[Settings]$\r$\nNumFields=$R4$\r$\n"
--- /dev/null
+@echo off
+REM
+REM
+REM Bacula interface to mtx autoloader
+REM
+REM $Id$
+REM
+REM If you set in your Device resource
+REM
+REM Changer Command = "path-to-this-script/mtx-changer %c %o %S %a %d"
+REM you will have the following input to this script:
+REM
+REM So Bacula will always call with all the following arguments, even though
+REM in some cases, not all are used.
+REM
+REM mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index"
+REM $1 $2 $3 $4 $5
+REM
+REM for example:
+REM
+REM mtx-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system)
+REM
+REM will request to load the first cartridge into drive 0, where
+REM the SCSI control channel is /dev/sg0, and the read/write device
+REM is /dev/nst0.
+REM
+REM If you need to do an offline, refer to the drive as $4
+REM e.g. mt -f $4 offline
+REM
+REM Many changers need an offline after the unload. Also many
+REM changers need a sleep 60 after the mtx load.
+REM
+REM N.B. If you change the script, take care to return either
+REM the mtx exit code or a 0. If the script exits with a non-zero
+REM exit code, Bacula will assume the request failed.
+REM
+
+SET MTX=@MTX@
+SET MT=@MT@
+SET working_dir=@working_dir@
+
+SET dbgfile=%working_dir%\mtx.log
+
+REM to turn on logging, uncomment the following line
+REM findstr xxx <nul >>%working_dir%\mtx.log
+
+REM
+REM check parameter count on commandline
+REM
+REM Check for special cases where only 2 arguments are needed,
+REM all others are a minimum of 5
+REM
+IF "%1" EQU "" goto :param_count_invalid
+IF "%2" EQU "" goto :param_count_invalid
+IF "%2" EQU "list" goto :param_count_valid
+IF "%2" EQU "slots" goto :param_count_valid
+IF "%3" EQU "" goto :param_count_invalid
+IF "%4" EQU "" goto :param_count_invalid
+IF "%5" EQU "" goto :param_count_invalid
+GOTO :param_count_valid
+
+:param_count_invalid
+ echo Insufficient number of arguments given.
+ IF "%2" EQU "" (
+ echo At least two arguments must be specified.
+ ) else echo Command expected 5 arguments.
+:usage
+ ECHO.
+ ECHO usage: mtx-changer ctl-device command [slot archive-device drive-index]
+ ECHO Valid commands are: unload, load, list, loaded, and slots.
+ EXIT /B 1
+
+:param_count_valid
+
+REM Setup arguments
+SET ctl=%1
+SET cmd=%2
+SET slot=%3
+SET device=%4
+SET drive=%5
+
+CALL :debug "Parms: %ctl% %cmd% %slot% %device% %drive%"
+
+IF "%cmd%" NEQ "unload" goto :cmdLoad
+ CALL :debug "Doing mtx -f %ctl% unload %slot% %drive%"
+ %MT% -f %device% offline
+ %MTX% -f %ctl% unload %slot% %drive%
+ EXIT /B %ERRORLEVEL%
+
+:cmdLoad
+IF "%cmd%" NEQ "load" goto :cmdList
+ CALL :debug "Doing mtx -f %ctl% load %slot% %drive%"
+ %MTX% -f %ctl% load %slot% %drive%
+ SET rtn=%ERRORLEVEL%
+ %MT% -f %device% load
+ CALL :wait_for_drive %device%
+ EXIT /B %rtn%
+
+:cmdList
+IF "%cmd%" NEQ "list" goto :cmdLoaded
+ CALL :debug "Doing mtx -f %ctl% -- to list volumes"
+ CALL :make_temp_file
+ IF ERRORLEVEL 1 GOTO :EOF
+REM Enable the following if you are using barcodes and need an inventory
+REM %MTX% -f %ctl% inventory
+ %MTX% -f %ctl% status >%TMPFILE%
+ SET rtn=%ERRORLEVEL%
+ FOR /F "usebackq tokens=3,6 delims==: " %%i in ( `findstr /R /C:" *Storage Element [0-9]*:.*Full" %TMPFILE%` ) do echo %%i:%%j
+ FOR /F "usebackq tokens=7,10" %%i in ( `findstr /R /C:"^Data Transfer Element [0-9]*:Full (Storage Element [0-9]" %TMPFILE%` ) do echo %%i:%%j
+ DEL /F "%TMPFILE%" >nul 2>&1
+REM
+REM If you have a VXA PacketLoader and the above does not work, try
+REM turning it off and enabling the following line.
+REM %MTX% -f %ctl% status | grep " *Storage Element [0-9]*:.*Full" | sed "s/*Storage Element //" | sed "s/Full :VolumeTag=//"
+ EXIT /B %rtn%
+
+:cmdLoaded
+IF "%cmd%" NEQ "loaded" goto :cmdSlots
+ CALL :debug "Doing mtx -f %ctl% %drive% -- to find what is loaded"
+ CALL :make_temp_file
+ %MTX% -f %ctl% status >%TMPFILE%
+ SET rtn=%ERRORLEVEL%
+ FOR /F "usebackq tokens=7" %%i in ( `findstr /R /C:"^Data Transfer Element %drive%:Full" %TMPFILE%` ) do echo %%i
+ findstr /R /C:"^Data Transfer Element %drive%:Empty" %TMPFILE% >nul && echo 0
+ DEL /F "%TMPFILE%" >nul 2>&1
+ EXIT /B %rtn%
+
+:cmdSlots
+IF "%cmd%" NEQ "slots" goto :cmdUnknown
+ CALL :debug "Doing mtx -f %ctl% -- to get count of slots"
+ CALL :make_temp_file
+ %MTX% -f %ctl% status >%TMPFILE%
+ SET rtn=%ERRORLEVEL%
+ FOR /F "usebackq tokens=5" %%i in ( `findstr /R /C:" *Storage Changer" %TMPFILE%` ) do echo %%i
+ DEL /F "%TMPFILE%" >nul 2>&1
+ EXIT /B %rtn%
+
+:cmdUnknown
+ ECHO '%cmd%' is an invalid command.
+ GOTO :usage
+
+REM
+REM log what's done
+REM
+:debug
+ IF NOT EXIST "%dbgfile%" GOTO :EOF
+ FOR /F "usebackq tokens=2-4,5-7 delims=/:. " %%i in ( '%DATE% %TIME%' ) do SET TIMESTAMP=%%k%%i%%j-%%l:%%m:%%n
+ ECHO %TIMESTAMP% %*>> %dbgfile%
+ GOTO :EOF
+
+REM
+REM Create a temporary file
+REM
+:make_temp_file
+ REM SET TMPFILE=%working_dir%\mtx.tmp
+ SET TMPFILE=c:\bacula.test\working\mtx.tmp
+ IF EXIST "%TMPFILE%" (
+ ECHO Temp file security problem on: %TMPFILE%
+ EXIT /B 1
+ )
+ GOTO :EOF
+
+REM
+REM The purpose of this function is to wait a maximum
+REM time for the drive. It will
+REM return as soon as the drive is ready, or after
+REM waiting a maximum of 300 seconds.
+REM Note, this is very system dependent, so if you are
+REM not running on Linux, you will probably need to
+REM re-write it, or at least change the findstr target.
+REM
+:wait_for_drive
+ FOR /L %%i IN ( 1, 1, 300 ) DO (
+ %MT% -f %1 status | findstr ONLINE >NUL 2>&1
+ IF %ERRORLEVEL%==0 GOTO :EOF
+ CALL :debug "Device %1 - not ready, retrying..."
+ CALL :sleep 1
+ )
+ CALL :debug "Device %1 - not ready, timed out..."
+ GOTO :EOF
+
+:sleep
+ CALL :get_secs
+ SET start_time=%ERRORLEVEL%
+ SET /A end_time=100*%1+start_time
+:sleep_wait
+ CALL :get_secs
+ IF %ERRORLEVEL% LSS %start_time% GOTO :sleep
+ IF %ERRORLEVEL% LSS %end_time% GOTO :sleep_wait
+ GOTO :EOF
+
+:get_secs
+ FOR /F "tokens=3,4 delims=:. " %%i IN ( "%TIME%" ) do SET /A "secs= ( 1%%i %% 100 ) * 100 + ( 1%%j %% 100 )"
+ EXIT /B %secs%
+++ /dev/null
-@echo off
-REM
-REM
-REM Bacula interface to mtx autoloader
-REM
-REM $Id$
-REM
-REM If you set in your Device resource
-REM
-REM Changer Command = "path-to-this-script/mtx-changer %c %o %S %a %d"
-REM you will have the following input to this script:
-REM
-REM So Bacula will always call with all the following arguments, even though
-REM in come cases, not all are used.
-REM
-REM mtx-changer "changer-device" "command" "slot" "archive-device" "drive-index"
-REM $1 $2 $3 $4 $5
-REM
-REM for example:
-REM
-REM mtx-changer /dev/sg0 load 1 /dev/nst0 0 (on a Linux system)
-REM
-REM will request to load the first cartidge into drive 0, where
-REM the SCSI control channel is /dev/sg0, and the read/write device
-REM is /dev/nst0.
-REM
-REM If you need to an offline, refer to the drive as $4
-REM e.g. mt -f $4 offline
-REM
-REM Many changers need an offline after the unload. Also many
-REM changers need a sleep 60 after the mtx load.
-REM
-REM N.B. If you change the script, take care to return either
-REM the mtx exit code or a 0. If the script exits with a non-zero
-REM exit code, Bacula will assume the request failed.
-REM
-
-SET MTX=@MTX@
-SET MT=@MT@
-SET working_dir=@working_dir@
-
-SET dbgfile=%working_dir%\mtx.log
-
-REM to turn on logging, uncomment the following line
-REM findstr xxx <nul >>%working_dir%\mtx.log
-
-REM
-REM check parameter count on commandline
-REM
-REM Check for special cases where only 2 arguments are needed,
-REM all others are a minimum of 5
-REM
-IF "%1" EQU "" goto :param_count_invalid
-IF "%2" EQU "" goto :param_count_invalid
-IF "%2" EQU "list" goto :param_count_valid
-IF "%2" EQU "slots" goto :param_count_valid
-IF "%3" EQU "" goto :param_count_invalid
-IF "%4" EQU "" goto :param_count_invalid
-IF "%5" EQU "" goto :param_count_invalid
-GOTO :param_count_valid
-
-:param_count_invalid
- echo Insufficient number of arguments given.
- IF "%2" EQU "" (
- echo At least two arguments must be specified.
- ) else echo Command expected 5 arguments.
-:usage
- ECHO.
- ECHO usage: mtx-changer ctl-device command [slot archive-device drive-index]
- ECHO Valid commands are: unload, load, list, loaded, and slots.
- EXIT /B 1
-
-:param_count_valid
-
-REM Setup arguments
-SET ctl=%1
-SET cmd=%2
-SET slot=%3
-SET device=%4
-SET drive=%5
-
-CALL :debug "Parms: %ctl% %cmd% %slot% %device% %drive%"
-
-IF "%cmd%" NEQ "unload" goto :cmdLoad
- CALL :debug "Doing mtx -f %ctl% unload %slot% %drive%"
- %MT% -f %device% offline
- %MTX% -f %ctl% unload %slot% %drive%
- EXIT /B %ERRORLEVEL%
-
-:cmdLoad
-IF "%cmd%" NEQ "load" goto :cmdList
- CALL :debug "Doing mtx -f %ctl% load %slot% %drive%"
- %MTX% -f %ctl% load %slot% %drive%
- SET rtn=%ERRORLEVEL%
- %MT% -f %device% load
- CALL :wait_for_drive %device%
- EXIT /B %rtn%
-
-:cmdList
-IF "%cmd%" NEQ "list" goto :cmdLoaded
- CALL :debug "Doing mtx -f %ctl% -- to list volumes"
- CALL :make_temp_file
- IF ERRORLEVEL 1 GOTO :EOF
-REM Enable the following if you are using barcodes and need an inventory
-REM %MTX% -f %ctl% inventory
- %MTX% -f %ctl% status >%TMPFILE%
- SET rtn=%ERRORLEVEL%
- FOR /F "usebackq tokens=3,6 delims==: " %%i in ( `findstr /R /C:" *Storage Element [0-9]*:.*Full" %TMPFILE%` ) do echo %%i:%%j
- FOR /F "usebackq tokens=7,10" %%i in ( `findstr /R /C:"^Data Transfer Element [0-9]*:Full (Storage Element [0-9]" %TMPFILE%` ) do echo %%i:%%j
- DEL /F "%TMPFILE%" >nul 2>&1
-REM
-REM If you have a VXA PacketLoader and the above does not work, try
-REM turning it off and enabling the following line.
-REM %MTX% -f %ctl% status | grep " *Storage Element [0-9]*:.*Full" | sed "s/*Storage Element //" | sed "s/Full :VolumeTag=//"
- EXIT /B %rtn%
-
-:cmdLoaded
-IF "%cmd%" NEQ "loaded" goto :cmdSlots
- CALL :debug "Doing mtx -f %ctl% %drive% -- to find what is loaded"
- CALL :make_temp_file
- %MTX% -f %ctl% status >%TMPFILE%
- SET rtn=%ERRORLEVEL%
- FOR /F "usebackq tokens=7" %%i in ( `findstr /R /C:"^Data Transfer Element %drive%:Full" %TMPFILE%` ) do echo %%i
- findstr /R /C:"^Data Transfer Element %drive%:Empty" %TMPFILE% >nul && echo 0
- DEL /F "%TMPFILE%" >nul 2>&1
- EXIT /B %rtn%
-
-:cmdSlots
-IF "%cmd%" NEQ "slots" goto :cmdUnknown
- CALL :debug "Doing mtx -f %ctl% -- to get count of slots"
- CALL :make_temp_file
- %MTX% -f %ctl% status >%TMPFILE%
- SET rtn=%ERRORLEVEL%
- FOR /F "usebackq tokens=5" %%i in ( `findstr /R /C:" *Storage Changer" %TMPFILE%` ) do echo %%i
- DEL /F "%TMPFILE%" >nul 2>&1
- EXIT /B %rtn%
-
-:cmdUnknown
- ECHO '%cmd%' is an invalid command.
- GOTO :usage
-
-REM
-REM log whats done
-REM
-:debug
- IF NOT EXIST "%dbgfile%" GOTO :EOF
- FOR /F "usebackq tokens=2-4,5-7 delims=/:. " %%i in ( '%DATE% %TIME%' ) do SET TIMESTAMP=%%k%%i%%j-%%l:%%m:%%n
- ECHO %TIMESTAMP% %*>> %dbgfile%
- GOTO :EOF
-
-REM
-REM Create a temporary file
-REM
-:make_temp_file
- REM SET TMPFILE=%working_dir%\mtx.tmp
- SET TMPFILE=c:\bacula.test\working\mtx.tmp
- IF EXIST "%TMPFILE%" (
- ECHO Temp file security problem on: %TMPFILE%
- EXIT /B 1
- )
- GOTO :EOF
-
-REM
-REM The purpose of this function to wait a maximum
-REM time for the drive. It will
-REM return as soon as the drive is ready, or after
-REM waiting a maximum of 300 seconds.
-REM Note, this is very system dependent, so if you are
-REM not running on Linux, you will probably need to
-REM re-write it, or at least change the grep target.
-REM
-:wait_for_drive
- FOR /L %%i IN ( 1, 1, 300 ) DO (
- %MT% -f %1 status | findstr ONLINE >NUL 2>&1
- IF %ERRORLEVEL%==0 GOTO :EOF
- CALL :debug "Device %1 - not ready, retrying..."
- CALL :sleep 1
- )
- CALL :debug "Device %1 - not ready, timed out..."
- GOTO :EOF
-
-:sleep
- CALL :get_secs
- SET start_time=%ERRORLEVEL%
- SET /A end_time=100*%1+start_time
-:sleep_wait
- CALL :get_secs
- IF %ERRORLEVEL% LSS %start_time% GOTO :sleep
- IF %ERRORLEVEL% LSS %end_time% GOTO :sleep_wait
- GOTO :EOF
-
-:get_secs
- FOR /F "tokens=3,4 delims=:. " %%i IN ( "%TIME%" ) do SET /A "secs= ( 1%%i %% 100 ) * 100 + ( 1%%j %% 100 )"
- EXIT /B %secs%
$(BINDIR)/bextract.exe: $(BEXTRACT_OBJS) $(COMMON_OBJS) $(LIBS_BACULA)
$(call link_conapp,$(LIBS_STORED) $(LIBS_ZLIB))
-$(BINDIR)/bscan.exe: $(OBJDIR)/bscan.o $(COMMON_OBJS) $(LIBS_BACULA)
+$(BINDIR)/bscan.exe: $(OBJDIR)/bscan.o $(COMMON_OBJS) $(LIBS_CATS) $(LIBS_BACULA)
$(call link_conapp,$(LIBS_STORED))
$(BINDIR)/bcopy.exe: $(OBJDIR)/bcopy.o $(COMMON_OBJS) $(LIBS_BACULA)
DEFINES = \
-DUSING_DLL \
+ -DUSING_CATS \
$(HAVES)
VPATH = ../../tools ../../dird
$(BINDIR)/bsmtp.exe: $(OBJDIR)/bsmtp.o $(LIBS_BACULA)
$(call link_conapp,)
-$(BINDIR)/dbcheck.exe: $(OBJDIR)/dbcheck.o $(DIRCONF_OBJS) $(LIBS_BACULA)
+$(BINDIR)/dbcheck.exe: $(OBJDIR)/dbcheck.o $(DIRCONF_OBJS) $(LIBS_BACULA) $(LIBS_CATS)
$(call link_conapp,)
$(BINDIR)/fstype.exe: $(OBJDIR)/fstype.o $(LIBS_BACULA)