From d4745c3ac42f38179852e92619ef573278cc1347 Mon Sep 17 00:00:00 2001 From: Kern Sibbald Date: Fri, 22 Feb 2008 17:34:36 +0000 Subject: [PATCH] Apply patch (with some difficulties) from Joao Henrique Freitas , which adds support for libdbi as a Bacula database driver. git-svn-id: https://bacula.svn.sourceforge.net/svnroot/bacula/trunk@6464 91ce42f0-d328-0410-95d8-f526ca767f89 --- bacula/AUTHORS | 1 + bacula/autoconf/bacula-macros/db.m4 | 121 ++++ bacula/autoconf/config.h.in | 7 + bacula/autoconf/configure.in | 5 +- bacula/configure | 151 ++++- bacula/src/cats/Makefile.in | 6 +- bacula/src/cats/cats.h | 125 +++- bacula/src/cats/dbi.c | 987 ++++++++++++++++++++++++++++ bacula/src/cats/sql_cmds.c | 253 ++++--- bacula/src/cats/sql_cmds.h | 14 +- bacula/src/cats/sql_create.c | 4 +- bacula/src/cats/sql_delete.c | 2 +- bacula/src/cats/sql_find.c | 2 +- bacula/src/cats/sql_get.c | 4 +- bacula/src/cats/sql_list.c | 2 +- bacula/src/cats/sql_update.c | 2 +- bacula/src/dird/bacula-dir.conf.in | 2 + bacula/src/dird/ua_prune.c | 16 +- bacula/src/dird/ua_restore.c | 11 +- bacula/src/stored/bscan.c | 18 +- bacula/technotes-2.3 | 3 + 21 files changed, 1600 insertions(+), 136 deletions(-) create mode 100644 bacula/src/cats/dbi.c diff --git a/bacula/AUTHORS b/bacula/AUTHORS index 17bd5e0474..e2eb11cc08 100644 --- a/bacula/AUTHORS +++ b/bacula/AUTHORS @@ -44,6 +44,7 @@ Frank Sweetser Howard Thomson Jaime Ventura Jan Kesten +Joao Henrique Freitas John Goerzen John Kodis John Walker diff --git a/bacula/autoconf/bacula-macros/db.m4 b/bacula/autoconf/bacula-macros/db.m4 index d8f846cebd..dbc9533658 100644 --- a/bacula/autoconf/bacula-macros/db.m4 +++ b/bacula/autoconf/bacula-macros/db.m4 @@ -1,3 +1,124 @@ +AC_DEFUN([BA_CHECK_DBI_DB], +[ +db_found=no +AC_MSG_CHECKING(for DBI support) +AC_ARG_WITH(dbi, +[ + --with-dbi@<:@=DIR@:>@ Include DBI support. 
DIR is the DBD base + install directory, default is to search through + a number of common places for the DBI files.], +[ + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + if test -f /usr/local/include/dbi/dbi.h; then + DBI_INCDIR=/usr/local/dbi/include + if test -d /usr/local/lib64; then + DBI_LIBDIR=/usr/local/lib64 + else + DBI_LIBDIR=/usr/local/lib + fi + DBI_BINDIR=/usr/local/bin + elif test -f /usr/include/dbi/dbi.h; then + DBI_INCDIR=/usr/include + if test -d /usr/lib64; then + DBI_LIBDIR=/usr/lib64 + else + DBI_LIBDIR=/usr/lib + fi + DBI_BINDIR=/usr/bin + elif test -f $prefix/include/dbi/dbi.h; then + DBI_INCDIR=$prefix/include + if test -d $prefix/lib64; then + DBI_LIBDIR=$prefix/lib64 + else + DBI_LIBDIR=$prefix/lib + fi + DBI_BINDIR=$prefix/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find dbi.h in standard locations) + fi + if test -d /usr/local/lib/dbd; then + DRIVERDIR=/usr/local/lib/dbd + if test -d /usr/local/lib64/dbd; then + DRIVERDIR=/usr/local/lib64/dbd + else + DRIVERDIR=/usr/local/lib/dbd + fi + elif test -d /usr/lib/dbd; then + DRIVERDIR=/usr/lib/dbd + if test -d /usr/lib64/dbd; then + DRIVERDIR=/usr/lib64/dbd + else + DRIVERDIR=/usr/lib/dbd + fi + elif test -d $prefix/lib/dbd; then + if test -d $prefix/lib64/dbd; then + DRIVERDIR=$prefix/lib64/dbd + else + DRIVERDIR=$prefix/lib/dbd + fi + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Unable to find DBD drivers in standard locations) + fi + else + if test -f $withval/dbi.h; then + DBI_INCDIR=$withval + DBI_LIBDIR=$withval + DBI_BINDIR=$withval + elif test -f $withval/include/dbi/dbi.h; then + DBI_INCDIR=$withval/include + if test -d $withval/lib64; then + DBI_LIBDIR=$withval/lib64 + else + DBI_LIBDIR=$withval/lib + fi + DBI_BINDIR=$withval/bin + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid DBI directory $withval - unable to find dbi.h under $withval) + fi + if test -d $withval/dbd; then + DRIVERDIR=$withval/dbd + elif test -d $withval/lib/; then + if test -d 
$withval/lib64/dbd; then + DRIVERDIR=$withval/lib64/dbd + else + DRIVERDIR=$withval/lib/dbd + fi + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(Invalid DBD driver directory $withval - unable to find DBD drivers under $withval) + fi + fi + SQL_INCLUDE=-I$DBI_INCDIR + SQL_LFLAGS="-L$DBI_LIBDIR -ldbi" + SQL_BINDIR=$DBI_BINDIR + SQL_LIB=$DBI_LIBDIR/libdbi.a + DBI_DBD_DRIVERDIR="-D DBI_DRIVER_DIR=\\\"$DRIVERDIR\\\"" + + AC_DEFINE(HAVE_DBI) + AC_MSG_RESULT(yes) + db_found=yes + support_dbi=yes + db_type=DBI + DB_TYPE=dbi + + else + AC_MSG_RESULT(no) + fi +],[ + AC_MSG_RESULT(no) +]) +AC_SUBST(SQL_LFLAGS) +AC_SUBST(SQL_INCLUDE) +AC_SUBST(SQL_BINDIR) +AC_SUBST(DBI_DBD_DRIVERDIR) + +]) + + AC_DEFUN([BA_CHECK_MYSQL_DB], [ db_found=no diff --git a/bacula/autoconf/config.h.in b/bacula/autoconf/config.h.in index 1c3a0aff69..1c072aac7c 100644 --- a/bacula/autoconf/config.h.in +++ b/bacula/autoconf/config.h.in @@ -21,6 +21,9 @@ /* Define to `int' if doesn't define. */ #undef ssize_t +/* Define if you want to use DBI */ +#undef HAVE_DBI + /* Define if you want to use PostgreSQL */ #undef HAVE_POSTGRESQL @@ -322,6 +325,10 @@ */ #undef HAVE_DECL_TZNAME +/* Define to 1 if you have the declaration of `tzname', and to 0 if you don't. + */ +#undef HAVE_DECL_TZNAME + /* Define to 1 if you have the declaration of `_snprintf', and to 0 if you don't. 
*/ #undef HAVE_DECL__SNPRINTF diff --git a/bacula/autoconf/configure.in b/bacula/autoconf/configure.in index 421e02ed89..ad7144a103 100644 --- a/bacula/autoconf/configure.in +++ b/bacula/autoconf/configure.in @@ -189,6 +189,7 @@ support_mysql=no support_sqlite=no support_sqlite3=no support_postgresql=no +support_dbi=no support_smartalloc=yes support_readline=yes support_conio=yes @@ -1329,6 +1330,8 @@ BA_CHECK_SQLITE3_DB BA_CHECK_SQLITE_DB +BA_CHECK_DBI_DB + AC_SUBST(cats) AC_SUBST(DB_TYPE) @@ -2332,7 +2335,7 @@ if test "x${db_type}" = "xInternal" ; then echo " " echo " " echo "You have not specified either --enable-client-only or one of the" - echo " supported databases: MySQL, PostgreSQL, SQLite3 or SQLite." + echo " supported databases: MySQL, PostgreSQL, SQLite3, SQLite or DBI." echo " This is not permitted. Please reconfigure." echo " " echo "Aborting the configuration ..." diff --git a/bacula/configure b/bacula/configure index ce4330fee1..85c2f05f2b 100755 --- a/bacula/configure +++ b/bacula/configure @@ -831,6 +831,7 @@ SBINPERM SQL_LFLAGS SQL_INCLUDE SQL_BINDIR +DBI_DBD_DRIVERDIR cats DB_TYPE GETCONF @@ -1532,6 +1533,10 @@ Optional Packages: --with-sqlite[=DIR] Include SQLite support. DIR is the SQLite base install directory, default is to search through a number of common places for the SQLite files. + + --with-dbi[=DIR] Include DBI support. DIR is the DBD base + install directory, default is to search through + a number of common places for the DBI files. --with-x use the X Window System Some influential environment variables: @@ -13420,6 +13425,7 @@ support_mysql=no support_sqlite=no support_sqlite3=no support_postgresql=no +support_dbi=no support_smartalloc=yes support_readline=yes support_conio=yes @@ -17848,6 +17854,146 @@ fi +db_found=no +{ echo "$as_me:$LINENO: checking for DBI support" >&5 +echo $ECHO_N "checking for DBI support... $ECHO_C" >&6; } + +# Check whether --with-dbi was given. 
+if test "${with_dbi+set}" = set; then + withval=$with_dbi; + if test "$withval" != "no"; then + if test "$withval" = "yes"; then + if test -f /usr/local/include/dbi/dbi.h; then + DBI_INCDIR=/usr/local/dbi/include + if test -d /usr/local/lib64; then + DBI_LIBDIR=/usr/local/lib64 + else + DBI_LIBDIR=/usr/local/lib + fi + DBI_BINDIR=/usr/local/bin + elif test -f /usr/include/dbi/dbi.h; then + DBI_INCDIR=/usr/include + if test -d /usr/lib64; then + DBI_LIBDIR=/usr/lib64 + else + DBI_LIBDIR=/usr/lib + fi + DBI_BINDIR=/usr/bin + elif test -f $prefix/include/dbi/dbi.h; then + DBI_INCDIR=$prefix/include + if test -d $prefix/lib64; then + DBI_LIBDIR=$prefix/lib64 + else + DBI_LIBDIR=$prefix/lib + fi + DBI_BINDIR=$prefix/bin + else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + { { echo "$as_me:$LINENO: error: Unable to find dbi.h in standard locations" >&5 +echo "$as_me: error: Unable to find dbi.h in standard locations" >&2;} + { (exit 1); exit 1; }; } + fi + if test -d /usr/local/lib/dbd; then + DRIVERDIR=/usr/local/lib/dbd + if test -d /usr/local/lib64/dbd; then + DRIVERDIR=/usr/local/lib64/dbd + else + DRIVERDIR=/usr/local/lib/dbd + fi + elif test -d /usr/lib/dbd; then + DRIVERDIR=/usr/lib/dbd + if test -d /usr/lib64/dbd; then + DRIVERDIR=/usr/lib64/dbd + else + DRIVERDIR=/usr/lib/dbd + fi + elif test -d $prefix/lib/dbd; then + if test -d $prefix/lib64/dbd; then + DRIVERDIR=$prefix/lib64/dbd + else + DRIVERDIR=$prefix/lib/dbd + fi + else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + { { echo "$as_me:$LINENO: error: Unable to find DBD drivers in standard locations" >&5 +echo "$as_me: error: Unable to find DBD drivers in standard locations" >&2;} + { (exit 1); exit 1; }; } + fi + else + if test -f $withval/dbi.h; then + DBI_INCDIR=$withval + DBI_LIBDIR=$withval + DBI_BINDIR=$withval + elif test -f $withval/include/dbi/dbi.h; then + DBI_INCDIR=$withval/include + if test -d $withval/lib64; then + DBI_LIBDIR=$withval/lib64 + 
else + DBI_LIBDIR=$withval/lib + fi + DBI_BINDIR=$withval/bin + else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + { { echo "$as_me:$LINENO: error: Invalid DBI directory $withval - unable to find dbi.h under $withval" >&5 +echo "$as_me: error: Invalid DBI directory $withval - unable to find dbi.h under $withval" >&2;} + { (exit 1); exit 1; }; } + fi + if test -d $withval/dbd; then + DRIVERDIR=$withval/dbd + elif test -d $withval/lib/; then + if test -d $withval/lib64/dbd; then + DRIVERDIR=$withval/lib64/dbd + else + DRIVERDIR=$withval/lib/dbd + fi + else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + { { echo "$as_me:$LINENO: error: Invalid DBD driver directory $withval - unable to find DBD drivers under $withval" >&5 +echo "$as_me: error: Invalid DBD driver directory $withval - unable to find DBD drivers under $withval" >&2;} + { (exit 1); exit 1; }; } + fi + fi + SQL_INCLUDE=-I$DBI_INCDIR + SQL_LFLAGS="-L$DBI_LIBDIR -ldbi" + SQL_BINDIR=$DBI_BINDIR + SQL_LIB=$DBI_LIBDIR/libdbi.a + DBI_DBD_DRIVERDIR="-D DBI_DRIVER_DIR=\\\"$DRIVERDIR\\\"" + + cat >>confdefs.h <<\_ACEOF +#define HAVE_DBI 1 +_ACEOF + + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } + db_found=yes + support_dbi=yes + db_type=DBI + DB_TYPE=dbi + + else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + fi + +else + + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + +fi + + + + + + + + + # ------------------------------------------- @@ -31138,6 +31284,7 @@ SBINPERM!$SBINPERM$ac_delim SQL_LFLAGS!$SQL_LFLAGS$ac_delim SQL_INCLUDE!$SQL_INCLUDE$ac_delim SQL_BINDIR!$SQL_BINDIR$ac_delim +DBI_DBD_DRIVERDIR!$DBI_DBD_DRIVERDIR$ac_delim cats!$cats$ac_delim DB_TYPE!$DB_TYPE$ac_delim GETCONF!$GETCONF$ac_delim @@ -31166,7 +31313,7 @@ DISTVER!$DISTVER$ac_delim LTLIBOBJS!$LTLIBOBJS$ac_delim _ACEOF - if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 53; then + if test `sed -n 
"s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 54; then break elif $ac_last_try; then { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 @@ -31808,7 +31955,7 @@ if test "x${db_type}" = "xInternal" ; then echo " " echo " " echo "You have not specified either --enable-client-only or one of the" - echo " supported databases: MySQL, PostgreSQL, SQLite3 or SQLite." + echo " supported databases: MySQL, PostgreSQL, SQLite3, SQLite or DBI." echo " This is not permitted. Please reconfigure." echo " " echo "Aborting the configuration ..." diff --git a/bacula/src/cats/Makefile.in b/bacula/src/cats/Makefile.in index 3730074e40..d137ca452c 100644 --- a/bacula/src/cats/Makefile.in +++ b/bacula/src/cats/Makefile.in @@ -11,7 +11,7 @@ topdir = ../.. # this dir relative to top dir thisdir = src/cats -CPPFLAGS += -DBUILDING_CATS +CPPFLAGS += -DBUILDING_CATS @DBI_DBD_DRIVERDIR@ DEBUG=@DEBUG@ MKDIR=$(topdir)/autoconf/mkinstalldirs @@ -25,11 +25,11 @@ dummy: SVRSRCS = cats.c sql.c SVROBJS = cats.o sql.o -LIBSRCS = mysql.c bdb.c \ +LIBSRCS = mysql.c bdb.c dbi.c \ sql.c sql_cmds.c sql_create.c sql_delete.c sql_find.c \ sql_get.c sql_list.c sql_update.c sqlite.c \ postgresql.c -LIBOBJS = mysql.o bdb.o \ +LIBOBJS = mysql.o bdb.o dbi.o \ sql.o sql_cmds.o sql_create.o sql_delete.o sql_find.o \ sql_get.o sql_list.o sql_update.o sqlite.o \ postgresql.o diff --git a/bacula/src/cats/cats.h b/bacula/src/cats/cats.h index 891efa2d49..c16879eec3 100644 --- a/bacula/src/cats/cats.h +++ b/bacula/src/cats/cats.h @@ -525,6 +525,128 @@ extern const char* my_pg_batch_fill_path_query; #define SQL_ROW POSTGRESQL_ROW #define SQL_FIELD POSTGRESQL_FIELD +#else + +#ifdef HAVE_DBI + +#define BDB_VERSION 10 + +#include + +#define IS_NUM(x) ((x) == 1 || (x) == 2 ) +#define IS_NOT_NULL(x) ((x) == (1 << 0)) + +typedef char **DBI_ROW; +typedef struct dbi_field { + char *name; + int max_length; + unsigned int type; + unsigned int flags; // 1 == not null +} DBI_FIELD; + + +/* + * This is the 
"real" definition that should only be + * used inside sql.c and associated database interface + * subroutines. + * + * D B I + */ +struct B_DB { + BQUEUE bq; /* queue control */ + brwlock_t lock; /* transaction lock */ + dbi_conn *db; + dbi_result *result; + dbi_error_flag status; + DBI_ROW row; + DBI_FIELD *fields; + int num_rows; + int row_size; /* size of malloced rows */ + int num_fields; + int fields_size; /* size of malloced fields */ + int row_number; /* row number from my_postgresql_data_seek */ + int field_number; /* field number from my_postgresql_field_seek */ + int ref_count; + int db_type; /* DBI driver defined */ + char *db_driverdir ; /* DBI driver dir */ + char *db_driver; /* DBI type database */ + char *db_name; + char *db_user; + char *db_password; + char *db_address; /* host address */ + char *db_socket; /* socket for local access */ + int db_port; /* port of host address */ + int have_insert_id; /* do have insert_id() */ + bool connected; + POOLMEM *errmsg; /* nicely edited error message */ + POOLMEM *cmd; /* SQL command string */ + POOLMEM *cached_path; + int cached_path_len; /* length of cached path */ + uint32_t cached_path_id; + bool allow_transactions; /* transactions allowed */ + bool transaction; /* transaction started */ + int changes; /* changes made to db */ + POOLMEM *fname; /* Filename only */ + POOLMEM *path; /* Path only */ + POOLMEM *esc_name; /* Escaped file name */ + POOLMEM *esc_path; /* Escaped path name */ + int fnl; /* file name length */ + int pnl; /* path name length */ +}; + +void my_dbi_free_result(B_DB *mdb); +DBI_ROW my_dbi_fetch_row (B_DB *mdb); +int my_dbi_query (B_DB *mdb, const char *query); +void my_dbi_data_seek (B_DB *mdb, int row); +void my_dbi_field_seek (B_DB *mdb, int row); +DBI_FIELD * my_dbi_fetch_field(B_DB *mdb); +const char * my_dbi_strerror (B_DB *mdb); +int my_dbi_getisnull (dbi_result *result, int row_number, int column_number); +char * my_dbi_getvalue (dbi_result *result, int row_number, unsigned 
int column_number); +int my_dbi_sql_insert_id(B_DB *mdb, char *table_name); + +// TODO: do batch insert in DBI +//int my_dbi_batch_start(JCR *jcr, B_DB *mdb); +//int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error); +//typedef struct ATTR_DBR ATTR_DBR; +//int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar); +//char *my_dbi_copy_escape(char *dest, char *src, size_t len); + +//extern const char* my_dbi_batch_lock_path_query; +//extern const char* my_dbi_batch_lock_filename_query; +//extern const char* my_dbi_batch_unlock_tables_query; +//extern const char* my_dbi_batch_fill_filename_query; +//extern const char* my_dbi_batch_fill_path_query; + +/* "Generic" names for easier conversion */ +#define sql_store_result(x) (x)->result +#define sql_free_result(x) my_dbi_free_result(x) +#define sql_fetch_row(x) my_dbi_fetch_row(x) +#define sql_query(x, y) my_dbi_query((x), (y)) +#define sql_close(x) dbi_conn_close((x)->db) +#define sql_strerror(x) my_dbi_strerror(x) +#define sql_num_rows(x) dbi_result_get_numrows((x)->result) +#define sql_data_seek(x, i) my_dbi_data_seek((x), (i)) +/* #define sql_affected_rows(x) dbi_result_get_numrows_affected((x)->result) */ +#define sql_affected_rows(x) 1 +#define sql_insert_id(x,y) my_dbi_sql_insert_id((x), (y)) +#define sql_field_seek(x, y) my_dbi_field_seek((x), (y)) +#define sql_fetch_field(x) my_dbi_fetch_field(x) +#define sql_num_fields(x) ((x)->num_fields) +// TODO: do dbi batch insert +#define sql_batch_start(x,y) my_dbi_batch_start(x,y) +#define sql_batch_end(x,y,z) my_dbi_batch_end(x,y,z) +#define sql_batch_insert(x,y,z) my_dbi_batch_insert(x,y,z) +#define sql_batch_lock_path_query my_dbi_batch_lock_path_query +#define sql_batch_lock_filename_query my_dbi_batch_lock_filename_query +#define sql_batch_unlock_tables_query my_dbi_batch_unlock_tables_query +#define sql_batch_fill_filename_query my_dbi_batch_fill_filename_query +#define sql_batch_fill_path_query my_dbi_batch_fill_path_query + +#define SQL_ROW DBI_ROW 
+#define SQL_FIELD DBI_FIELD + + #else /* USE BACULA DB routines */ #define HAVE_BACULA_DB 1 @@ -574,6 +696,7 @@ struct B_DB { #endif /* HAVE_MYSQL */ #endif /* HAVE_SQLITE */ #endif /* HAVE_POSTGRESQL */ +#endif /* HAVE_DBI */ #endif /* Use for better error location printing */ @@ -918,7 +1041,7 @@ struct db_int64_ctx { /* * Exported globals from sql.c */ -extern int db_type; /* SQL engine type index */ +extern int DLL_IMP_EXP db_type; /* SQL engine type index */ /* * Some functions exported by sql.c for use within the diff --git a/bacula/src/cats/dbi.c b/bacula/src/cats/dbi.c new file mode 100644 index 0000000000..0dcc4c8b4b --- /dev/null +++ b/bacula/src/cats/dbi.c @@ -0,0 +1,987 @@ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2003-2008 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version two of the GNU General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of John Walker. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. 
+*/ +/* + * Bacula Catalog Database routines specific to DBI + * These are DBI specific routines + * + * João Henrique Freitas, December 2007 + * based upon work done by Dan Langille, December 2003 and + * by Kern Sibbald, March 2000 + * + * Version $Id$ + */ + + +/* The following is necessary so that we do not include + * the dummy external definition of DB. + */ +#define __SQL_C /* indicate that this is sql.c */ + +#include "bacula.h" +#include "cats.h" + +#ifdef HAVE_DBI + +/* ----------------------------------------------------------------------- + * + * DBI dependent defines and subroutines + * + * ----------------------------------------------------------------------- + */ + +/* List of open databases */ +static BQUEUE db_list = {&db_list, &db_list}; + +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + +/* + * Retrieve database type + */ +const char * +db_get_type(void) +{ + return "DBI"; +} + +/* + * Initialize database data structure. In principal this should + * never have errors, or it is really fatal. + */ +B_DB * +db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password, + const char *db_address, int db_port, const char *db_socket, + int mult_db_connections) +{ + B_DB *mdb; + char db_driver[10]; + char db_driverdir[256]; + + /* Constraint the db_driver */ + if(db_type == -1) { + Jmsg(jcr, M_FATAL, 0, _("A dbi driver for DBI must be supplied.\n")); + return NULL; + } + + /* Do the correct selection of driver. 
+ * Can be one of the varius supported by libdbi + */ + switch (db_type) { + case SQL_TYPE_MYSQL: + bstrncpy(db_driver,"mysql", sizeof(db_driver)); + break; + case SQL_TYPE_POSTGRESQL: + bstrncpy(db_driver,"pgsql", sizeof(db_driver)); + break; + case SQL_TYPE_SQLITE: + bstrncpy(db_driver,"pgsql", sizeof(db_driver)); + break; + } + + /* Set db_driverdir whereis is the libdbi drivers */ + bstrncpy(db_driverdir, DBI_DRIVER_DIR, 255); + + if (!db_user) { + Jmsg(jcr, M_FATAL, 0, _("A user name for DBI must be supplied.\n")); + return NULL; + } + P(mutex); /* lock DB queue */ + if (!mult_db_connections) { + /* Look to see if DB already open */ + for (mdb=NULL; (mdb=(B_DB *)qnext(&db_list, &mdb->bq)); ) { + if (bstrcmp(mdb->db_name, db_name) && + bstrcmp(mdb->db_address, db_address) && + bstrcmp(mdb->db_driver, db_driver) && + mdb->db_port == db_port) { + Dmsg3(100, "DB REopen %d %s %s\n", mdb->ref_count, db_driver, db_name); + mdb->ref_count++; + V(mutex); + return mdb; /* already open */ + } + } + } + Dmsg0(100, "db_open first time\n"); + mdb = (B_DB *)malloc(sizeof(B_DB)); + memset(mdb, 0, sizeof(B_DB)); + mdb->db_name = bstrdup(db_name); + mdb->db_user = bstrdup(db_user); + if (db_password) { + mdb->db_password = bstrdup(db_password); + } + if (db_address) { + mdb->db_address = bstrdup(db_address); + } + if (db_socket) { + mdb->db_socket = bstrdup(db_socket); + } + if (db_driverdir) { + mdb->db_driverdir = bstrdup(db_driverdir); + } + if (db_driver) { + mdb->db_driver = bstrdup(db_driver); + } + mdb->db_type = db_type; + mdb->db_port = db_port; + mdb->have_insert_id = TRUE; + mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */ + *mdb->errmsg = 0; + mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */ + mdb->cached_path = get_pool_memory(PM_FNAME); + mdb->cached_path_id = 0; + mdb->ref_count = 1; + mdb->fname = get_pool_memory(PM_FNAME); + mdb->path = get_pool_memory(PM_FNAME); + mdb->esc_name = get_pool_memory(PM_FNAME); + mdb->esc_path = 
get_pool_memory(PM_FNAME); + mdb->allow_transactions = mult_db_connections; + qinsert(&db_list, &mdb->bq); /* put db in list */ + V(mutex); + return mdb; +} + +/* + * Now actually open the database. This can generate errors, + * which are returned in the errmsg + * + * DO NOT close the database or free(mdb) here !!!! + */ +int +db_open_database(JCR *jcr, B_DB *mdb) +{ + int errstat; + int dbstat; + const char *errmsg; + char buf[10], *port; + int numdrivers; + + P(mutex); + if (mdb->connected) { + V(mutex); + return 1; + } + mdb->connected = false; + + if ((errstat=rwl_init(&mdb->lock)) != 0) { + berrno be; + Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"), + be.bstrerror(errstat)); + V(mutex); + return 0; + } + + if (mdb->db_port) { + bsnprintf(buf, sizeof(buf), "%d", mdb->db_port); + port = buf; + } else { + port = NULL; + } + + numdrivers = dbi_initialize(mdb->db_driverdir); + if (numdrivers < 0) { + dbi_shutdown(); + Mmsg2(&mdb->errmsg, _("Unable to locate the DBD drivers to DBI interface in: \n" + "db_driverdir=%s. It is probaly not found any drivers\n"), + mdb->db_driverdir,numdrivers); + V(mutex); + return 0; + } + mdb->db = (void **)dbi_conn_new(mdb->db_driver); + dbi_conn_set_option(mdb->db, "host", mdb->db_address); /* default = localhost */ + dbi_conn_set_option(mdb->db, "port", port); /* default port */ + dbi_conn_set_option(mdb->db, "username", mdb->db_user); /* login name */ + dbi_conn_set_option(mdb->db, "password", mdb->db_password); /* password */ + dbi_conn_set_option(mdb->db, "dbname", mdb->db_name); /* database name */ + + /* If connection fails, try at 5 sec intervals for 30 seconds. 
*/ + for (int retry=0; retry < 6; retry++) { + + dbstat = dbi_conn_connect(mdb->db); + if ( dbstat == 0) { + break; + } + + dbi_conn_error(mdb->db, &errmsg); + Dmsg1(50, "dbi error: %s\n", errmsg); + + bmicrosleep(5, 0); + + } + + if ( dbstat != 0 ) { + Mmsg3(&mdb->errmsg, _("Unable to connect to DBI interface.\n" + "Type=%s Database=%s User=%s\n" + "It is probably not running or your password is incorrect.\n"), + mdb->db_driver, mdb->db_name, mdb->db_user); + V(mutex); + return 0; + } + + Dmsg0(50, "dbi_real_connect done\n"); + Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n", + mdb->db_user, mdb->db_name, + mdb->db_password==NULL?"(NULL)":mdb->db_password); + + mdb->connected = true; + + if (!check_tables_version(jcr, mdb)) { + V(mutex); + return 0; + } + + switch (mdb->db_type) { + case SQL_TYPE_MYSQL: + /* Set connection timeout to 8 days specialy for batch mode */ + sql_query(mdb, "SET wait_timeout=691200"); + sql_query(mdb, "SET interactive_timeout=691200"); + break; + case SQL_TYPE_POSTGRESQL: + /* tell PostgreSQL we are using standard conforming strings + and avoid warnings such as: + WARNING: nonstandard use of \\ in a string literal + */ + sql_query(mdb, "SET datestyle TO 'ISO, YMD'"); + sql_query(mdb, "set standard_conforming_strings=on"); + break; + case SQL_TYPE_SQLITE: + break; + } + + V(mutex); + return 1; +} + +void +db_close_database(JCR *jcr, B_DB *mdb) +{ + if (!mdb) { + return; + } + db_end_transaction(jcr, mdb); + P(mutex); + sql_free_result(mdb); + mdb->ref_count--; + if (mdb->ref_count == 0) { + qdchain(&mdb->bq); + if (mdb->connected && mdb->db) { + sql_close(mdb); + mdb->db = NULL; + } + rwl_destroy(&mdb->lock); + free_pool_memory(mdb->errmsg); + free_pool_memory(mdb->cmd); + free_pool_memory(mdb->cached_path); + free_pool_memory(mdb->fname); + free_pool_memory(mdb->path); + free_pool_memory(mdb->esc_name); + free_pool_memory(mdb->esc_path); + if (mdb->db_name) { + free(mdb->db_name); + } + if (mdb->db_user) { + free(mdb->db_user); + } + 
if (mdb->db_password) { + free(mdb->db_password); + } + if (mdb->db_address) { + free(mdb->db_address); + } + if (mdb->db_socket) { + free(mdb->db_socket); + } + dbi_shutdown(); + if (mdb->db_driver) { + free(mdb->db_driver); + } + free(mdb); + + + } + V(mutex); +} + +void db_thread_cleanup() +{ } + +/* + * Return the next unique index (auto-increment) for + * the given table. Return NULL on error. + * + */ +int db_next_index(JCR *jcr, B_DB *mdb, char *table, char *index) +{ + strcpy(index, "NULL"); + return 1; +} + + +/* + * Escape strings so that DBI is happy + * + * NOTE! len is the length of the old string. Your new + * string must be long enough (max 2*old+1) to hold + * the escaped output. + * + * dbi_conn_quote_string_copy receives a pointer to pointer. + * We need copy the value of pointer to snew. Because libdbi change the + * pointer + */ +void +db_escape_string(JCR *jcr, B_DB *mdb, char *snew, char *old, int len) +{ + char *inew; + char *pnew; + + if (len == 0) { + snew[0] = 0; + } else { + /* correct the size of old basead in len and copy new string to inew */ + inew = (char *)malloc(sizeof(char) * len + 1); + bstrncpy(inew,old,len + 1); + /* escape the correct size of old */ + dbi_conn_escape_string_copy(mdb->db, inew, &pnew); + /* copy the escaped string to snew */ + bstrncpy(snew, pnew, 2 * len + 1); + } + + Dmsg2(500, "dbi_conn_escape_string_copy %p %s\n",snew,snew); + +} + +/* + * Submit a general SQL command (cmd), and for each row returned, + * the sqlite_handler is called with the ctx. + */ +bool db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) +{ + SQL_ROW row; + + Dmsg0(500, "db_sql_query started\n"); + + db_lock(mdb); + if (sql_query(mdb, query) != 0) { + Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror(mdb)); + db_unlock(mdb); + Dmsg0(500, "db_sql_query failed\n"); + return false; + } + Dmsg0(500, "db_sql_query succeeded. 
checking handler\n"); + + if (result_handler != NULL) { + Dmsg0(500, "db_sql_query invoking handler\n"); + if ((mdb->result = sql_store_result(mdb)) != NULL) { + int num_fields = sql_num_fields(mdb); + + Dmsg0(500, "db_sql_query sql_store_result suceeded\n"); + while ((row = sql_fetch_row(mdb)) != NULL) { + + Dmsg0(500, "db_sql_query sql_fetch_row worked\n"); + if (result_handler(ctx, num_fields, row)) + break; + } + + sql_free_result(mdb); + } + } + db_unlock(mdb); + + Dmsg0(500, "db_sql_query finished\n"); + + return true; +} + + + +DBI_ROW my_dbi_fetch_row(B_DB *mdb) +{ + int j; + DBI_ROW row = NULL; // by default, return NULL + + Dmsg0(500, "my_dbi_fetch_row start\n"); + + if (!mdb->row || mdb->row_size < mdb->num_fields) { + int num_fields = mdb->num_fields; + Dmsg1(500, "we have need space of %d bytes\n", sizeof(char *) * mdb->num_fields); + + if (mdb->row) { + Dmsg0(500, "my_dbi_fetch_row freeing space\n"); + free(mdb->row); + } + num_fields += 20; /* add a bit extra */ + mdb->row = (DBI_ROW)malloc(sizeof(char *) * num_fields); + mdb->row_size = num_fields; + + // now reset the row_number now that we have the space allocated + mdb->row_number = 1; + } + + // if still within the result set + if (mdb->row_number <= mdb->num_rows) { + Dmsg2(500, "my_dbi_fetch_row row number '%d' is acceptable (0..%d)\n", mdb->row_number, mdb->num_rows); + // get each value from this row + for (j = 0; j < mdb->num_fields; j++) { + mdb->row[j] = my_dbi_getvalue(mdb->result, mdb->row_number, j); + Dmsg2(500, "my_dbi_fetch_row field '%d' has value '%s'\n", j, mdb->row[j]); + } + // increment the row number for the next call + mdb->row_number++; + + row = mdb->row; + } else { + Dmsg2(500, "my_dbi_fetch_row row number '%d' is NOT acceptable (0..%d)\n", mdb->row_number, mdb->num_rows); + } + + Dmsg1(500, "my_dbi_fetch_row finishes returning %p\n", row); + + return row; +} + +int my_dbi_max_length(B_DB *mdb, int field_num) { + // + // for a given column, find the max length + // + int 
max_length; + int i; + int this_length; + + max_length = 0; + for (i = 0; i < mdb->num_rows; i++) { + if (my_dbi_getisnull(mdb->result, i, field_num)) { + this_length = 4; // "NULL" + } else { + // TODO: error + this_length = cstrlen(my_dbi_getvalue(mdb->result, i, field_num)); + } + + if (max_length < this_length) { + max_length = this_length; + } + } + + return max_length; +} + +DBI_FIELD * my_dbi_fetch_field(B_DB *mdb) +{ + int i; + int dbi_index; + + Dmsg0(500, "my_dbi_fetch_field starts\n"); + + if (!mdb->fields || mdb->fields_size < mdb->num_fields) { + if (mdb->fields) { + free(mdb->fields); + } + Dmsg1(500, "allocating space for %d fields\n", mdb->num_fields); + mdb->fields = (DBI_FIELD *)malloc(sizeof(DBI_FIELD) * mdb->num_fields); + mdb->fields_size = mdb->num_fields; + + for (i = 0; i < mdb->num_fields; i++) { + dbi_index = i + 1; + Dmsg1(500, "filling field %d\n", i); + mdb->fields[i].name = (char *)dbi_result_get_field_name(mdb->result, dbi_index); + mdb->fields[i].max_length = my_dbi_max_length(mdb, i); + mdb->fields[i].type = dbi_result_get_field_type_idx(mdb->result, dbi_index); + mdb->fields[i].flags = dbi_result_get_field_attribs_idx(mdb->result, dbi_index); + + Dmsg4(500, "my_dbi_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n", + mdb->fields[i].name, mdb->fields[i].max_length, mdb->fields[i].type, + mdb->fields[i].flags); + } // end for + } // end if + + // increment field number for the next time around + + Dmsg0(500, "my_dbi_fetch_field finishes\n"); + return &mdb->fields[mdb->field_number++]; +} + +void my_dbi_data_seek(B_DB *mdb, int row) +{ + // set the row number to be returned on the next call + // to my_dbi_fetch_row + mdb->row_number = row; +} + +void my_dbi_field_seek(B_DB *mdb, int field) +{ + mdb->field_number = field; +} + +/* + * Note, if this routine returns 1 (failure), Bacula expects + * that no result has been stored. 
+ * + * Returns: 0 on success + * 1 on failure + * + */ +int my_dbi_query(B_DB *mdb, const char *query) +{ + const char *errmsg; + Dmsg1(500, "my_dbi_query started %s\n", query); + // We are starting a new query. reset everything. + mdb->num_rows = -1; + mdb->row_number = -1; + mdb->field_number = -1; + + if (mdb->result) { + dbi_result_free(mdb->result); /* hmm, someone forgot to free?? */ + mdb->result = NULL; + } + + //for (int i=0; i < 10; i++) { + + mdb->result = (void **)dbi_conn_query(mdb->db, query); + + // if (mdb->result) { + // break; + // } + // bmicrosleep(5, 0); + //} + if (mdb->result == NULL) { + Dmsg2(50, "Query failed: %s %p\n", query, mdb->result); + goto bail_out; + } + + //mdb->status = (dbi_error_flag)dbi_conn_error_flag(mdb->db); + mdb->status = DBI_ERROR_NONE; + + if (mdb->status == DBI_ERROR_NONE) { + Dmsg1(500, "we have a result\n", query); + + // how many fields in the set? + mdb->num_fields = dbi_result_get_numfields(mdb->result); + Dmsg1(500, "we have %d fields\n", mdb->num_fields); + + mdb->num_rows = dbi_result_get_numrows(mdb->result); + Dmsg1(500, "we have %d rows\n", mdb->num_rows); + + mdb->status = (dbi_error_flag) 0; /* succeed */ + } else { + Dmsg1(50, "Result status failed: %s\n", query); + goto bail_out; + } + + Dmsg0(500, "my_dbi_query finishing\n"); + return mdb->status; + +bail_out: + mdb->status = dbi_conn_error_flag(mdb->db); + dbi_conn_error(mdb->db, &errmsg); + Dmsg4(500, "my_dbi_query we failed dbi error " + "'%s' '%p' '%d' flag '%d''\n", errmsg, mdb->result, mdb->result, mdb->status); + dbi_result_free(mdb->result); + mdb->result = NULL; + mdb->status = (dbi_error_flag) 1; /* failed */ + return mdb->status; +} + +void my_dbi_free_result(B_DB *mdb) +{ + int i; + + db_lock(mdb); + //Dmsg2(500, "my_dbi_free_result started result '%p' '%p'\n", mdb->result, mdb->result); + if (mdb->result != NULL) { + i = dbi_result_free(mdb->result); + if(i == 0) { + mdb->result = NULL; + //Dmsg2(500, "my_dbi_free_result result '%p' 
'%d'\n", mdb->result, mdb->result); + } + + } + + if (mdb->row) { + free(mdb->row); + mdb->row = NULL; + } + + if (mdb->fields) { + free(mdb->fields); + mdb->fields = NULL; + } + db_unlock(mdb); + //Dmsg0(500, "my_dbi_free_result finish\n"); + +} + +const char *my_dbi_strerror(B_DB *mdb) +{ + const char *errmsg; + + dbi_conn_error(mdb->db, &errmsg); + + return errmsg; +} + +// TODO: make batch insert work with libdbi +#ifdef HAVE_BATCH_FILE_INSERT + +int my_dbi_batch_start(JCR *jcr, B_DB *mdb) +{ + char *query = "COPY batch FROM STDIN"; + + Dmsg0(500, "my_postgresql_batch_start started\n"); + + if (my_postgresql_query(mdb, + "CREATE TEMPORARY TABLE batch (" + "fileindex int," + "jobid int," + "path varchar," + "name varchar," + "lstat varchar," + "md5 varchar)") == 1) + { + Dmsg0(500, "my_postgresql_batch_start failed\n"); + return 1; + } + + // We are starting a new query. reset everything. + mdb->num_rows = -1; + mdb->row_number = -1; + mdb->field_number = -1; + + my_postgresql_free_result(mdb); + + for (int i=0; i < 10; i++) { + mdb->result = PQexec(mdb->db, query); + if (mdb->result) { + break; + } + bmicrosleep(5, 0); + } + if (!mdb->result) { + Dmsg1(50, "Query failed: %s\n", query); + goto bail_out; + } + + mdb->status = PQresultStatus(mdb->result); + if (mdb->status == PGRES_COPY_IN) { + // how many fields in the set? 
+ mdb->num_fields = (int) PQnfields(mdb->result); + mdb->num_rows = 0; + mdb->status = 1; + } else { + Dmsg1(50, "Result status failed: %s\n", query); + goto bail_out; + } + + Dmsg0(500, "my_postgresql_batch_start finishing\n"); + + return mdb->status; + +bail_out: + Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), PQerrorMessage(mdb->db)); + mdb->status = 0; + PQclear(mdb->result); + mdb->result = NULL; + return mdb->status; +} + +/* set error to something to abort operation */ +int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error) +{ + int res; + int count=30; + Dmsg0(500, "my_postgresql_batch_end started\n"); + + if (!mdb) { /* no files ? */ + return 0; + } + + do { + res = PQputCopyEnd(mdb->db, error); + } while (res == 0 && --count > 0); + + if (res == 1) { + Dmsg0(500, "ok\n"); + mdb->status = 1; + } + + if (res <= 0) { + Dmsg0(500, "we failed\n"); + mdb->status = 0; + Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db)); + } + + Dmsg0(500, "my_postgresql_batch_end finishing\n"); + + return mdb->status; +} + +int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar) +{ + int res; + int count=30; + size_t len; + char *digest; + char ed1[50]; + + mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); + my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl); + + mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1); + my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl); + + if (ar->Digest == NULL || ar->Digest[0] == 0) { + digest = "0"; + } else { + digest = ar->Digest; + } + + len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n", + ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path, + mdb->esc_name, ar->attr, digest); + + do { + res = PQputCopyData(mdb->db, + mdb->cmd, + len); + } while (res == 0 && --count > 0); + + if (res == 1) { + Dmsg0(500, "ok\n"); + mdb->changes++; + mdb->status = 1; + } + + if (res <= 0) { + Dmsg0(500, "we failed\n"); + mdb->status = 0; + 
Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db)); + } + + Dmsg0(500, "my_postgresql_batch_insert finishing\n"); + + return mdb->status; +} + +#endif /* HAVE_BATCH_FILE_INSERT */ + +/* my_dbi_getisnull + * like PQgetisnull + * int PQgetisnull(const PGresult *res, + * int row_number, + * int column_number); + * + * use dbi_result_seek_row to search in result set + */ +int my_dbi_getisnull(dbi_result *result, int row_number, int column_number) { + int i; + + if(row_number == 0) { + row_number++; + } + + column_number++; + + if(dbi_result_seek_row(result, row_number)) { + + i = dbi_result_field_is_null_idx(result,column_number); + + return i; + } else { + + return 0; + } + +} +/* my_dbi_getvalue + * like PQgetvalue; + * char *PQgetvalue(const PGresult *res, + * int row_number, + * int column_number); + * + * use dbi_result_seek_row to search in result set + * use example to return only strings + */ +char *my_dbi_getvalue(dbi_result *result, int row_number, unsigned int column_number) { + + /* TODO: This is very bad, need refactoring */ + POOLMEM *buf = get_pool_memory(PM_FNAME); + //const unsigned char *bufb = (unsigned char *)malloc(sizeof(unsigned char) * 300); + //const unsigned char *bufb; + const char *errmsg; + const char *field_name; + unsigned short dbitype; + int32_t field_length = 0; + int64_t num; + + /* correct the index for dbi interface + * dbi index begins 1 + * I prefer do not change others functions + */ + Dmsg3(600, "my_dbi_getvalue pre-starting result '%p' row number '%d' column number '%d'\n", + result, row_number, column_number); + + column_number++; + + if(row_number == 0) { + row_number++; + } + + Dmsg3(600, "my_dbi_getvalue starting result '%p' row number '%d' column number '%d'\n", + result, row_number, column_number); + + if(dbi_result_seek_row(result, row_number)) { + + field_name = dbi_result_get_field_name(result, column_number); + field_length = dbi_result_get_field_length(result, field_name); + dbitype = 
dbi_result_get_field_type_idx(result,column_number); + + if(field_length) { + buf = check_pool_memory_size(buf, field_length + 1); + } else { + buf = check_pool_memory_size(buf, 50); + } + + Dmsg5(500, "my_dbi_getvalue result '%p' type '%d' \n field name '%s' " + "field_length '%d' field_length size '%d'\n", + result, dbitype, field_name, field_length, sizeof_pool_memory(buf)); + + switch (dbitype) { + case DBI_TYPE_INTEGER: + num = dbi_result_get_longlong(result, field_name); + edit_int64(num, buf); + field_length = strlen(buf); + break; + case DBI_TYPE_STRING: + if(field_length) { + field_length = bsnprintf(buf, field_length + 1, "%s", + dbi_result_get_string(result, field_name)); + } else { + buf[0] = 0; + } + break; + case DBI_TYPE_BINARY: + /* dbi_result_get_binary return a NULL pointer if value is empty + * following, change this to what Bacula espected + */ + if(field_length) { + field_length = bsnprintf(buf, field_length + 1, "%s", + dbi_result_get_binary(result, field_name)); + } else { + buf[0] = 0; + } + break; + case DBI_TYPE_DATETIME: + time_t last; + struct tm tm; + + last = dbi_result_get_datetime(result, field_name); + + if(last == -1) { + field_length = bsnprintf(buf, 20, "0000-00-00 00:00:00"); + } else { + (void)localtime_r(&last, &tm); + field_length = bsnprintf(buf, 20, "%04d-%02d-%02d %02d:%02d:%02d", + (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec); + } + break; + } + + } else { + dbi_conn_error(dbi_result_get_conn(result), &errmsg); + Dmsg1(500, "my_dbi_getvalue error: %s\n", errmsg); + } + + Dmsg3(500, "my_dbi_getvalue finish result '%p' num bytes '%d' data '%s'\n", + result, field_length, buf); + return buf; +} + +int my_dbi_sql_insert_id(B_DB *mdb, char *table_name) +{ + /* + Obtain the current value of the sequence that + provides the serial value for primary key of the table. + + currval is local to our session. It is not affected by + other transactions. + + Determine the name of the sequence. 
+ PostgreSQL automatically creates a sequence using + <table>_<column>_seq. + At the time of writing, all tables used this format for + their primary key: <table>
id + Except for basefiles which has a primary key on baseid. + Therefore, we need to special case that one table. + + everything else can use the PostgreSQL formula. + */ + + char sequence[30]; + uint64_t id = 0; + + if (mdb->db_type == SQL_TYPE_POSTGRESQL) { + + if (strcasecmp(table_name, "basefiles") == 0) { + bstrncpy(sequence, "basefiles_baseid", sizeof(sequence)); + } else { + bstrncpy(sequence, table_name, sizeof(sequence)); + bstrncat(sequence, "_", sizeof(sequence)); + bstrncat(sequence, table_name, sizeof(sequence)); + bstrncat(sequence, "id", sizeof(sequence)); + } + + bstrncat(sequence, "_seq", sizeof(sequence)); + id = dbi_conn_sequence_last(mdb->db, NT_(sequence)); + } else { + id = dbi_conn_sequence_last(mdb->db, NT_(table_name)); + } + + return id; +} + +#ifdef HAVE_BATCH_FILE_INSERT +const char *my_dbi_batch_lock_path_query = + "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE"; + + +const char *my_dbi_batch_lock_filename_query = + "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE"; + +const char *my_dbi_batch_unlock_tables_query = "COMMIT"; + +const char *my_dbi_batch_fill_path_query = + "INSERT INTO Path (Path) " + "SELECT a.Path FROM " + "(SELECT DISTINCT Path FROM batch) AS a " + "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) "; + + +const char *my_dbi_batch_fill_filename_query = + "INSERT INTO Filename (Name) " + "SELECT a.Name FROM " + "(SELECT DISTINCT Name FROM batch) as a " + "WHERE NOT EXISTS " + "(SELECT Name FROM Filename WHERE Name = a.Name)"; +#endif /* HAVE_BATCH_FILE_INSERT */ + +#endif /* HAVE_DBI */ diff --git a/bacula/src/cats/sql_cmds.c b/bacula/src/cats/sql_cmds.c index b847033d18..6dcf3ef1c9 100644 --- a/bacula/src/cats/sql_cmds.c +++ b/bacula/src/cats/sql_cmds.c @@ -95,31 +95,7 @@ const char *drop_deltabs[] = { "DROP TABLE DelCandidates", NULL}; - -/* List of SQL commands to create temp table and indicies */ -const char *create_deltabs[] = { - "CREATE TEMPORARY TABLE DelCandidates (" -#if 
defined(HAVE_MYSQL) - "JobId INTEGER UNSIGNED NOT NULL, " - "PurgedFiles TINYINT, " - "FileSetId INTEGER UNSIGNED, " - "JobFiles INTEGER UNSIGNED, " - "JobStatus BINARY(1))", -#elif defined(HAVE_POSTGRESQL) - "JobId INTEGER NOT NULL, " - "PurgedFiles SMALLINT, " - "FileSetId INTEGER, " - "JobFiles INTEGER, " - "JobStatus char(1))", -#else - "JobId INTEGER UNSIGNED NOT NULL, " - "PurgedFiles TINYINT, " - "FileSetId INTEGER UNSIGNED, " - "JobFiles INTEGER UNSIGNED, " - "JobStatus CHAR)", -#endif - "CREATE INDEX DelInx1 ON DelCandidates (JobId)", - NULL}; +const char *create_delindex = "CREATE INDEX DelInx1 ON DelCandidates (JobId)"; /* Fill candidates table with all Jobs subject to being deleted. * This is used for pruning Jobs (first the files, then the Jobs). @@ -270,32 +246,6 @@ const char *uar_list_jobs = "FROM Client,Job WHERE Client.ClientId=Job.ClientId AND JobStatus='T' " "AND Type='B' ORDER BY StartTime DESC LIMIT 20"; -#ifdef HAVE_MYSQL -/* MYSQL IS NOT STANDARD SQL !!!!! */ -/* List Jobs where a particular file is saved */ -const char *uar_file = - "SELECT Job.JobId as JobId," - "CONCAT(Path.Path,Filename.Name) as Name, " - "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes " - "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' " - "AND Client.ClientId=Job.ClientId " - "AND Job.JobId=File.JobId " - "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId " - "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20"; -#else -/* List Jobs where a particular file is saved */ -const char *uar_file = - "SELECT Job.JobId as JobId," - "Path.Path||Filename.Name as Name, " - "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes " - "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' " - "AND Client.ClientId=Job.ClientId " - "AND Job.JobId=File.JobId " - "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId " - "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20"; -#endif - - /* * Find all files for a particular 
JobId and insert them into * the tree during a restore. @@ -309,44 +259,6 @@ const char *uar_sel_files = const char *uar_del_temp = "DROP TABLE temp"; const char *uar_del_temp1 = "DROP TABLE temp1"; -const char *uar_create_temp = - "CREATE TEMPORARY TABLE temp (" -#ifdef HAVE_POSTGRESQL - "JobId INTEGER NOT NULL," - "JobTDate BIGINT," - "ClientId INTEGER," - "Level CHAR," - "JobFiles INTEGER," - "JobBytes BIGINT," - "StartTime TEXT," - "VolumeName TEXT," - "StartFile INTEGER," - "VolSessionId INTEGER," - "VolSessionTime INTEGER)"; -#else - "JobId INTEGER UNSIGNED NOT NULL," - "JobTDate BIGINT UNSIGNED," - "ClientId INTEGER UNSIGNED," - "Level CHAR," - "JobFiles INTEGER UNSIGNED," - "JobBytes BIGINT UNSIGNED," - "StartTime TEXT," - "VolumeName TEXT," - "StartFile INTEGER UNSIGNED," - "VolSessionId INTEGER UNSIGNED," - "VolSessionTime INTEGER UNSIGNED)"; -#endif - -const char *uar_create_temp1 = - "CREATE TEMPORARY TABLE temp1 (" -#ifdef HAVE_POSTGRESQL - "JobId INTEGER NOT NULL," - "JobTDate BIGINT)"; -#else - "JobId INTEGER UNSIGNED NOT NULL," - "JobTDate BIGINT UNSIGNED)"; -#endif - const char *uar_last_full = "INSERT INTO temp1 SELECT Job.JobId,JobTdate " "FROM Client,Job,JobMedia,Media,FileSet WHERE Client.ClientId=%s " @@ -460,6 +372,143 @@ const char *uar_jobids_fileindex = "AND Filename.FilenameId=File.FilenameId " "ORDER BY Job.StartTime DESC LIMIT 1"; +/* Query to get list of files from table -- presuably built by an external program */ +const char *uar_jobid_fileindex_from_table = + "SELECT JobId,FileIndex from %s"; + + +/* + * + * This file contains all the SQL commands issued by the Director + * + * Kern Sibbald, July MMII + * + * Version $Id$ + */ +/* + * Note, PostgreSQL imposes some constraints on using DISTINCT and GROUP BY + * for example, the following is illegal in PostgreSQL: + * SELECT DISTINCT JobId FROM temp ORDER BY StartTime ASC; + * because all the ORDER BY expressions must appear in the SELECT list! 
+ */ + + +#include "bacula.h" +#include "cats.h" + +/* ====== ua_prune.c */ + +/* List of SQL commands to create temp table and indicies */ +const char *create_deltabs[3] = { + /* MySQL */ + "CREATE TEMPORARY TABLE DelCandidates (" + "JobId INTEGER UNSIGNED NOT NULL, " + "PurgedFiles TINYINT, " + "FileSetId INTEGER UNSIGNED, " + "JobFiles INTEGER UNSIGNED, " + "JobStatus BINARY(1))", + /* Postgresql */ + "CREATE TEMPORARY TABLE DelCandidates (" + "JobId INTEGER NOT NULL, " + "PurgedFiles SMALLINT, " + "FileSetId INTEGER, " + "JobFiles INTEGER, " + "JobStatus char(1))", + /* SQLite */ + "CREATE TEMPORARY TABLE DelCandidates (" + "JobId INTEGER UNSIGNED NOT NULL, " + "PurgedFiles TINYINT, " + "FileSetId INTEGER UNSIGNED, " + "JobFiles INTEGER UNSIGNED, " + "JobStatus CHAR)"}; + +/* ======= ua_restore.c */ + +/* List Jobs where a particular file is saved */ +const char *uar_file[3] = { + /* Mysql */ + "SELECT Job.JobId as JobId," + "CONCAT(Path.Path,Filename.Name) as Name, " + "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes " + "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' " + "AND Client.ClientId=Job.ClientId " + "AND Job.JobId=File.JobId " + "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId " + "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20", + /* Postgresql */ + "SELECT Job.JobId as JobId," + "Path.Path||Filename.Name as Name, " + "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes " + "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' " + "AND Client.ClientId=Job.ClientId " + "AND Job.JobId=File.JobId " + "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId " + "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20", + /* SQLite */ + "SELECT Job.JobId as JobId," + "Path.Path||Filename.Name as Name, " + "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes " + "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' " + "AND Client.ClientId=Job.ClientId " + "AND Job.JobId=File.JobId " 
+ "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId " + "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20"}; + +const char *uar_create_temp[3] = { + /* Mysql */ + "CREATE TEMPORARY TABLE temp (" + "JobId INTEGER UNSIGNED NOT NULL," + "JobTDate BIGINT UNSIGNED," + "ClientId INTEGER UNSIGNED," + "Level CHAR," + "JobFiles INTEGER UNSIGNED," + "JobBytes BIGINT UNSIGNED," + "StartTime TEXT," + "VolumeName TEXT," + "StartFile INTEGER UNSIGNED," + "VolSessionId INTEGER UNSIGNED," + "VolSessionTime INTEGER UNSIGNED)", + /* Postgresql */ + "CREATE TEMPORARY TABLE temp (" + "JobId INTEGER NOT NULL," + "JobTDate BIGINT," + "ClientId INTEGER," + "Level CHAR," + "JobFiles INTEGER," + "JobBytes BIGINT," + "StartTime TEXT," + "VolumeName TEXT," + "StartFile INTEGER," + "VolSessionId INTEGER," + "VolSessionTime INTEGER)", + /* SQLite */ + "CREATE TEMPORARY TABLE temp (" + "JobId INTEGER UNSIGNED NOT NULL," + "JobTDate BIGINT UNSIGNED," + "ClientId INTEGER UNSIGNED," + "Level CHAR," + "JobFiles INTEGER UNSIGNED," + "JobBytes BIGINT UNSIGNED," + "StartTime TEXT," + "VolumeName TEXT," + "StartFile INTEGER UNSIGNED," + "VolSessionId INTEGER UNSIGNED," + "VolSessionTime INTEGER UNSIGNED)"}; + +const char *uar_create_temp1[3] = { + /* Mysql */ + "CREATE TEMPORARY TABLE temp1 (" + "JobId INTEGER UNSIGNED NOT NULL," + "JobTDate BIGINT UNSIGNED)", + /* Postgresql */ + "CREATE TEMPORARY TABLE temp1 (" + "JobId INTEGER NOT NULL," + "JobTDate BIGINT)", + /* SQLite */ + "CREATE TEMPORARY TABLE temp1 (" + "JobId INTEGER UNSIGNED NOT NULL," + "JobTDate BIGINT UNSIGNED)"}; + /* Query to get all files in a directory -- no recursing * Note, for PostgreSQL since it respects the "Single Value * rule", the results of the SELECT will be unoptimized. @@ -467,8 +516,8 @@ const char *uar_jobids_fileindex = * for each time it was backed up. 
*/ -#ifdef HAVE_POSTGRESQL -const char *uar_jobid_fileindex_from_dir = +const char *uar_jobid_fileindex_from_dir[3] = { + /* Mysql */ "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client " "WHERE Job.JobId IN (%s) " "AND Job.JobId=File.JobId " @@ -476,9 +525,18 @@ const char *uar_jobid_fileindex_from_dir = "AND Client.Name='%s' " "AND Job.ClientId=Client.ClientId " "AND Path.PathId=File.Pathid " - "AND Filename.FilenameId=File.FilenameId"; -#else -const char *uar_jobid_fileindex_from_dir = + "AND Filename.FilenameId=File.FilenameId " + "GROUP BY File.FileIndex ", + /* Postgresql */ + "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client " + "WHERE Job.JobId IN (%s) " + "AND Job.JobId=File.JobId " + "AND Path.Path='%s' " + "AND Client.Name='%s' " + "AND Job.ClientId=Client.ClientId " + "AND Path.PathId=File.Pathid " + "AND Filename.FilenameId=File.FilenameId", + /* SQLite */ "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client " "WHERE Job.JobId IN (%s) " "AND Job.JobId=File.JobId " @@ -487,9 +545,4 @@ const char *uar_jobid_fileindex_from_dir = "AND Job.ClientId=Client.ClientId " "AND Path.PathId=File.Pathid " "AND Filename.FilenameId=File.FilenameId " - "GROUP BY File.FileIndex "; -#endif - -/* Query to get list of files from table -- presuably built by an external program */ -const char *uar_jobid_fileindex_from_table = - "SELECT JobId,FileIndex from %s"; + "GROUP BY File.FileIndex "}; diff --git a/bacula/src/cats/sql_cmds.h b/bacula/src/cats/sql_cmds.h index 5043ac1c56..5d4678728e 100644 --- a/bacula/src/cats/sql_cmds.h +++ b/bacula/src/cats/sql_cmds.h @@ -26,10 +26,11 @@ Switzerland, email:ftf@fsfeurope.org. 
*/ + extern const char CATS_IMP_EXP *client_backups; extern const char CATS_IMP_EXP *list_pool; extern const char CATS_IMP_EXP *drop_deltabs[]; -extern const char CATS_IMP_EXP *create_deltabs[]; +extern const char CATS_IMP_EXP *create_delindex; extern const char CATS_IMP_EXP *insert_delcand; extern const char CATS_IMP_EXP *select_backup_del; extern const char CATS_IMP_EXP *select_verify_del; @@ -49,13 +50,10 @@ extern const char CATS_IMP_EXP *sel_JobMedia; extern const char CATS_IMP_EXP *upd_Purged; extern const char CATS_IMP_EXP *uar_list_jobs; -extern const char CATS_IMP_EXP *uar_file; extern const char CATS_IMP_EXP *uar_count_files; extern const char CATS_IMP_EXP *uar_sel_files; extern const char CATS_IMP_EXP *uar_del_temp; extern const char CATS_IMP_EXP *uar_del_temp1; -extern const char CATS_IMP_EXP *uar_create_temp; -extern const char CATS_IMP_EXP *uar_create_temp1; extern const char CATS_IMP_EXP *uar_last_full; extern const char CATS_IMP_EXP *uar_full; extern const char CATS_IMP_EXP *uar_inc; @@ -68,6 +66,12 @@ extern const char CATS_IMP_EXP *uar_dif; extern const char CATS_IMP_EXP *uar_sel_all_temp; extern const char CATS_IMP_EXP *uar_count_files; extern const char CATS_IMP_EXP *uar_jobids_fileindex; -extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_dir; extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_table; extern const char CATS_IMP_EXP *uar_sel_jobid_temp; + +extern const char CATS_IMP_EXP *create_deltabs[3]; + +extern const char CATS_IMP_EXP *uar_file[3]; +extern const char CATS_IMP_EXP *uar_create_temp[3]; +extern const char CATS_IMP_EXP *uar_create_temp1[3]; +extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_dir[3]; diff --git a/bacula/src/cats/sql_create.c b/bacula/src/cats/sql_create.c index 3b781bb4ee..38268bdc98 100644 --- a/bacula/src/cats/sql_create.c +++ b/bacula/src/cats/sql_create.c @@ -43,7 +43,7 @@ static const int dbglevel = 500; -#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL +#if HAVE_SQLITE3 || 
HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI /* ----------------------------------------------------------------------- * @@ -1124,4 +1124,4 @@ bool db_write_batch_file_records(JCR *jcr) #endif /* ! HAVE_BATCH_FILE_INSERT */ -#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL */ +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI */ diff --git a/bacula/src/cats/sql_delete.c b/bacula/src/cats/sql_delete.c index 1476ba423d..676ae861ee 100644 --- a/bacula/src/cats/sql_delete.c +++ b/bacula/src/cats/sql_delete.c @@ -44,7 +44,7 @@ #include "cats.h" -#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI /* ----------------------------------------------------------------------- * * Generic Routines (or almost generic) diff --git a/bacula/src/cats/sql_find.c b/bacula/src/cats/sql_find.c index 5894a60325..2daadb9853 100644 --- a/bacula/src/cats/sql_find.c +++ b/bacula/src/cats/sql_find.c @@ -46,7 +46,7 @@ #include "bacula.h" #include "cats.h" -#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI /* ----------------------------------------------------------------------- * diff --git a/bacula/src/cats/sql_get.c b/bacula/src/cats/sql_get.c index 981f10ab78..f53257436d 100644 --- a/bacula/src/cats/sql_get.c +++ b/bacula/src/cats/sql_get.c @@ -45,7 +45,7 @@ #include "bacula.h" #include "cats.h" -#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI /* ----------------------------------------------------------------------- * @@ -1019,4 +1019,4 @@ bool db_get_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr) } -#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL*/ +#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || 
HAVE_DBI */ diff --git a/bacula/src/cats/sql_list.c b/bacula/src/cats/sql_list.c index 25647c8d80..ec3e505abe 100644 --- a/bacula/src/cats/sql_list.c +++ b/bacula/src/cats/sql_list.c @@ -42,7 +42,7 @@ #include "bacula.h" #include "cats.h" -#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI /* ----------------------------------------------------------------------- * diff --git a/bacula/src/cats/sql_update.c b/bacula/src/cats/sql_update.c index 5b524108d4..5f037fba3c 100644 --- a/bacula/src/cats/sql_update.c +++ b/bacula/src/cats/sql_update.c @@ -41,7 +41,7 @@ #include "bacula.h" #include "cats.h" -#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL +#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI /* ----------------------------------------------------------------------- * diff --git a/bacula/src/dird/bacula-dir.conf.in b/bacula/src/dird/bacula-dir.conf.in index f2e72d6652..c39f672118 100644 --- a/bacula/src/dird/bacula-dir.conf.in +++ b/bacula/src/dird/bacula-dir.conf.in @@ -231,6 +231,8 @@ Storage { # Generic catalog service Catalog { Name = MyCatalog +# Uncomment the following line if you want the dbi driver +# dbdriver = "dbi:mysql"; dbaddress = 127.0.0.1; dbport = 3306 dbname = @db_name@; user = @db_user@; password = "" } diff --git a/bacula/src/dird/ua_prune.c b/bacula/src/dird/ua_prune.c index f90dd2e06b..2083fc53dc 100644 --- a/bacula/src/dird/ua_prune.c +++ b/bacula/src/dird/ua_prune.c @@ -259,14 +259,16 @@ static void drop_temp_tables(UAContext *ua) static bool create_temp_tables(UAContext *ua) { - int i; /* Create temp tables and indicies */ - for (i=0; create_deltabs[i]; i++) { - if (!db_sql_query(ua->db, create_deltabs[i], NULL, (void *)NULL)) { - ua->error_msg("%s", db_strerror(ua->db)); - Dmsg0(050, "create DelTables table failed\n"); - return false; - } + if (!db_sql_query(ua->db, create_deltabs[db_type], NULL, (void 
*)NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + Dmsg0(050, "create DelTables table failed\n"); + return false; + } + if (!db_sql_query(ua->db, create_delindex, NULL, (void *)NULL)) { + ua->error_msg("%s", db_strerror(ua->db)); + Dmsg0(050, "create DelInx1 index failed\n"); + return false; } return true; } diff --git a/bacula/src/dird/ua_restore.c b/bacula/src/dird/ua_restore.c index 06287fcd2e..9d9263b70a 100644 --- a/bacula/src/dird/ua_restore.c +++ b/bacula/src/dird/ua_restore.c @@ -582,7 +582,7 @@ static int user_select_jobids_or_files(UAContext *ua, RESTORE_CTX *rx) len = strlen(ua->cmd); fname = (char *)malloc(len * 2 + 1); db_escape_string(ua->jcr, ua->db, fname, ua->cmd, len); - Mmsg(rx->query, uar_file, rx->ClientName, fname); + Mmsg(rx->query, uar_file[db_type], rx->ClientName, fname); free(fname); gui_save = ua->jcr->gui; ua->jcr->gui = true; @@ -896,14 +896,13 @@ static bool insert_file_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *f */ static bool insert_dir_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *dir, char *date) -{ +{ strip_trailing_junk(dir); if (*rx->JobIds == 0) { ua->error_msg(_("No JobId specified cannot continue.\n")); return false; } else { - Mmsg(rx->query, uar_jobid_fileindex_from_dir, rx->JobIds, - dir, rx->ClientName); + Mmsg(rx->query, uar_jobid_fileindex_from_dir[db_type], rx->JobIds, dir, rx->ClientName); } rx->found = false; /* Find and insert jobid and File Index */ @@ -1122,10 +1121,10 @@ static bool select_backups_before_date(UAContext *ua, RESTORE_CTX *rx, char *dat /* Create temp tables */ db_sql_query(ua->db, uar_del_temp, NULL, NULL); db_sql_query(ua->db, uar_del_temp1, NULL, NULL); - if (!db_sql_query(ua->db, uar_create_temp, NULL, NULL)) { + if (!db_sql_query(ua->db, uar_create_temp[db_type], NULL, NULL)) { ua->error_msg("%s\n", db_strerror(ua->db)); } - if (!db_sql_query(ua->db, uar_create_temp1, NULL, NULL)) { + if (!db_sql_query(ua->db, uar_create_temp1[db_type], NULL, NULL)) { 
ua->error_msg("%s\n", db_strerror(ua->db)); } /* diff --git a/bacula/src/stored/bscan.c b/bacula/src/stored/bscan.c index 0127b181c5..c65348f6c8 100644 --- a/bacula/src/stored/bscan.c +++ b/bacula/src/stored/bscan.c @@ -82,10 +82,12 @@ static ATTR *attr; static time_t lasttime = 0; +static const char *db_driver = "NULL"; static const char *db_name = "bacula"; static const char *db_user = "bacula"; static const char *db_password = ""; static const char *db_host = NULL; +static int db_port = 0; static const char *wd = NULL; static bool update_db = false; static bool update_vol_info = false; @@ -119,10 +121,12 @@ PROG_COPYRIGHT " -d set debug level to \n" " -dt print timestamp in debug output\n" " -m update media info in database\n" +" -D specify the driver database name (default NULL)\n" " -n specify the database name (default bacula)\n" " -u specify database user name (default bacula)\n" " -P specify database password (default none)\n" " -h specify database host (default NULL)\n" +" -t specify database port (default 0)\n" " -p proceed inspite of I/O errors\n" " -r list records\n" " -s synchronize or store in database\n" @@ -150,7 +154,7 @@ int main (int argc, char *argv[]) OSDependentInit(); - while ((ch = getopt(argc, argv, "b:c:d:h:mn:pP:rsSu:vV:w:?")) != -1) { + while ((ch = getopt(argc, argv, "b:c:dD:h:p:mn:pP:rsStu:vV:w:?")) != -1) { switch (ch) { case 'S' : showProgress = true; @@ -166,6 +170,10 @@ int main (int argc, char *argv[]) configfile = bstrdup(optarg); break; + case 'D': + db_driver = optarg; + break; + case 'd': /* debug level */ if (*optarg == 't') { dbg_timestamp = true; @@ -180,6 +188,10 @@ int main (int argc, char *argv[]) case 'h': db_host = optarg; break; + + case 't': + db_port = atoi(optarg); + break; case 'm': update_vol_info = true; @@ -282,8 +294,8 @@ int main (int argc, char *argv[]) edit_uint64(currentVolumeSize, ed1)); } - if ((db=db_init_database(NULL, db_name, db_user, db_password, - db_host, 0, NULL, 0)) == NULL) { + if 
((db=db_init(NULL, db_driver, db_name, db_user, db_password, + db_host, db_port, NULL, 0)) == NULL) { Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n")); } if (!db_open_database(NULL, db)) { diff --git a/bacula/technotes-2.3 b/bacula/technotes-2.3 index 14712f0550..7d27060e4a 100644 --- a/bacula/technotes-2.3 +++ b/bacula/technotes-2.3 @@ -2,6 +2,9 @@ General: 22Feb08 +kes Apply patch (with some difficulties) from Joao Henrique Freitas + , which adds support for libdbi as a Bacula + database driver. kes Add patch from Martin Schmid scm@apsag.com that checks to see if ftruncate() actually works. In the case of some (cheap) NAS devices, it does not, and so recycling NAS Volumes does not work. The code -- 2.39.2