Howard Thomson
Jaime Ventura
Jan Kesten
+Joao Henrique Freitas
John Goerzen
John Kodis
John Walker
+AC_DEFUN([BA_CHECK_DBI_DB],
+[
+db_found=no
+AC_MSG_CHECKING(for DBI support)
+AC_ARG_WITH(dbi,
+[
+  --with-dbi@<:@=DIR@:>@      Include DBI support. DIR is the DBD base
+                          install directory, default is to search through
+                          a number of common places for the DBI files.],
+[
+   if test "$withval" != "no"; then
+      if test "$withval" = "yes"; then
+         if test -f /usr/local/include/dbi/dbi.h; then
+            dnl dbi.h lives in /usr/local/include/dbi, so the include
+            dnl directory is /usr/local/include (was /usr/local/dbi/include)
+            DBI_INCDIR=/usr/local/include
+            if test -d /usr/local/lib64; then
+               DBI_LIBDIR=/usr/local/lib64
+            else
+               DBI_LIBDIR=/usr/local/lib
+            fi
+            DBI_BINDIR=/usr/local/bin
+         elif test -f /usr/include/dbi/dbi.h; then
+            DBI_INCDIR=/usr/include
+            if test -d /usr/lib64; then
+               DBI_LIBDIR=/usr/lib64
+            else
+               DBI_LIBDIR=/usr/lib
+            fi
+            DBI_BINDIR=/usr/bin
+         elif test -f $prefix/include/dbi/dbi.h; then
+            DBI_INCDIR=$prefix/include
+            if test -d $prefix/lib64; then
+               DBI_LIBDIR=$prefix/lib64
+            else
+               DBI_LIBDIR=$prefix/lib
+            fi
+            DBI_BINDIR=$prefix/bin
+         else
+            AC_MSG_RESULT(no)
+            AC_MSG_ERROR(Unable to find dbi.h in standard locations)
+         fi
+         dnl prefer the lib64 DBD driver directory when it exists
+         if test -d /usr/local/lib/dbd; then
+            if test -d /usr/local/lib64/dbd; then
+               DRIVERDIR=/usr/local/lib64/dbd
+            else
+               DRIVERDIR=/usr/local/lib/dbd
+            fi
+         elif test -d /usr/lib/dbd; then
+            if test -d /usr/lib64/dbd; then
+               DRIVERDIR=/usr/lib64/dbd
+            else
+               DRIVERDIR=/usr/lib/dbd
+            fi
+         elif test -d $prefix/lib/dbd; then
+            if test -d $prefix/lib64/dbd; then
+               DRIVERDIR=$prefix/lib64/dbd
+            else
+               DRIVERDIR=$prefix/lib/dbd
+            fi
+         else
+            AC_MSG_RESULT(no)
+            AC_MSG_ERROR(Unable to find DBD drivers in standard locations)
+         fi
+      else
+         if test -f $withval/dbi.h; then
+            DBI_INCDIR=$withval
+            DBI_LIBDIR=$withval
+            DBI_BINDIR=$withval
+         elif test -f $withval/include/dbi/dbi.h; then
+            DBI_INCDIR=$withval/include
+            if test -d $withval/lib64; then
+               DBI_LIBDIR=$withval/lib64
+            else
+               DBI_LIBDIR=$withval/lib
+            fi
+            DBI_BINDIR=$withval/bin
+         else
+            AC_MSG_RESULT(no)
+            AC_MSG_ERROR(Invalid DBI directory $withval - unable to find dbi.h under $withval)
+         fi
+         if test -d $withval/dbd; then
+            DRIVERDIR=$withval/dbd
+         elif test -d $withval/lib/; then
+            if test -d $withval/lib64/dbd; then
+               DRIVERDIR=$withval/lib64/dbd
+            else
+               DRIVERDIR=$withval/lib/dbd
+            fi
+         else
+            AC_MSG_RESULT(no)
+            AC_MSG_ERROR(Invalid DBD driver directory $withval - unable to find DBD drivers under $withval)
+         fi
+      fi
+      SQL_INCLUDE=-I$DBI_INCDIR
+      SQL_LFLAGS="-L$DBI_LIBDIR -ldbi"
+      SQL_BINDIR=$DBI_BINDIR
+      SQL_LIB=$DBI_LIBDIR/libdbi.a
+      DBI_DBD_DRIVERDIR="-D DBI_DRIVER_DIR=\\\"$DRIVERDIR\\\""
+
+      AC_DEFINE(HAVE_DBI)
+      AC_MSG_RESULT(yes)
+      db_found=yes
+      support_dbi=yes
+      db_type=DBI
+      DB_TYPE=dbi
+
+   else
+      AC_MSG_RESULT(no)
+   fi
+],[
+   AC_MSG_RESULT(no)
+])
+AC_SUBST(SQL_LFLAGS)
+AC_SUBST(SQL_INCLUDE)
+AC_SUBST(SQL_BINDIR)
+AC_SUBST(DBI_DBD_DRIVERDIR)
+
+])
+
+
AC_DEFUN([BA_CHECK_MYSQL_DB],
[
db_found=no
/* Define to `int' if <sys/types.h> doesn't define. */
#undef ssize_t
+/* Define if you want to use DBI */
+#undef HAVE_DBI
+
/* Define if you want to use PostgreSQL */
#undef HAVE_POSTGRESQL
*/
#undef HAVE_DECL_TZNAME
+/* Define to 1 if you have the declaration of `tzname', and to 0 if you don't.
+ */
+#undef HAVE_DECL_TZNAME
+
/* Define to 1 if you have the declaration of `_snprintf', and to 0 if you
don't. */
#undef HAVE_DECL__SNPRINTF
support_sqlite=no
support_sqlite3=no
support_postgresql=no
+support_dbi=no
support_smartalloc=yes
support_readline=yes
support_conio=yes
BA_CHECK_SQLITE_DB
+BA_CHECK_DBI_DB
+
AC_SUBST(cats)
AC_SUBST(DB_TYPE)
echo " "
echo " "
echo "You have not specified either --enable-client-only or one of the"
- echo " supported databases: MySQL, PostgreSQL, SQLite3 or SQLite."
+ echo " supported databases: MySQL, PostgreSQL, SQLite3, SQLite or DBI."
echo " This is not permitted. Please reconfigure."
echo " "
echo "Aborting the configuration ..."
SQL_LFLAGS
SQL_INCLUDE
SQL_BINDIR
+DBI_DBD_DRIVERDIR
cats
DB_TYPE
GETCONF
--with-sqlite[=DIR] Include SQLite support. DIR is the SQLite base
install directory, default is to search through
a number of common places for the SQLite files.
+
+ --with-dbi[=DIR] Include DBI support. DIR is the DBD base
+ install directory, default is to search through
+ a number of common places for the DBI files.
--with-x use the X Window System
Some influential environment variables:
support_sqlite=no
support_sqlite3=no
support_postgresql=no
+support_dbi=no
support_smartalloc=yes
support_readline=yes
support_conio=yes
+db_found=no
+{ echo "$as_me:$LINENO: checking for DBI support" >&5
+echo $ECHO_N "checking for DBI support... $ECHO_C" >&6; }
+
+# Check whether --with-dbi was given.
+if test "${with_dbi+set}" = set; then
+  withval=$with_dbi;
+  if test "$withval" != "no"; then
+     if test "$withval" = "yes"; then
+        if test -f /usr/local/include/dbi/dbi.h; then
+           # dbi.h is under /usr/local/include/dbi, so the include
+           # directory is /usr/local/include (was /usr/local/dbi/include)
+           DBI_INCDIR=/usr/local/include
+           if test -d /usr/local/lib64; then
+              DBI_LIBDIR=/usr/local/lib64
+           else
+              DBI_LIBDIR=/usr/local/lib
+           fi
+           DBI_BINDIR=/usr/local/bin
+        elif test -f /usr/include/dbi/dbi.h; then
+           DBI_INCDIR=/usr/include
+           if test -d /usr/lib64; then
+              DBI_LIBDIR=/usr/lib64
+           else
+              DBI_LIBDIR=/usr/lib
+           fi
+           DBI_BINDIR=/usr/bin
+        elif test -f $prefix/include/dbi/dbi.h; then
+           DBI_INCDIR=$prefix/include
+           if test -d $prefix/lib64; then
+              DBI_LIBDIR=$prefix/lib64
+           else
+              DBI_LIBDIR=$prefix/lib
+           fi
+           DBI_BINDIR=$prefix/bin
+        else
+           { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+           { { echo "$as_me:$LINENO: error: Unable to find dbi.h in standard locations" >&5
+echo "$as_me: error: Unable to find dbi.h in standard locations" >&2;}
+   { (exit 1); exit 1; }; }
+        fi
+        # prefer the lib64 DBD driver directory when present
+        if test -d /usr/local/lib/dbd; then
+           if test -d /usr/local/lib64/dbd; then
+              DRIVERDIR=/usr/local/lib64/dbd
+           else
+              DRIVERDIR=/usr/local/lib/dbd
+           fi
+        elif test -d /usr/lib/dbd; then
+           if test -d /usr/lib64/dbd; then
+              DRIVERDIR=/usr/lib64/dbd
+           else
+              DRIVERDIR=/usr/lib/dbd
+           fi
+        elif test -d $prefix/lib/dbd; then
+           if test -d $prefix/lib64/dbd; then
+              DRIVERDIR=$prefix/lib64/dbd
+           else
+              DRIVERDIR=$prefix/lib/dbd
+           fi
+        else
+           { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+           { { echo "$as_me:$LINENO: error: Unable to find DBD drivers in standard locations" >&5
+echo "$as_me: error: Unable to find DBD drivers in standard locations" >&2;}
+   { (exit 1); exit 1; }; }
+        fi
+     else
+        if test -f $withval/dbi.h; then
+           DBI_INCDIR=$withval
+           DBI_LIBDIR=$withval
+           DBI_BINDIR=$withval
+        elif test -f $withval/include/dbi/dbi.h; then
+           DBI_INCDIR=$withval/include
+           if test -d $withval/lib64; then
+              DBI_LIBDIR=$withval/lib64
+           else
+              DBI_LIBDIR=$withval/lib
+           fi
+           DBI_BINDIR=$withval/bin
+        else
+           { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+           { { echo "$as_me:$LINENO: error: Invalid DBI directory $withval - unable to find dbi.h under $withval" >&5
+echo "$as_me: error: Invalid DBI directory $withval - unable to find dbi.h under $withval" >&2;}
+   { (exit 1); exit 1; }; }
+        fi
+        if test -d $withval/dbd; then
+           DRIVERDIR=$withval/dbd
+        elif test -d $withval/lib/; then
+           if test -d $withval/lib64/dbd; then
+              DRIVERDIR=$withval/lib64/dbd
+           else
+              DRIVERDIR=$withval/lib/dbd
+           fi
+        else
+           { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+           { { echo "$as_me:$LINENO: error: Invalid DBD driver directory $withval - unable to find DBD drivers under $withval" >&5
+echo "$as_me: error: Invalid DBD driver directory $withval - unable to find DBD drivers under $withval" >&2;}
+   { (exit 1); exit 1; }; }
+        fi
+     fi
+     SQL_INCLUDE=-I$DBI_INCDIR
+     SQL_LFLAGS="-L$DBI_LIBDIR -ldbi"
+     SQL_BINDIR=$DBI_BINDIR
+     SQL_LIB=$DBI_LIBDIR/libdbi.a
+     DBI_DBD_DRIVERDIR="-D DBI_DRIVER_DIR=\\\"$DRIVERDIR\\\""
+
+     cat >>confdefs.h <<\_ACEOF
+#define HAVE_DBI 1
+_ACEOF
+
+     { echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6; }
+     db_found=yes
+     support_dbi=yes
+     db_type=DBI
+     DB_TYPE=dbi
+
+  else
+     { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+  fi
+
+else
+
+  { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+
+fi
+
+
+
+
+
+
+
+
+
# -------------------------------------------
SQL_LFLAGS!$SQL_LFLAGS$ac_delim
SQL_INCLUDE!$SQL_INCLUDE$ac_delim
SQL_BINDIR!$SQL_BINDIR$ac_delim
+DBI_DBD_DRIVERDIR!$DBI_DBD_DRIVERDIR$ac_delim
cats!$cats$ac_delim
DB_TYPE!$DB_TYPE$ac_delim
GETCONF!$GETCONF$ac_delim
LTLIBOBJS!$LTLIBOBJS$ac_delim
_ACEOF
- if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 53; then
+ if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 54; then
break
elif $ac_last_try; then
{ { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
echo " "
echo " "
echo "You have not specified either --enable-client-only or one of the"
- echo " supported databases: MySQL, PostgreSQL, SQLite3 or SQLite."
+ echo " supported databases: MySQL, PostgreSQL, SQLite3, SQLite or DBI."
echo " This is not permitted. Please reconfigure."
echo " "
echo "Aborting the configuration ..."
# this dir relative to top dir
thisdir = src/cats
-CPPFLAGS += -DBUILDING_CATS
+CPPFLAGS += -DBUILDING_CATS @DBI_DBD_DRIVERDIR@
DEBUG=@DEBUG@
MKDIR=$(topdir)/autoconf/mkinstalldirs
SVRSRCS = cats.c sql.c
SVROBJS = cats.o sql.o
-LIBSRCS = mysql.c bdb.c \
+LIBSRCS = mysql.c bdb.c dbi.c \
sql.c sql_cmds.c sql_create.c sql_delete.c sql_find.c \
sql_get.c sql_list.c sql_update.c sqlite.c \
postgresql.c
-LIBOBJS = mysql.o bdb.o \
+LIBOBJS = mysql.o bdb.o dbi.o \
sql.o sql_cmds.o sql_create.o sql_delete.o sql_find.o \
sql_get.o sql_list.o sql_update.o sqlite.o \
postgresql.o
#define SQL_ROW POSTGRESQL_ROW
#define SQL_FIELD POSTGRESQL_FIELD
+#else
+
+#ifdef HAVE_DBI
+
+#define BDB_VERSION 10
+
+#include <dbi/dbi.h>
+
+#define IS_NUM(x) ((x) == 1 || (x) == 2 )
+#define IS_NOT_NULL(x) ((x) == (1 << 0))
+
+typedef char **DBI_ROW;
+typedef struct dbi_field {
+ char *name;
+ int max_length;
+ unsigned int type;
+ unsigned int flags; // 1 == not null
+} DBI_FIELD;
+
+
+/*
+ * This is the "real" definition that should only be
+ * used inside sql.c and associated database interface
+ * subroutines.
+ *
+ * D B I
+ */
+struct B_DB {
+   BQUEUE bq;                         /* queue control */
+   brwlock_t lock;                    /* transaction lock */
+   dbi_conn *db;                      /* libdbi connection handle */
+   dbi_result *result;                /* current libdbi result set */
+   dbi_error_flag status;             /* status of the last query */
+   DBI_ROW row;                       /* cached row of column values */
+   DBI_FIELD *fields;                 /* cached field descriptions */
+   int num_rows;                      /* rows in the current result */
+   int row_size;                  /* size of malloced rows */
+   int num_fields;                    /* fields in the current result */
+   int fields_size;               /* size of malloced fields */
+   int row_number;                /* row number set by my_dbi_data_seek */
+   int field_number;              /* field number set by my_dbi_field_seek */
+   int ref_count;                     /* number of users sharing this handle */
+   int db_type;                   /* DBI driver defined */
+   char *db_driverdir ;           /* DBI driver dir */
+   char *db_driver;               /* DBI type database */
+   char *db_name;                     /* database name */
+   char *db_user;                     /* login name */
+   char *db_password;                 /* login password */
+   char *db_address;              /* host address */
+   char *db_socket;               /* socket for local access */
+   int db_port;                   /* port of host address */
+   int have_insert_id;            /* do have insert_id() */
+   bool connected;                    /* true after db_open_database() succeeds */
+   POOLMEM *errmsg;               /* nicely edited error message */
+   POOLMEM *cmd;                  /* SQL command string */
+   POOLMEM *cached_path;              /* last path inserted (cache) */
+   int cached_path_len;           /* length of cached path */
+   uint32_t cached_path_id;           /* PathId of the cached path */
+   bool allow_transactions;       /* transactions allowed */
+   bool transaction;              /* transaction started */
+   int changes;                   /* changes made to db */
+   POOLMEM *fname;                /* Filename only */
+   POOLMEM *path;                 /* Path only */
+   POOLMEM *esc_name;             /* Escaped file name */
+   POOLMEM *esc_path;             /* Escaped path name */
+   int fnl;                       /* file name length */
+   int pnl;                       /* path name length */
+};
+
+void my_dbi_free_result(B_DB *mdb);
+DBI_ROW my_dbi_fetch_row (B_DB *mdb);
+int my_dbi_query (B_DB *mdb, const char *query);
+void my_dbi_data_seek (B_DB *mdb, int row);
+void my_dbi_field_seek (B_DB *mdb, int row);
+DBI_FIELD * my_dbi_fetch_field(B_DB *mdb);
+const char * my_dbi_strerror (B_DB *mdb);
+int my_dbi_getisnull (dbi_result *result, int row_number, int column_number);
+char * my_dbi_getvalue (dbi_result *result, int row_number, unsigned int column_number);
+int my_dbi_sql_insert_id(B_DB *mdb, char *table_name);
+
+// TODO: do batch insert in DBI
+//int my_dbi_batch_start(JCR *jcr, B_DB *mdb);
+//int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error);
+//typedef struct ATTR_DBR ATTR_DBR;
+//int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
+//char *my_dbi_copy_escape(char *dest, char *src, size_t len);
+
+//extern const char* my_dbi_batch_lock_path_query;
+//extern const char* my_dbi_batch_lock_filename_query;
+//extern const char* my_dbi_batch_unlock_tables_query;
+//extern const char* my_dbi_batch_fill_filename_query;
+//extern const char* my_dbi_batch_fill_path_query;
+
+/* "Generic" names for easier conversion */
+#define sql_store_result(x) (x)->result
+#define sql_free_result(x) my_dbi_free_result(x)
+#define sql_fetch_row(x) my_dbi_fetch_row(x)
+#define sql_query(x, y) my_dbi_query((x), (y))
+#define sql_close(x) dbi_conn_close((x)->db)
+#define sql_strerror(x) my_dbi_strerror(x)
+#define sql_num_rows(x) dbi_result_get_numrows((x)->result)
+#define sql_data_seek(x, i) my_dbi_data_seek((x), (i))
+/* #define sql_affected_rows(x) dbi_result_get_numrows_affected((x)->result) */
+#define sql_affected_rows(x) 1
+#define sql_insert_id(x,y) my_dbi_sql_insert_id((x), (y))
+#define sql_field_seek(x, y) my_dbi_field_seek((x), (y))
+#define sql_fetch_field(x) my_dbi_fetch_field(x)
+#define sql_num_fields(x) ((x)->num_fields)
+// TODO: do dbi batch insert
+#define sql_batch_start(x,y) my_dbi_batch_start(x,y)
+#define sql_batch_end(x,y,z) my_dbi_batch_end(x,y,z)
+#define sql_batch_insert(x,y,z) my_dbi_batch_insert(x,y,z)
+#define sql_batch_lock_path_query my_dbi_batch_lock_path_query
+#define sql_batch_lock_filename_query my_dbi_batch_lock_filename_query
+#define sql_batch_unlock_tables_query my_dbi_batch_unlock_tables_query
+#define sql_batch_fill_filename_query my_dbi_batch_fill_filename_query
+#define sql_batch_fill_path_query my_dbi_batch_fill_path_query
+
+#define SQL_ROW DBI_ROW
+#define SQL_FIELD DBI_FIELD
+
+
#else /* USE BACULA DB routines */
#define HAVE_BACULA_DB 1
#endif /* HAVE_MYSQL */
#endif /* HAVE_SQLITE */
#endif /* HAVE_POSTGRESQL */
+#endif /* HAVE_DBI */
#endif
/* Use for better error location printing */
/*
* Exported globals from sql.c
*/
-extern int db_type; /* SQL engine type index */
+extern int DLL_IMP_EXP db_type; /* SQL engine type index */
/*
* Some functions exported by sql.c for use within the
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2003-2008 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version two of the GNU General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of John Walker.
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+/*
+ * Bacula Catalog Database routines specific to DBI
+ * These are DBI specific routines
+ *
+ * João Henrique Freitas, December 2007
+ * based upon work done by Dan Langille, December 2003 and
+ * by Kern Sibbald, March 2000
+ *
+ * Version $Id$
+ */
+
+
+/* The following is necessary so that we do not include
+ * the dummy external definition of DB.
+ */
+#define __SQL_C /* indicate that this is sql.c */
+
+#include "bacula.h"
+#include "cats.h"
+
+#ifdef HAVE_DBI
+
+/* -----------------------------------------------------------------------
+ *
+ * DBI dependent defines and subroutines
+ *
+ * -----------------------------------------------------------------------
+ */
+
+/* List of open databases */
+static BQUEUE db_list = {&db_list, &db_list};
+
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Retrieve database type
+ */
+const char *
+db_get_type(void)
+{
+   /* Identify this catalog backend by name */
+   return "DBI";
+}
+
+/*
+ * Initialize database data structure. In principal this should
+ * never have errors, or it is really fatal.
+ */
+/*
+ * Allocate (or re-use) a catalog handle.  Returns NULL on fatal
+ * misconfiguration (no driver selected, no user supplied).
+ * The caller must later pair this with db_close_database().
+ */
+B_DB *
+db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password,
+                 const char *db_address, int db_port, const char *db_socket,
+                 int mult_db_connections)
+{
+   B_DB *mdb;
+   char db_driver[10];
+   char db_driverdir[256];
+
+   /* A driver index must have been selected before we can map it */
+   if (db_type == -1) {
+      Jmsg(jcr, M_FATAL, 0, _("A dbi driver for DBI must be supplied.\n"));
+      return NULL;
+   }
+
+   /* Map Bacula's database type onto the matching libdbi driver name */
+   switch (db_type) {
+   case SQL_TYPE_MYSQL:
+      bstrncpy(db_driver, "mysql", sizeof(db_driver));
+      break;
+   case SQL_TYPE_POSTGRESQL:
+      bstrncpy(db_driver, "pgsql", sizeof(db_driver));
+      break;
+   case SQL_TYPE_SQLITE:
+      /* was wrongly "pgsql" (copy/paste); select the sqlite DBD driver */
+      bstrncpy(db_driver, "sqlite", sizeof(db_driver));
+      break;
+   default:
+      /* previously fell through leaving db_driver uninitialized */
+      Jmsg(jcr, M_FATAL, 0, _("Unknown database type: %d\n"), db_type);
+      return NULL;
+   }
+
+   /* DBI_DRIVER_DIR (from configure) is where the DBD drivers live */
+   bstrncpy(db_driverdir, DBI_DRIVER_DIR, sizeof(db_driverdir));
+
+   if (!db_user) {
+      Jmsg(jcr, M_FATAL, 0, _("A user name for DBI must be supplied.\n"));
+      return NULL;
+   }
+   P(mutex);                          /* lock DB queue */
+   if (!mult_db_connections) {
+      /* Look to see if DB already open */
+      for (mdb=NULL; (mdb=(B_DB *)qnext(&db_list, &mdb->bq)); ) {
+         if (bstrcmp(mdb->db_name, db_name) &&
+             bstrcmp(mdb->db_address, db_address) &&
+             bstrcmp(mdb->db_driver, db_driver) &&
+             mdb->db_port == db_port) {
+            Dmsg3(100, "DB REopen %d %s %s\n", mdb->ref_count, db_driver, db_name);
+            mdb->ref_count++;
+            V(mutex);
+            return mdb;               /* already open */
+         }
+      }
+   }
+   Dmsg0(100, "db_open first time\n");
+   mdb = (B_DB *)malloc(sizeof(B_DB));
+   memset(mdb, 0, sizeof(B_DB));
+   mdb->db_name = bstrdup(db_name);
+   mdb->db_user = bstrdup(db_user);
+   if (db_password) {
+      mdb->db_password = bstrdup(db_password);
+   }
+   if (db_address) {
+      mdb->db_address = bstrdup(db_address);
+   }
+   if (db_socket) {
+      mdb->db_socket = bstrdup(db_socket);
+   }
+   /* local arrays are always non-NULL; the old guards were dead code */
+   mdb->db_driverdir = bstrdup(db_driverdir);
+   mdb->db_driver = bstrdup(db_driver);
+   mdb->db_type = db_type;
+   mdb->db_port = db_port;
+   mdb->have_insert_id = TRUE;
+   mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
+   *mdb->errmsg = 0;
+   mdb->cmd = get_pool_memory(PM_EMSG);    /* get command buffer */
+   mdb->cached_path = get_pool_memory(PM_FNAME);
+   mdb->cached_path_id = 0;
+   mdb->ref_count = 1;
+   mdb->fname = get_pool_memory(PM_FNAME);
+   mdb->path = get_pool_memory(PM_FNAME);
+   mdb->esc_name = get_pool_memory(PM_FNAME);
+   mdb->esc_path = get_pool_memory(PM_FNAME);
+   mdb->allow_transactions = mult_db_connections;
+   qinsert(&db_list, &mdb->bq);            /* put db in list */
+   V(mutex);
+   return mdb;
+}
+
+/*
+ * Now actually open the database. This can generate errors,
+ * which are returned in the errmsg
+ *
+ * DO NOT close the database or free(mdb) here !!!!
+ */
+/*
+ * Now actually open the database.  Returns 1 on success, 0 on failure
+ * with a message in mdb->errmsg.  Retries the connect for ~30 seconds.
+ */
+int
+db_open_database(JCR *jcr, B_DB *mdb)
+{
+   int errstat;
+   int dbstat = -1;                /* stays -1 only if the retry loop never runs */
+   const char *errmsg;
+   char buf[10], *port;
+   int numdrivers;
+
+   P(mutex);
+   if (mdb->connected) {
+      V(mutex);
+      return 1;
+   }
+   mdb->connected = false;
+
+   if ((errstat=rwl_init(&mdb->lock)) != 0) {
+      berrno be;
+      Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
+            be.bstrerror(errstat));
+      V(mutex);
+      return 0;
+   }
+
+   /* libdbi takes the port as a string option */
+   if (mdb->db_port) {
+      bsnprintf(buf, sizeof(buf), "%d", mdb->db_port);
+      port = buf;
+   } else {
+      port = NULL;
+   }
+
+   numdrivers = dbi_initialize(mdb->db_driverdir);
+   if (numdrivers < 0) {
+      dbi_shutdown();
+      /* format now consumes both arguments (%s driverdir, %d count) */
+      Mmsg2(&mdb->errmsg, _("Unable to locate the DBD drivers for the DBI interface in:\n"
+               "db_driverdir=%s. Probably no drivers were found (rc=%d)\n"),
+               mdb->db_driverdir, numdrivers);
+      V(mutex);
+      return 0;
+   }
+   mdb->db = (void **)dbi_conn_new(mdb->db_driver);
+   dbi_conn_set_option(mdb->db, "host", mdb->db_address);      /* default = localhost */
+   dbi_conn_set_option(mdb->db, "port", port);                 /* default port */
+   dbi_conn_set_option(mdb->db, "username", mdb->db_user);     /* login name */
+   dbi_conn_set_option(mdb->db, "password", mdb->db_password); /* password */
+   dbi_conn_set_option(mdb->db, "dbname", mdb->db_name);       /* database name */
+
+   /* If connection fails, try at 5 sec intervals for 30 seconds. */
+   for (int retry=0; retry < 6; retry++) {
+      dbstat = dbi_conn_connect(mdb->db);
+      if (dbstat == 0) {
+         break;
+      }
+      dbi_conn_error(mdb->db, &errmsg);
+      Dmsg1(50, "dbi error: %s\n", errmsg);
+      bmicrosleep(5, 0);
+   }
+
+   if (dbstat != 0) {
+      Mmsg3(&mdb->errmsg, _("Unable to connect to DBI interface.\n"
+               "Type=%s Database=%s User=%s\n"
+               "It is probably not running or your password is incorrect.\n"),
+               mdb->db_driver, mdb->db_name, mdb->db_user);
+      V(mutex);
+      return 0;
+   }
+
+   Dmsg0(50, "dbi_real_connect done\n");
+   Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n",
+         mdb->db_user, mdb->db_name,
+         mdb->db_password==NULL?"(NULL)":mdb->db_password);
+
+   mdb->connected = true;
+
+   if (!check_tables_version(jcr, mdb)) {
+      V(mutex);
+      return 0;
+   }
+
+   /* Per-backend session tuning */
+   switch (mdb->db_type) {
+   case SQL_TYPE_MYSQL:
+      /* Set connection timeout to 8 days specially for batch mode */
+      sql_query(mdb, "SET wait_timeout=691200");
+      sql_query(mdb, "SET interactive_timeout=691200");
+      break;
+   case SQL_TYPE_POSTGRESQL:
+      /* tell PostgreSQL we are using standard conforming strings
+         and avoid warnings such as:
+           WARNING:  nonstandard use of \\ in a string literal
+       */
+      sql_query(mdb, "SET datestyle TO 'ISO, YMD'");
+      sql_query(mdb, "set standard_conforming_strings=on");
+      break;
+   case SQL_TYPE_SQLITE:
+      break;
+   }
+
+   V(mutex);
+   return 1;
+}
+
+/*
+ * Drop one reference on the handle; the last user actually closes
+ * the connection and frees everything.
+ */
+void
+db_close_database(JCR *jcr, B_DB *mdb)
+{
+   if (!mdb) {
+      return;
+   }
+   /* flush any open transaction before tearing the handle down */
+   db_end_transaction(jcr, mdb);
+   P(mutex);
+   sql_free_result(mdb);
+   mdb->ref_count--;
+   if (mdb->ref_count == 0) {
+      qdchain(&mdb->bq);
+      if (mdb->connected && mdb->db) {
+         sql_close(mdb);
+         mdb->db = NULL;
+      }
+      rwl_destroy(&mdb->lock);
+      free_pool_memory(mdb->errmsg);
+      free_pool_memory(mdb->cmd);
+      free_pool_memory(mdb->cached_path);
+      free_pool_memory(mdb->fname);
+      free_pool_memory(mdb->path);
+      free_pool_memory(mdb->esc_name);
+      free_pool_memory(mdb->esc_path);
+      if (mdb->db_name) {
+         free(mdb->db_name);
+      }
+      if (mdb->db_user) {
+         free(mdb->db_user);
+      }
+      if (mdb->db_password) {
+         free(mdb->db_password);
+      }
+      if (mdb->db_address) {
+         free(mdb->db_address);
+      }
+      if (mdb->db_socket) {
+         free(mdb->db_socket);
+      }
+      /* NOTE(review): mdb->db_driverdir (bstrdup'ed in db_init_database)
+       * is never freed here — small leak.  Also dbi_shutdown() unloads
+       * ALL drivers process-wide; confirm this is safe when several
+       * B_DB handles exist (mult_db_connections). */
+      dbi_shutdown();
+      if (mdb->db_driver) {
+         free(mdb->db_driver);
+      }
+      free(mdb);
+
+
+   }
+   V(mutex);
+}
+
+void db_thread_cleanup()
+{
+   /* nothing per-thread to release for the DBI backend */
+}
+
+/*
+ * Return the next unique index (auto-increment) for
+ * the given table. Return NULL on error.
+ *
+ */
+int db_next_index(JCR *jcr, B_DB *mdb, char *table, char *index)
+{
+   /*
+    * The database assigns auto-increment values itself, so callers
+    * simply embed the literal "NULL" in their INSERT statements.
+    */
+   strcpy(index, "NULL");
+   return 1;
+}
+
+
+/*
+ * Escape strings so that DBI is happy
+ *
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
+ *
+ * dbi_conn_quote_string_copy receives a pointer to pointer.
+ * We need copy the value of pointer to snew. Because libdbi change the
+ * pointer
+ */
+/*
+ * Escape `len` bytes of `old` into the caller supplied buffer `snew`
+ * (which must hold at least 2*len+1 bytes).  libdbi's
+ * dbi_conn_escape_string_copy() allocates the escaped copy itself,
+ * so we copy it out and free both temporaries (they leaked before).
+ */
+void
+db_escape_string(JCR *jcr, B_DB *mdb, char *snew, char *old, int len)
+{
+   char *inew;
+   char *pnew;
+
+   if (len == 0) {
+      snew[0] = 0;
+   } else {
+      /* take a NUL terminated copy of exactly the first len bytes */
+      inew = (char *)malloc(len + 1);
+      bstrncpy(inew, old, len + 1);
+      /* libdbi allocates the escaped string into pnew */
+      dbi_conn_escape_string_copy(mdb->db, inew, &pnew);
+      /* hand the escaped string back to the caller */
+      bstrncpy(snew, pnew, 2 * len + 1);
+      free(pnew);                  /* was leaked */
+      free(inew);                  /* was leaked */
+   }
+
+   Dmsg2(500, "dbi_conn_escape_string_copy %p %s\n", snew, snew);
+}
+
+/*
+ * Submit a general SQL command (cmd), and for each row returned,
+ * the sqlite_handler is called with the ctx.
+ */
+/*
+ * Run an arbitrary SQL statement; if result_handler is non-NULL it is
+ * called once per returned row with (ctx, num_fields, row).  A non-zero
+ * handler return stops the iteration.  Returns false on query failure
+ * with the edited message left in mdb->errmsg.
+ */
+bool db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
+{
+   SQL_ROW row;
+
+   Dmsg0(500, "db_sql_query started\n");
+
+   db_lock(mdb);
+   if (sql_query(mdb, query) != 0) {
+      Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror(mdb));
+      db_unlock(mdb);
+      Dmsg0(500, "db_sql_query failed\n");
+      return false;
+   }
+   Dmsg0(500, "db_sql_query succeeded. checking handler\n");
+
+   if (result_handler != NULL) {
+      Dmsg0(500, "db_sql_query invoking handler\n");
+      /* for DBI, sql_store_result(x) is just (x)->result (see cats.h) */
+      if ((mdb->result = sql_store_result(mdb)) != NULL) {
+         int num_fields = sql_num_fields(mdb);
+
+         Dmsg0(500, "db_sql_query sql_store_result suceeded\n");
+         while ((row = sql_fetch_row(mdb)) != NULL) {
+
+            Dmsg0(500, "db_sql_query sql_fetch_row worked\n");
+            if (result_handler(ctx, num_fields, row))
+               break;
+         }
+
+         sql_free_result(mdb);
+      }
+   }
+   db_unlock(mdb);
+
+   Dmsg0(500, "db_sql_query finished\n");
+
+   return true;
+}
+
+
+
+/*
+ * Return the next row of the current result set as an array of
+ * C strings, or NULL once past the end.  The array is cached in
+ * mdb->row and reused across calls.
+ */
+DBI_ROW my_dbi_fetch_row(B_DB *mdb)
+{
+   int j;
+   DBI_ROW row = NULL; // by default, return NULL
+
+   Dmsg0(500, "my_dbi_fetch_row start\n");
+
+   /* (re)allocate the row cache on first use, or when the current
+    * result set has more fields than the cache can hold */
+   if (!mdb->row || mdb->row_size < mdb->num_fields) {
+      int num_fields = mdb->num_fields;
+      Dmsg1(500, "we have need space of %d bytes\n", sizeof(char *) * mdb->num_fields);
+
+      if (mdb->row) {
+         Dmsg0(500, "my_dbi_fetch_row freeing space\n");
+         free(mdb->row);
+      }
+      num_fields += 20;              /* add a bit extra */
+      mdb->row = (DBI_ROW)malloc(sizeof(char *) * num_fields);
+      mdb->row_size = num_fields;
+
+      // now reset the row_number now that we have the space allocated
+      /* NOTE(review): rows here are addressed 1-based */
+      mdb->row_number = 1;
+   }
+
+   // if still within the result set
+   if (mdb->row_number <= mdb->num_rows) {
+      Dmsg2(500, "my_dbi_fetch_row row number '%d' is acceptable (0..%d)\n", mdb->row_number, mdb->num_rows);
+      // get each value from this row
+      for (j = 0; j < mdb->num_fields; j++) {
+         mdb->row[j] = my_dbi_getvalue(mdb->result, mdb->row_number, j);
+         Dmsg2(500, "my_dbi_fetch_row field '%d' has value '%s'\n", j, mdb->row[j]);
+      }
+      // increment the row number for the next call
+      mdb->row_number++;
+
+      row = mdb->row;
+   } else {
+      Dmsg2(500, "my_dbi_fetch_row row number '%d' is NOT acceptable (0..%d)\n", mdb->row_number, mdb->num_rows);
+   }
+
+   Dmsg1(500, "my_dbi_fetch_row finishes returning %p\n", row);
+
+   return row;
+}
+
+/*
+ * Scan every row of the current result and return the widest value
+ * (in characters) seen in column field_num; NULL values count as 4
+ * (the display width of "NULL").
+ */
+int my_dbi_max_length(B_DB *mdb, int field_num) {
+   //
+   // for a given column, find the max length
+   //
+   int max_length;
+   int i;
+   int this_length;
+
+   max_length = 0;
+   for (i = 0; i < mdb->num_rows; i++) {
+      if (my_dbi_getisnull(mdb->result, i, field_num)) {
+         this_length = 4;        // "NULL"
+      } else {
+         // TODO: error
+         /* NOTE(review): if my_dbi_getvalue() returns allocated storage,
+          * this call leaks it on every row — confirm its ownership
+          * contract before relying on this in long listings */
+         this_length = cstrlen(my_dbi_getvalue(mdb->result, i, field_num));
+      }
+
+      if (max_length < this_length) {
+         max_length = this_length;
+      }
+   }
+
+   return max_length;
+}
+
+/*
+ * Return the description of the next field (name, max display width,
+ * type, attribute flags), advancing mdb->field_number.  The field
+ * cache is built lazily from the current result set.
+ */
+DBI_FIELD * my_dbi_fetch_field(B_DB *mdb)
+{
+   int i;
+   int dbi_index;
+
+   Dmsg0(500, "my_dbi_fetch_field starts\n");
+
+   /* build (or grow) the field description cache */
+   if (!mdb->fields || mdb->fields_size < mdb->num_fields) {
+      if (mdb->fields) {
+         free(mdb->fields);
+      }
+      Dmsg1(500, "allocating space for %d fields\n", mdb->num_fields);
+      mdb->fields = (DBI_FIELD *)malloc(sizeof(DBI_FIELD) * mdb->num_fields);
+      mdb->fields_size = mdb->num_fields;
+
+      for (i = 0; i < mdb->num_fields; i++) {
+         /* libdbi field indexes are 1-based, our cache is 0-based */
+         dbi_index = i + 1;
+         Dmsg1(500, "filling field %d\n", i);
+         mdb->fields[i].name = (char *)dbi_result_get_field_name(mdb->result, dbi_index);
+         mdb->fields[i].max_length = my_dbi_max_length(mdb, i);
+         mdb->fields[i].type = dbi_result_get_field_type_idx(mdb->result, dbi_index);
+         mdb->fields[i].flags = dbi_result_get_field_attribs_idx(mdb->result, dbi_index);
+
+         Dmsg4(500, "my_dbi_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n",
+               mdb->fields[i].name, mdb->fields[i].max_length, mdb->fields[i].type,
+               mdb->fields[i].flags);
+      } // end for
+   } // end if
+
+   // increment field number for the next time around
+
+   Dmsg0(500, "my_dbi_fetch_field finishes\n");
+   return &mdb->fields[mdb->field_number++];
+}
+
+void my_dbi_data_seek(B_DB *mdb, int row)
+{
+   /* Remember which row my_dbi_fetch_row() should hand back next */
+   mdb->row_number = row;
+}
+
+void my_dbi_field_seek(B_DB *mdb, int field)
+{
+   /* Position the cursor used by my_dbi_fetch_field() */
+   mdb->field_number = field;
+}
+
+/*
+ * Note, if this routine returns 1 (failure), Bacula expects
+ * that no result has been stored.
+ *
+ * Returns: 0 on success
+ * 1 on failure
+ *
+ */
+/*
+ * Run one SQL statement and attach its result to mdb->result,
+ * filling in num_fields/num_rows.
+ *
+ * Note, if this routine returns 1 (failure), Bacula expects
+ * that no result has been stored.
+ *
+ * Returns:  0 on success
+ *           1 on failure
+ */
+int my_dbi_query(B_DB *mdb, const char *query)
+{
+   const char *errmsg;
+
+   Dmsg1(500, "my_dbi_query started %s\n", query);
+   // We are starting a new query.  reset everything.
+   mdb->num_rows     = -1;
+   mdb->row_number   = -1;
+   mdb->field_number = -1;
+
+   if (mdb->result) {
+      dbi_result_free(mdb->result);  /* hmm, someone forgot to free?? */
+      mdb->result = NULL;
+   }
+
+   mdb->result = (void **)dbi_conn_query(mdb->db, query);
+
+   if (mdb->result == NULL) {
+      Dmsg2(50, "Query failed: %s %p\n", query, mdb->result);
+      goto bail_out;
+   }
+
+   /* the real dbi_conn_error_flag() check is deliberately disabled;
+    * a non-NULL result is treated as success */
+   mdb->status = DBI_ERROR_NONE;
+
+   if (mdb->status == DBI_ERROR_NONE) {
+      Dmsg0(500, "we have a result\n");
+
+      // how many fields in the set?
+      mdb->num_fields = dbi_result_get_numfields(mdb->result);
+      Dmsg1(500, "we have %d fields\n", mdb->num_fields);
+
+      mdb->num_rows = dbi_result_get_numrows(mdb->result);
+      Dmsg1(500, "we have %d rows\n", mdb->num_rows);
+
+      mdb->status = (dbi_error_flag) 0;                  /* succeed */
+   } else {
+      Dmsg1(50, "Result status failed: %s\n", query);
+      goto bail_out;
+   }
+
+   Dmsg0(500, "my_dbi_query finishing\n");
+   return mdb->status;
+
+bail_out:
+   mdb->status = dbi_conn_error_flag(mdb->db);
+   dbi_conn_error(mdb->db, &errmsg);
+   /* old Dmsg4 passed mdb->result twice, feeding a pointer to a %d */
+   Dmsg3(500, "my_dbi_query we failed dbi error '%s' result=%p flag=%d\n",
+         errmsg, mdb->result, mdb->status);
+   if (mdb->result) {               /* was freed unconditionally, even when NULL */
+      dbi_result_free(mdb->result);
+      mdb->result = NULL;
+   }
+   mdb->status = (dbi_error_flag) 1;                     /* failed */
+   return mdb->status;
+}
+
+/*
+ * Release the current libdbi result set plus the row/field caches
+ * built by my_dbi_fetch_row()/my_dbi_fetch_field().  Safe to call
+ * when nothing is attached.
+ */
+void my_dbi_free_result(B_DB *mdb)
+{
+   int i;
+
+   db_lock(mdb);
+   if (mdb->result != NULL) {
+      i = dbi_result_free(mdb->result);
+      /* only forget the pointer if libdbi reported success */
+      if(i == 0) {
+         mdb->result = NULL;
+      }
+
+   }
+
+   if (mdb->row) {
+      free(mdb->row);
+      mdb->row = NULL;
+   }
+
+   if (mdb->fields) {
+      free(mdb->fields);
+      mdb->fields = NULL;
+   }
+   db_unlock(mdb);
+
+}
+
+const char *my_dbi_strerror(B_DB *mdb)
+{
+   /* Hand back libdbi's text for the most recent error on this connection */
+   const char *msg;
+
+   dbi_conn_error(mdb->db, &msg);
+   return msg;
+}
+
+// TODO: make batch insert work with libdbi
+#ifdef HAVE_BATCH_FILE_INSERT
+
+/* NOTE(review): this body is copied verbatim from the PostgreSQL
+ * backend (my_postgresql_query, PQexec, PGRES_COPY_IN, ...) and cannot
+ * compile against libdbi — it is only safe while HAVE_BATCH_FILE_INSERT
+ * is undefined.  TODO: implement batch insert with libdbi. */
+int my_dbi_batch_start(JCR *jcr, B_DB *mdb)
+{
+   char *query = "COPY batch FROM STDIN";
+
+   Dmsg0(500, "my_postgresql_batch_start started\n");
+
+   if (my_postgresql_query(mdb,
+                           "CREATE TEMPORARY TABLE batch ("
+                               "fileindex int,"
+                               "jobid int,"
+                               "path varchar,"
+                               "name varchar,"
+                               "lstat varchar,"
+                               "md5 varchar)") == 1)
+   {
+      Dmsg0(500, "my_postgresql_batch_start failed\n");
+      return 1;
+   }
+
+   // We are starting a new query.  reset everything.
+   mdb->num_rows     = -1;
+   mdb->row_number   = -1;
+   mdb->field_number = -1;
+
+   my_postgresql_free_result(mdb);
+
+   for (int i=0; i < 10; i++) {
+      mdb->result = PQexec(mdb->db, query);
+      if (mdb->result) {
+         break;
+      }
+      bmicrosleep(5, 0);
+   }
+   if (!mdb->result) {
+      Dmsg1(50, "Query failed: %s\n", query);
+      goto bail_out;
+   }
+
+   mdb->status = PQresultStatus(mdb->result);
+   if (mdb->status == PGRES_COPY_IN) {
+      // how many fields in the set?
+      mdb->num_fields = (int) PQnfields(mdb->result);
+      mdb->num_rows   = 0;
+      mdb->status = 1;
+   } else {
+      Dmsg1(50, "Result status failed: %s\n", query);
+      goto bail_out;
+   }
+
+   Dmsg0(500, "my_postgresql_batch_start finishing\n");
+
+   return mdb->status;
+
+bail_out:
+   Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), PQerrorMessage(mdb->db));
+   mdb->status = 0;
+   PQclear(mdb->result);
+   mdb->result = NULL;
+   return mdb->status;
+}
+
+/* End a batch (COPY) insert session; pass a non-NULL error string to
+ * abort the COPY on the server side.  Returns mdb->status (1 on
+ * success, 0 on failure).
+ * NOTE(review): placeholder copied from the PostgreSQL backend
+ * (PQputCopyEnd/PQerrorMessage) -- see TODO above; not yet ported
+ * to libdbi. */
+int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error)
+{
+ int res;
+ int count=30;
+ Dmsg0(500, "my_postgresql_batch_end started\n");
+
+ if (!mdb) { /* no files ? */
+ return 0;
+ }
+
+ /* PQputCopyEnd returns 0 when the queue is full; retry up to count times */
+ do {
+ res = PQputCopyEnd(mdb->db, error);
+ } while (res == 0 && --count > 0);
+
+ if (res == 1) {
+ Dmsg0(500, "ok\n");
+ mdb->status = 1;
+ }
+
+ if (res <= 0) {
+ Dmsg0(500, "we failed\n");
+ mdb->status = 0;
+ Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
+ }
+
+ Dmsg0(500, "my_postgresql_batch_end finishing\n");
+
+ return mdb->status;
+}
+
+/* Send one attribute record into the open batch COPY stream.
+ * Returns mdb->status (1 on success, 0 on failure).
+ * NOTE(review): placeholder copied from the PostgreSQL backend
+ * (my_postgresql_copy_escape/PQputCopyData) -- see TODO above; not
+ * yet ported to libdbi. */
+int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+{
+ int res;
+ int count=30;
+ size_t len;
+ const char *digest;          /* may point at a string literal */
+ char ed1[50];
+
+ /* Escape name and path for COPY format; worst case doubles length */
+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
+ my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
+
+ mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
+ my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl);
+
+ if (ar->Digest == NULL || ar->Digest[0] == 0) {
+ digest = "0";
+ } else {
+ digest = ar->Digest;
+ }
+
+ /* Tab-separated COPY row: fileindex, jobid, path, name, lstat, md5 */
+ len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n",
+ ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path,
+ mdb->esc_name, ar->attr, digest);
+
+ /* PQputCopyData returns 0 when the queue is full; retry up to count times */
+ do {
+ res = PQputCopyData(mdb->db,
+ mdb->cmd,
+ len);
+ } while (res == 0 && --count > 0);
+
+ if (res == 1) {
+ Dmsg0(500, "ok\n");
+ mdb->changes++;
+ mdb->status = 1;
+ }
+
+ if (res <= 0) {
+ Dmsg0(500, "we failed\n");
+ mdb->status = 0;
+ /* fixed: message previously said "error ending batch mode",
+ * copy-pasted from my_dbi_batch_end */
+ Mmsg1(&mdb->errmsg, _("error copying in batch mode: %s"), PQerrorMessage(mdb->db));
+ }
+
+ Dmsg0(500, "my_postgresql_batch_insert finishing\n");
+
+ return mdb->status;
+}
+
+#endif /* HAVE_BATCH_FILE_INSERT */
+
+/* my_dbi_getisnull
+ * like PQgetisnull
+ * int PQgetisnull(const PGresult *res,
+ * int row_number,
+ * int column_number);
+ *
+ * use dbi_result_seek_row to search in result set
+ */
+int my_dbi_getisnull(dbi_result *result, int row_number, int column_number) {
+ /* Report whether the given field is NULL, mirroring PQgetisnull().
+ * Callers use 0-based row/column indexes while libdbi is 1-based,
+ * so translate before seeking.  Returns 0 when the row cannot be
+ * reached. */
+ if (row_number == 0) {
+ row_number = 1;
+ }
+ column_number++;
+
+ if (!dbi_result_seek_row(result, row_number)) {
+ return 0;
+ }
+
+ return dbi_result_field_is_null_idx(result, column_number);
+}
+/* my_dbi_getvalue
+ * like PQgetvalue;
+ * char *PQgetvalue(const PGresult *res,
+ * int row_number,
+ * int column_number);
+ *
+ * use dbi_result_seek_row to search in result set
+ * use example to return only strings
+ */
+/* Fetch one field as a string, mirroring PQgetvalue().
+ * Allocates a fresh pool-memory buffer on every call and returns it;
+ * presumably the caller owns it and must free_pool_memory() it --
+ * TODO confirm against callers (otherwise this leaks per call).
+ * NOTE(review): the switch below has no default case, so for an
+ * unhandled dbi type the returned buffer content is uninitialized
+ * pool memory -- verify all column types in the schema are covered. */
+char *my_dbi_getvalue(dbi_result *result, int row_number, unsigned int column_number) {
+
+ /* TODO: This is very bad, need refactoring */
+ POOLMEM *buf = get_pool_memory(PM_FNAME);
+ //const unsigned char *bufb = (unsigned char *)malloc(sizeof(unsigned char) * 300);
+ //const unsigned char *bufb;
+ const char *errmsg;
+ const char *field_name;
+ unsigned short dbitype;
+ int32_t field_length = 0;
+ int64_t num;
+
+ /* correct the index for dbi interface
+ * dbi index begins 1
+ * I prefer do not change others functions
+ */
+ Dmsg3(600, "my_dbi_getvalue pre-starting result '%p' row number '%d' column number '%d'\n",
+ result, row_number, column_number);
+
+ column_number++;
+
+ if(row_number == 0) {
+ row_number++;
+ }
+
+ Dmsg3(600, "my_dbi_getvalue starting result '%p' row number '%d' column number '%d'\n",
+ result, row_number, column_number);
+
+ if(dbi_result_seek_row(result, row_number)) {
+
+ field_name = dbi_result_get_field_name(result, column_number);
+ field_length = dbi_result_get_field_length(result, field_name);
+ dbitype = dbi_result_get_field_type_idx(result,column_number);
+
+ /* size the buffer to the reported field length, or a small
+ * default (50) when the driver reports 0 */
+ if(field_length) {
+ buf = check_pool_memory_size(buf, field_length + 1);
+ } else {
+ buf = check_pool_memory_size(buf, 50);
+ }
+
+ Dmsg5(500, "my_dbi_getvalue result '%p' type '%d' \n field name '%s' "
+ "field_length '%d' field_length size '%d'\n",
+ result, dbitype, field_name, field_length, sizeof_pool_memory(buf));
+
+ switch (dbitype) {
+ case DBI_TYPE_INTEGER:
+ /* render integers in decimal, as PQgetvalue would */
+ num = dbi_result_get_longlong(result, field_name);
+ edit_int64(num, buf);
+ field_length = strlen(buf);
+ break;
+ case DBI_TYPE_STRING:
+ if(field_length) {
+ field_length = bsnprintf(buf, field_length + 1, "%s",
+ dbi_result_get_string(result, field_name));
+ } else {
+ buf[0] = 0;
+ }
+ break;
+ case DBI_TYPE_BINARY:
+ /* dbi_result_get_binary return a NULL pointer if value is empty
+ * following, change this to what Bacula expected
+ */
+ if(field_length) {
+ field_length = bsnprintf(buf, field_length + 1, "%s",
+ dbi_result_get_binary(result, field_name));
+ } else {
+ buf[0] = 0;
+ }
+ break;
+ case DBI_TYPE_DATETIME:
+ /* format as "YYYY-MM-DD HH:MM:SS" (19 chars + NUL = 20);
+ * -1 from dbi_result_get_datetime is treated as "no value" */
+ time_t last;
+ struct tm tm;
+
+ last = dbi_result_get_datetime(result, field_name);
+
+ if(last == -1) {
+ field_length = bsnprintf(buf, 20, "0000-00-00 00:00:00");
+ } else {
+ (void)localtime_r(&last, &tm);
+ field_length = bsnprintf(buf, 20, "%04d-%02d-%02d %02d:%02d:%02d",
+ (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ }
+ break;
+ }
+
+ } else {
+ /* seek failed: log the connection error; buf is returned with
+ * whatever check_pool_memory_size left in it */
+ dbi_conn_error(dbi_result_get_conn(result), &errmsg);
+ Dmsg1(500, "my_dbi_getvalue error: %s\n", errmsg);
+ }
+
+ Dmsg3(500, "my_dbi_getvalue finish result '%p' num bytes '%d' data '%s'\n",
+ result, field_length, buf);
+ return buf;
+}
+
+/* Return the id generated for the last insert into table_name,
+ * using the driver's last-sequence-value facility.
+ * NOTE(review): id is uint64_t but the return type is int -- values
+ * above INT_MAX truncate; confirm row ids stay within int range or
+ * widen the interface.  Also sequence[30] silently truncates long
+ * table names via bstrncat -- fine for the current catalog tables,
+ * verify if new tables are added. */
+int my_dbi_sql_insert_id(B_DB *mdb, char *table_name)
+{
+ /*
+ Obtain the current value of the sequence that
+ provides the serial value for primary key of the table.
+
+ currval is local to our session. It is not affected by
+ other transactions.
+
+ Determine the name of the sequence.
+ PostgreSQL automatically creates a sequence using
+ <table>_<column>_seq.
+ At the time of writing, all tables used this format for
+ for their primary key: <table>id
+ Except for basefiles which has a primary key on baseid.
+ Therefore, we need to special case that one table.
+
+ everything else can use the PostgreSQL formula.
+ */
+
+ char sequence[30];
+ uint64_t id = 0;
+
+ if (mdb->db_type == SQL_TYPE_POSTGRESQL) {
+
+ if (strcasecmp(table_name, "basefiles") == 0) {
+ bstrncpy(sequence, "basefiles_baseid", sizeof(sequence));
+ } else {
+ bstrncpy(sequence, table_name, sizeof(sequence));
+ bstrncat(sequence, "_", sizeof(sequence));
+ bstrncat(sequence, table_name, sizeof(sequence));
+ bstrncat(sequence, "id", sizeof(sequence));
+ }
+
+ bstrncat(sequence, "_seq", sizeof(sequence));
+ id = dbi_conn_sequence_last(mdb->db, NT_(sequence));
+ } else {
+ id = dbi_conn_sequence_last(mdb->db, NT_(table_name));
+ }
+
+ return id;
+}
+
+#ifdef HAVE_BATCH_FILE_INSERT
+/* Batch-mode helper queries.
+ * NOTE(review): "LOCK TABLE ... IN SHARE ROW EXCLUSIVE MODE" is
+ * PostgreSQL syntax and will fail on dbi drivers for MySQL/SQLite --
+ * consistent with batch insert being a PostgreSQL-only placeholder
+ * here (see TODO above). */
+const char *my_dbi_batch_lock_path_query =
+ "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE";
+
+
+const char *my_dbi_batch_lock_filename_query =
+ "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE";
+
+const char *my_dbi_batch_unlock_tables_query = "COMMIT";
+
+/* Insert only those batch paths not already present in Path */
+const char *my_dbi_batch_fill_path_query =
+ "INSERT INTO Path (Path) "
+ "SELECT a.Path FROM "
+ "(SELECT DISTINCT Path FROM batch) AS a "
+ "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ";
+
+
+/* Insert only those batch filenames not already present in Filename */
+const char *my_dbi_batch_fill_filename_query =
+ "INSERT INTO Filename (Name) "
+ "SELECT a.Name FROM "
+ "(SELECT DISTINCT Name FROM batch) as a "
+ "WHERE NOT EXISTS "
+ "(SELECT Name FROM Filename WHERE Name = a.Name)";
+#endif /* HAVE_BATCH_FILE_INSERT */
+
+#endif /* HAVE_DBI */
"DROP TABLE DelCandidates",
NULL};
-
-/* List of SQL commands to create temp table and indicies */
-const char *create_deltabs[] = {
- "CREATE TEMPORARY TABLE DelCandidates ("
-#if defined(HAVE_MYSQL)
- "JobId INTEGER UNSIGNED NOT NULL, "
- "PurgedFiles TINYINT, "
- "FileSetId INTEGER UNSIGNED, "
- "JobFiles INTEGER UNSIGNED, "
- "JobStatus BINARY(1))",
-#elif defined(HAVE_POSTGRESQL)
- "JobId INTEGER NOT NULL, "
- "PurgedFiles SMALLINT, "
- "FileSetId INTEGER, "
- "JobFiles INTEGER, "
- "JobStatus char(1))",
-#else
- "JobId INTEGER UNSIGNED NOT NULL, "
- "PurgedFiles TINYINT, "
- "FileSetId INTEGER UNSIGNED, "
- "JobFiles INTEGER UNSIGNED, "
- "JobStatus CHAR)",
-#endif
- "CREATE INDEX DelInx1 ON DelCandidates (JobId)",
- NULL};
+const char *create_delindex = "CREATE INDEX DelInx1 ON DelCandidates (JobId)";
/* Fill candidates table with all Jobs subject to being deleted.
* This is used for pruning Jobs (first the files, then the Jobs).
"FROM Client,Job WHERE Client.ClientId=Job.ClientId AND JobStatus='T' "
"AND Type='B' ORDER BY StartTime DESC LIMIT 20";
-#ifdef HAVE_MYSQL
-/* MYSQL IS NOT STANDARD SQL !!!!! */
-/* List Jobs where a particular file is saved */
-const char *uar_file =
- "SELECT Job.JobId as JobId,"
- "CONCAT(Path.Path,Filename.Name) as Name, "
- "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
- "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
- "AND Client.ClientId=Job.ClientId "
- "AND Job.JobId=File.JobId "
- "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
- "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20";
-#else
-/* List Jobs where a particular file is saved */
-const char *uar_file =
- "SELECT Job.JobId as JobId,"
- "Path.Path||Filename.Name as Name, "
- "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
- "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
- "AND Client.ClientId=Job.ClientId "
- "AND Job.JobId=File.JobId "
- "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
- "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20";
-#endif
-
-
/*
* Find all files for a particular JobId and insert them into
* the tree during a restore.
const char *uar_del_temp = "DROP TABLE temp";
const char *uar_del_temp1 = "DROP TABLE temp1";
-const char *uar_create_temp =
- "CREATE TEMPORARY TABLE temp ("
-#ifdef HAVE_POSTGRESQL
- "JobId INTEGER NOT NULL,"
- "JobTDate BIGINT,"
- "ClientId INTEGER,"
- "Level CHAR,"
- "JobFiles INTEGER,"
- "JobBytes BIGINT,"
- "StartTime TEXT,"
- "VolumeName TEXT,"
- "StartFile INTEGER,"
- "VolSessionId INTEGER,"
- "VolSessionTime INTEGER)";
-#else
- "JobId INTEGER UNSIGNED NOT NULL,"
- "JobTDate BIGINT UNSIGNED,"
- "ClientId INTEGER UNSIGNED,"
- "Level CHAR,"
- "JobFiles INTEGER UNSIGNED,"
- "JobBytes BIGINT UNSIGNED,"
- "StartTime TEXT,"
- "VolumeName TEXT,"
- "StartFile INTEGER UNSIGNED,"
- "VolSessionId INTEGER UNSIGNED,"
- "VolSessionTime INTEGER UNSIGNED)";
-#endif
-
-const char *uar_create_temp1 =
- "CREATE TEMPORARY TABLE temp1 ("
-#ifdef HAVE_POSTGRESQL
- "JobId INTEGER NOT NULL,"
- "JobTDate BIGINT)";
-#else
- "JobId INTEGER UNSIGNED NOT NULL,"
- "JobTDate BIGINT UNSIGNED)";
-#endif
-
const char *uar_last_full =
"INSERT INTO temp1 SELECT Job.JobId,JobTdate "
"FROM Client,Job,JobMedia,Media,FileSet WHERE Client.ClientId=%s "
"AND Filename.FilenameId=File.FilenameId "
"ORDER BY Job.StartTime DESC LIMIT 1";
+/* Query to get list of files from table -- presumably built by an external program */
+const char *uar_jobid_fileindex_from_table =
+ "SELECT JobId,FileIndex from %s";
+
+
+/*
+ *
+ * This file contains all the SQL commands issued by the Director
+ *
+ * Kern Sibbald, July MMII
+ *
+ * Version $Id$
+ */
+/*
+ * Note, PostgreSQL imposes some constraints on using DISTINCT and GROUP BY
+ * for example, the following is illegal in PostgreSQL:
+ * SELECT DISTINCT JobId FROM temp ORDER BY StartTime ASC;
+ * because all the ORDER BY expressions must appear in the SELECT list!
+ */
+
+
+#include "bacula.h"
+#include "cats.h"
+
+/* ====== ua_prune.c */
+
+/* List of SQL commands to create temp table and indices.
+ * Indexed by db_type at runtime (see create_temp_tables in ua_prune.c:
+ * create_deltabs[db_type]); the entry order 0=MySQL, 1=PostgreSQL,
+ * 2=SQLite must stay in sync with the db_type values. */
+const char *create_deltabs[3] = {
+ /* MySQL */
+ "CREATE TEMPORARY TABLE DelCandidates ("
+ "JobId INTEGER UNSIGNED NOT NULL, "
+ "PurgedFiles TINYINT, "
+ "FileSetId INTEGER UNSIGNED, "
+ "JobFiles INTEGER UNSIGNED, "
+ "JobStatus BINARY(1))",
+ /* Postgresql */
+ "CREATE TEMPORARY TABLE DelCandidates ("
+ "JobId INTEGER NOT NULL, "
+ "PurgedFiles SMALLINT, "
+ "FileSetId INTEGER, "
+ "JobFiles INTEGER, "
+ "JobStatus char(1))",
+ /* SQLite */
+ "CREATE TEMPORARY TABLE DelCandidates ("
+ "JobId INTEGER UNSIGNED NOT NULL, "
+ "PurgedFiles TINYINT, "
+ "FileSetId INTEGER UNSIGNED, "
+ "JobFiles INTEGER UNSIGNED, "
+ "JobStatus CHAR)"};
+
+/* ======= ua_restore.c */
+
+/* List Jobs where a particular file is saved.
+ * Indexed by db_type (0=MySQL, 1=PostgreSQL, 2=SQLite); MySQL needs
+ * CONCAT() while the other backends use the standard || operator.
+ * Format args: client name, escaped filename. */
+const char *uar_file[3] = {
+ /* Mysql */
+ "SELECT Job.JobId as JobId,"
+ "CONCAT(Path.Path,Filename.Name) as Name, "
+ "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
+ "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
+ "AND Client.ClientId=Job.ClientId "
+ "AND Job.JobId=File.JobId "
+ "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
+ "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",
+ /* Postgresql */
+ "SELECT Job.JobId as JobId,"
+ "Path.Path||Filename.Name as Name, "
+ "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
+ "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
+ "AND Client.ClientId=Job.ClientId "
+ "AND Job.JobId=File.JobId "
+ "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
+ "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",
+ /* SQLite */
+ "SELECT Job.JobId as JobId,"
+ "Path.Path||Filename.Name as Name, "
+ "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
+ "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
+ "AND Client.ClientId=Job.ClientId "
+ "AND Job.JobId=File.JobId "
+ "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
+ "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20"};
+
+/* Create the restore "temp" work table.
+ * Indexed by db_type (0=MySQL, 1=PostgreSQL, 2=SQLite); PostgreSQL
+ * has no UNSIGNED modifier, hence the separate variant. */
+const char *uar_create_temp[3] = {
+ /* Mysql */
+ "CREATE TEMPORARY TABLE temp ("
+ "JobId INTEGER UNSIGNED NOT NULL,"
+ "JobTDate BIGINT UNSIGNED,"
+ "ClientId INTEGER UNSIGNED,"
+ "Level CHAR,"
+ "JobFiles INTEGER UNSIGNED,"
+ "JobBytes BIGINT UNSIGNED,"
+ "StartTime TEXT,"
+ "VolumeName TEXT,"
+ "StartFile INTEGER UNSIGNED,"
+ "VolSessionId INTEGER UNSIGNED,"
+ "VolSessionTime INTEGER UNSIGNED)",
+ /* Postgresql */
+ "CREATE TEMPORARY TABLE temp ("
+ "JobId INTEGER NOT NULL,"
+ "JobTDate BIGINT,"
+ "ClientId INTEGER,"
+ "Level CHAR,"
+ "JobFiles INTEGER,"
+ "JobBytes BIGINT,"
+ "StartTime TEXT,"
+ "VolumeName TEXT,"
+ "StartFile INTEGER,"
+ "VolSessionId INTEGER,"
+ "VolSessionTime INTEGER)",
+ /* SQLite */
+ "CREATE TEMPORARY TABLE temp ("
+ "JobId INTEGER UNSIGNED NOT NULL,"
+ "JobTDate BIGINT UNSIGNED,"
+ "ClientId INTEGER UNSIGNED,"
+ "Level CHAR,"
+ "JobFiles INTEGER UNSIGNED,"
+ "JobBytes BIGINT UNSIGNED,"
+ "StartTime TEXT,"
+ "VolumeName TEXT,"
+ "StartFile INTEGER UNSIGNED,"
+ "VolSessionId INTEGER UNSIGNED,"
+ "VolSessionTime INTEGER UNSIGNED)"};
+
+/* Create the restore "temp1" work table.
+ * Indexed by db_type (0=MySQL, 1=PostgreSQL, 2=SQLite). */
+const char *uar_create_temp1[3] = {
+ /* Mysql */
+ "CREATE TEMPORARY TABLE temp1 ("
+ "JobId INTEGER UNSIGNED NOT NULL,"
+ "JobTDate BIGINT UNSIGNED)",
+ /* Postgresql */
+ "CREATE TEMPORARY TABLE temp1 ("
+ "JobId INTEGER NOT NULL,"
+ "JobTDate BIGINT)",
+ /* SQLite */
+ "CREATE TEMPORARY TABLE temp1 ("
+ "JobId INTEGER UNSIGNED NOT NULL,"
+ "JobTDate BIGINT UNSIGNED)"};
+
/* Query to get all files in a directory -- no recursing
* Note, for PostgreSQL since it respects the "Single Value
* rule", the results of the SELECT will be unoptimized.
* for each time it was backed up.
*/
-#ifdef HAVE_POSTGRESQL
-const char *uar_jobid_fileindex_from_dir =
+const char *uar_jobid_fileindex_from_dir[3] = {
+ /* Mysql */
"SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId IN (%s) "
"AND Job.JobId=File.JobId "
"AND Client.Name='%s' "
"AND Job.ClientId=Client.ClientId "
"AND Path.PathId=File.Pathid "
- "AND Filename.FilenameId=File.FilenameId";
-#else
-const char *uar_jobid_fileindex_from_dir =
+ "AND Filename.FilenameId=File.FilenameId "
+ "GROUP BY File.FileIndex ",
+ /* Postgresql */
+ "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
+ "WHERE Job.JobId IN (%s) "
+ "AND Job.JobId=File.JobId "
+ "AND Path.Path='%s' "
+ "AND Client.Name='%s' "
+ "AND Job.ClientId=Client.ClientId "
+ "AND Path.PathId=File.Pathid "
+ "AND Filename.FilenameId=File.FilenameId",
+ /* SQLite */
"SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId IN (%s) "
"AND Job.JobId=File.JobId "
"AND Job.ClientId=Client.ClientId "
"AND Path.PathId=File.Pathid "
"AND Filename.FilenameId=File.FilenameId "
- "GROUP BY File.FileIndex ";
-#endif
-
-/* Query to get list of files from table -- presuably built by an external program */
-const char *uar_jobid_fileindex_from_table =
- "SELECT JobId,FileIndex from %s";
+ "GROUP BY File.FileIndex "};
Switzerland, email:ftf@fsfeurope.org.
*/
+
extern const char CATS_IMP_EXP *client_backups;
extern const char CATS_IMP_EXP *list_pool;
extern const char CATS_IMP_EXP *drop_deltabs[];
-extern const char CATS_IMP_EXP *create_deltabs[];
+extern const char CATS_IMP_EXP *create_delindex;
extern const char CATS_IMP_EXP *insert_delcand;
extern const char CATS_IMP_EXP *select_backup_del;
extern const char CATS_IMP_EXP *select_verify_del;
extern const char CATS_IMP_EXP *upd_Purged;
extern const char CATS_IMP_EXP *uar_list_jobs;
-extern const char CATS_IMP_EXP *uar_file;
extern const char CATS_IMP_EXP *uar_count_files;
extern const char CATS_IMP_EXP *uar_sel_files;
extern const char CATS_IMP_EXP *uar_del_temp;
extern const char CATS_IMP_EXP *uar_del_temp1;
-extern const char CATS_IMP_EXP *uar_create_temp;
-extern const char CATS_IMP_EXP *uar_create_temp1;
extern const char CATS_IMP_EXP *uar_last_full;
extern const char CATS_IMP_EXP *uar_full;
extern const char CATS_IMP_EXP *uar_inc;
extern const char CATS_IMP_EXP *uar_sel_all_temp;
extern const char CATS_IMP_EXP *uar_count_files;
extern const char CATS_IMP_EXP *uar_jobids_fileindex;
-extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_dir;
extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_table;
extern const char CATS_IMP_EXP *uar_sel_jobid_temp;
+
+extern const char CATS_IMP_EXP *create_deltabs[3];
+
+extern const char CATS_IMP_EXP *uar_file[3];
+extern const char CATS_IMP_EXP *uar_create_temp[3];
+extern const char CATS_IMP_EXP *uar_create_temp1[3];
+extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_dir[3];
static const int dbglevel = 500;
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI
/* -----------------------------------------------------------------------
*
#endif /* ! HAVE_BATCH_FILE_INSERT */
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL */
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI */
#include "cats.h"
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI
/* -----------------------------------------------------------------------
*
* Generic Routines (or almost generic)
#include "bacula.h"
#include "cats.h"
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI
/* -----------------------------------------------------------------------
*
#include "bacula.h"
#include "cats.h"
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI
/* -----------------------------------------------------------------------
*
}
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL*/
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI */
#include "bacula.h"
#include "cats.h"
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI
/* -----------------------------------------------------------------------
*
#include "bacula.h"
#include "cats.h"
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI
/* -----------------------------------------------------------------------
*
# Generic catalog service
Catalog {
Name = MyCatalog
+# Uncomment the following line if you want the dbi driver
+# dbdriver = "dbi:mysql"; dbaddress = 127.0.0.1; dbport = 3306
dbname = @db_name@; user = @db_user@; password = ""
}
static bool create_temp_tables(UAContext *ua)
{
- int i;
/* Create temp tables and indicies */
- for (i=0; create_deltabs[i]; i++) {
- if (!db_sql_query(ua->db, create_deltabs[i], NULL, (void *)NULL)) {
- ua->error_msg("%s", db_strerror(ua->db));
- Dmsg0(050, "create DelTables table failed\n");
- return false;
- }
+ if (!db_sql_query(ua->db, create_deltabs[db_type], NULL, (void *)NULL)) {
+ ua->error_msg("%s", db_strerror(ua->db));
+ Dmsg0(050, "create DelTables table failed\n");
+ return false;
+ }
+ if (!db_sql_query(ua->db, create_delindex, NULL, (void *)NULL)) {
+ ua->error_msg("%s", db_strerror(ua->db));
+ Dmsg0(050, "create DelInx1 index failed\n");
+ return false;
}
return true;
}
len = strlen(ua->cmd);
fname = (char *)malloc(len * 2 + 1);
db_escape_string(ua->jcr, ua->db, fname, ua->cmd, len);
- Mmsg(rx->query, uar_file, rx->ClientName, fname);
+ Mmsg(rx->query, uar_file[db_type], rx->ClientName, fname);
free(fname);
gui_save = ua->jcr->gui;
ua->jcr->gui = true;
*/
static bool insert_dir_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *dir,
char *date)
-{
+{
strip_trailing_junk(dir);
if (*rx->JobIds == 0) {
ua->error_msg(_("No JobId specified cannot continue.\n"));
return false;
} else {
- Mmsg(rx->query, uar_jobid_fileindex_from_dir, rx->JobIds,
- dir, rx->ClientName);
+ Mmsg(rx->query, uar_jobid_fileindex_from_dir[db_type], rx->JobIds, dir, rx->ClientName);
}
rx->found = false;
/* Find and insert jobid and File Index */
/* Create temp tables */
db_sql_query(ua->db, uar_del_temp, NULL, NULL);
db_sql_query(ua->db, uar_del_temp1, NULL, NULL);
- if (!db_sql_query(ua->db, uar_create_temp, NULL, NULL)) {
+ if (!db_sql_query(ua->db, uar_create_temp[db_type], NULL, NULL)) {
ua->error_msg("%s\n", db_strerror(ua->db));
}
- if (!db_sql_query(ua->db, uar_create_temp1, NULL, NULL)) {
+ if (!db_sql_query(ua->db, uar_create_temp1[db_type], NULL, NULL)) {
ua->error_msg("%s\n", db_strerror(ua->db));
}
/*
static time_t lasttime = 0;
+static const char *db_driver = "NULL";
static const char *db_name = "bacula";
static const char *db_user = "bacula";
static const char *db_password = "";
static const char *db_host = NULL;
+static int db_port = 0;
static const char *wd = NULL;
static bool update_db = false;
static bool update_vol_info = false;
" -d <nn> set debug level to <nn>\n"
" -dt print timestamp in debug output\n"
" -m update media info in database\n"
+" -D <driver name> specify the driver database name (default NULL)\n"
" -n <name> specify the database name (default bacula)\n"
" -u <user> specify database user name (default bacula)\n"
" -P <password> specify database password (default none)\n"
" -h <host> specify database host (default NULL)\n"
+" -t <port> specify database port (default 0)\n"
" -p proceed inspite of I/O errors\n"
" -r list records\n"
" -s synchronize or store in database\n"
OSDependentInit();
- while ((ch = getopt(argc, argv, "b:c:d:h:mn:pP:rsSu:vV:w:?")) != -1) {
+ /* optstring: d and t take arguments (case 'd' reads *optarg, case 't'
+  * calls atoi(optarg)), so both need a trailing colon; D: is the new
+  * driver option; p stays a plain flag (no colon, listed once). */
+ while ((ch = getopt(argc, argv, "b:c:d:D:h:mn:pP:rsSt:u:vV:w:?")) != -1) {
switch (ch) {
case 'S' :
showProgress = true;
configfile = bstrdup(optarg);
break;
+ case 'D':
+ db_driver = optarg;
+ break;
+
case 'd': /* debug level */
if (*optarg == 't') {
dbg_timestamp = true;
case 'h':
db_host = optarg;
break;
+
+ case 't':
+ db_port = atoi(optarg);
+ break;
case 'm':
update_vol_info = true;
edit_uint64(currentVolumeSize, ed1));
}
- if ((db=db_init_database(NULL, db_name, db_user, db_password,
- db_host, 0, NULL, 0)) == NULL) {
+ if ((db=db_init(NULL, db_driver, db_name, db_user, db_password,
+ db_host, db_port, NULL, 0)) == NULL) {
Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n"));
}
if (!db_open_database(NULL, db)) {
General:
22Feb08
+kes Apply patch (with some difficulties) from Joao Henrique Freitas
+ <joaohf@gmail.com>, which adds support for libdbi as a Bacula
+ database driver.
kes Add patch from Martin Schmid scm@apsag.com that checks to see if
ftruncate() actually works. In the case of some (cheap) NAS devices,
it does not, and so recycling NAS Volumes does not work. The code