WRAPLIBS = @WRAPLIBS@
DINCLUDE = @DINCLUDE@
DLIB = @DLIB@
-DB_LIBS = @DB_LIBS@
PYTHON_LIBS = @PYTHON_LIBS@
PYTHON_INC = @PYTHON_INCDIR@
OPENSSL_LIBS = @OPENSSL_LIBS@
-BDB_CPPFLAGS = @BDB_CPPFLAGS@
-BDB_LIBS = @BDB_LIBS@
# Windows (cygwin) flags
/* Define if you want to use embedded MySQL */
#undef HAVE_EMBEDDED_MYSQL
-/* Define if you want to use SQLite */
-#undef HAVE_SQLITE
-
/* Define if you want to use SQLite3 */
#undef HAVE_SQLITE3
-/* Define if you want to use Berkeley DB */
-#undef HAVE_BERKELEY_DB
-
-/* Define if you want to use mSQL */
-#undef HAVE_MSQL
-
-/* Define if you want to use iODBC */
-#undef HAVE_IODBC
-
-/* Define if you want to use unixODBC */
-#undef HAVE_UNIXODBC
-
-/* Define if you want to use Solid SQL Server */
-#undef HAVE_SOLID
-
-/* Define if you want to use OpenLink ODBC (Virtuoso) */
-#undef HAVE_VIRT
-
-/* Define if you want to use EasySoft ODBC */
-#undef HAVE_EASYSOFT
-
-/* Define if you want to use Interbase SQL Server */
-#undef HAVE_IBASE
-
-/* Define if you want to use Oracle 8 SQL Server */
-#undef HAVE_ORACLE8
-
-/* Define if you want to use Oracle 7 SQL Server */
-#undef HAVE_ORACLE7
-
-
/* ------------------------------------------------------------------------- */
/* -- CONFIGURE DETECTED FEATURES -- */
/* ------------------------------------------------------------------------- */
/* Define to 1 if utime.h exists and declares struct utimbuf. */
#undef HAVE_UTIME_H
-#if (HAVE_MYSQL||HAVE_POSTGRESQL||HAVE_MSQL||HAVE_IODBC||HAVE_UNIXODBC||HAVE_SOLID||HAVE_VIRT||HAVE_IBASE||HAVE_ORACLE8||HAVE_ORACLE7||HAVE_EASYSOFT)
-#define HAVE_SQL
-#endif
-
/* Data types */
#undef HAVE_U_INT
#undef HAVE_INTXX_T
AC_DEFUN([BA_CHECK_DBI_DB],
[
-db_found=no
AC_MSG_CHECKING(for DBI support)
AC_ARG_WITH(dbi,
AC_HELP_STRING([--with-dbi@<:@=DIR@:>@], [Include DBI support. DIR is the DBD base install directory, default is to search through a number of common places for the DBI files.]),
AC_MSG_ERROR(Invalid DBD driver directory $withval - unable to find DBD drivers under $withval)
fi
fi
- SQL_INCLUDE=-I$DBI_INCDIR
- SQL_LFLAGS="-L$DBI_LIBDIR -ldbi"
- SQL_BINDIR=$DBI_BINDIR
- SQL_LIB=$DBI_LIBDIR/libdbi.a
+ DBI_INCLUDE=-I$DBI_INCDIR
+ DBI_LIBS="-R $DBI_LIBDIR -L$DBI_LIBDIR -ldbi"
+ DBI_LIB=$DBI_LIBDIR/libdbi.a
DBI_DBD_DRIVERDIR="-D DBI_DRIVER_DIR=\\\"$DRIVERDIR\\\""
+ DB_LIBS="${DB_LIBS} ${DBI_LIBS}"
AC_DEFINE(HAVE_DBI, 1, [Set if you have the DBI driver])
AC_MSG_RESULT(yes)
- db_found=yes
- support_dbi=yes
- db_type=DBI
- DB_TYPE=dbi
+
+ if test -z "${db_backends}"; then
+ db_backends="DBI"
+ else
+ db_backends="${db_backends} DBI"
+ fi
+ if test -z "${DB_BACKENDS}" ; then
+ DB_BACKENDS="dbi"
+ else
+ DB_BACKENDS="${DB_BACKENDS} dbi"
+ fi
uncomment_dbi=" "
else
],[
AC_MSG_RESULT(no)
])
-AC_SUBST(SQL_LFLAGS)
-AC_SUBST(SQL_INCLUDE)
-AC_SUBST(SQL_BINDIR)
+
+AC_SUBST(DBI_LIBS)
+AC_SUBST(DBI_INCLUDE)
+AC_SUBST(DBI_BINDIR)
AC_SUBST(DBI_DBD_DRIVERDIR)
AC_SUBST(uncomment_dbi)
AC_DEFUN([BA_CHECK_DBI_DRIVER],
[
-db_found=no
db_prog=no
AC_MSG_CHECKING(for DBI drivers support)
AC_ARG_WITH(dbi-driver,
SQL_BINDIR=/usr/local/mysql/bin
if test -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.a \
-o -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.so; then
- SQL_LIBDIR=/usr/local/mysql/lib64/mysql
+ MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql
else
- SQL_LIBDIR=/usr/local/mysql/lib/mysql
+ MYSQL_LIBDIR=/usr/local/mysql/lib/mysql
fi
elif test -f /usr/bin/mysql; then
SQL_BINDIR=/usr/bin
if test -f /usr/lib64/mysql/libmysqlclient_r.a \
-o -f /usr/lib64/mysql/libmysqlclient_r.so; then
- SQL_LIBDIR=/usr/lib64/mysql
+ MYSQL_LIBDIR=/usr/lib64/mysql
elif test -f /usr/lib/mysql/libmysqlclient_r.a \
-o -f /usr/lib/mysql/libmysqlclient_r.so; then
- SQL_LIBDIR=/usr/lib/mysql
+ MYSQL_LIBDIR=/usr/lib/mysql
else
- SQL_LIBDIR=/usr/lib
+ MYSQL_LIBDIR=/usr/lib
fi
elif test -f /usr/local/bin/mysql; then
SQL_BINDIR=/usr/local/bin
if test -f /usr/local/lib64/mysql/libmysqlclient_r.a \
-o -f /usr/local/lib64/mysql/libmysqlclient_r.so; then
- SQL_LIBDIR=/usr/local/lib64/mysql
+ MYSQL_LIBDIR=/usr/local/lib64/mysql
elif test -f /usr/local/lib/mysql/libmysqlclient_r.a \
-o -f /usr/local/lib/mysql/libmysqlclient_r.so; then
- SQL_LIBDIR=/usr/local/lib/mysql
+ MYSQL_LIBDIR=/usr/local/lib/mysql
else
- SQL_LIBDIR=/usr/local/lib
+ MYSQL_LIBDIR=/usr/local/lib
fi
elif test -f $withval/bin/mysql; then
SQL_BINDIR=$withval/bin
if test -f $withval/lib64/mysql/libmysqlclient_r.a \
-o -f $withval/lib64/mysql/libmysqlclient_r.so; then
- SQL_LIBDIR=$withval/lib64/mysql
+ MYSQL_LIBDIR=$withval/lib64/mysql
elif test -f $withval/lib64/libmysqlclient_r.a \
-o -f $withval/lib64/libmysqlclient_r.so; then
- SQL_LIBDIR=$withval/lib64
+ MYSQL_LIBDIR=$withval/lib64
elif test -f $withval/lib/libmysqlclient_r.a \
-o -f $withval/lib/libmysqlclient_r.so; then
- SQL_LIBDIR=$withval/lib/
+ MYSQL_LIBDIR=$withval/lib/
else
- SQL_LIBDIR=$withval/lib/mysql
+ MYSQL_LIBDIR=$withval/lib/mysql
fi
else
AC_MSG_RESULT(no)
db_prog="postgresql"
PG_CONFIG=`which pg_config`
if test -n "$PG_CONFIG"; then
- SQL_BINDIR=`"$PG_CONFIG" --bindir`
- SQL_LIBDIR=`"$PG_CONFIG" --libdir`
+ POSTGRESQL_BINDIR=`"$PG_CONFIG" --bindir`
+ POSTGRESQL_LIBDIR=`"$PG_CONFIG" --libdir`
elif test -f /usr/local/bin/psql; then
- SQL_BINDIR=/usr/local/bin
+ POSTGRESQL_BINDIR=/usr/local/bin
if test -d /usr/local/lib64; then
- SQL_LIBDIR=/usr/local/lib64
+ POSTGRESQL_LIBDIR=/usr/local/lib64
else
- SQL_LIBDIR=/usr/local/lib
+ POSTGRESQL_LIBDIR=/usr/local/lib
fi
elif test -f /usr/bin/psql; then
- SQL_BINDIR=/usr/local/bin
+ POSTGRESQL_BINDIR=/usr/local/bin
if test -d /usr/lib64/postgresql; then
- SQL_LIBDIR=/usr/lib64/postgresql
+ POSTGRESQL_LIBDIR=/usr/lib64/postgresql
elif test -d /usr/lib/postgresql; then
- SQL_LIBDIR=/usr/lib/postgresql
+ POSTGRESQL_LIBDIR=/usr/lib/postgresql
elif test -d /usr/lib64; then
- SQL_LIBDIR=/usr/lib64
+ POSTGRESQL_LIBDIR=/usr/lib64
else
- SQL_LIBDIR=/usr/lib
+ POSTGRESQL_LIBDIR=/usr/lib
fi
elif test -f $withval/bin/psql; then
- SQL_BINDIR=$withval/bin
+ POSTGRESQL_BINDIR=$withval/bin
if test -d $withval/lib64; then
- SQL_LIBDIR=$withval/lib64
+ POSTGRESQL_LIBDIR=$withval/lib64
else
- SQL_LIBDIR=$withval/lib
+ POSTGRESQL_LIBDIR=$withval/lib
fi
else
AC_MSG_RESULT(no)
AC_MSG_ERROR(Unable to find psql in standard locations)
fi
- if test -f $SQL_LIBDIR/libpq.so; then
- DB_PROG_LIB=$SQL_LIBDIR/libpq.so
- else
- DB_PROG_LIB=$SQL_LIBDIR/libpq.a
- fi
- ;;
- "sqlite")
- db_prog="sqlite"
- if test -f /usr/local/bin/sqlite; then
- SQL_BINDIR=/usr/local/bin
- if test -d /usr/local/lib64; then
- SQL_LIBDIR=/usr/local/lib64
- else
- SQL_LIBDIR=/usr/local/lib
- fi
- elif test -f /usr/bin/sqlite; then
- SQL_BINDIR=/usr/bin
- if test -d /usr/lib64; then
- SQL_LIBDIR=/usr/lib64
- else
- SQL_LIBDIR=/usr/lib
- fi
- elif test -f $withval/bin/sqlite; then
- SQL_BINDIR=$withval/bin
- if test -d $withval/lib64; then
- SQL_LIBDIR=$withval/lib64
- else
- SQL_LIBDIR=$withval/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Unable to find sqlite in standard locations)
- fi
- if test -f $SQL_LIBDIR/libsqlite.so; then
- DB_PROG_LIB=$SQL_LIBDIR/libsqlite.so
+ if test -f $POSTGRESQL_LIBDIR/libpq.so; then
+ DB_PROG_LIB=$POSTGRESQL_LIBDIR/libpq.so
else
- DB_PROG_LIB=$SQL_LIBDIR/libsqlite.a
+ DB_PROG_LIB=$POSTGRESQL_LIBDIR/libpq.a
fi
;;
"sqlite3")
db_prog="sqlite3"
if test -f /usr/local/bin/sqlite3; then
- SQL_BINDIR=/usr/local/bin
+ SQLITE_BINDIR=/usr/local/bin
if test -d /usr/local/lib64; then
- SQL_LIBDIR=/usr/local/lib64
+ SQLITE_LIBDIR=/usr/local/lib64
else
- SQL_LIBDIR=/usr/local/lib
+ SQLITE_LIBDIR=/usr/local/lib
fi
elif test -f /usr/bin/sqlite3; then
- SQL_BINDIR=/usr/bin
+ SQLITE_BINDIR=/usr/bin
if test -d /usr/lib64; then
- SQL_LIBDIR=/usr/lib64
+ SQLITE_LIBDIR=/usr/lib64
else
- SQL_LIBDIR=/usr/lib
+ SQLITE_LIBDIR=/usr/lib
fi
elif test -f $withval/bin/sqlite3; then
- SQL_BINDIR=$withval/bin
+ SQLITE_BINDIR=$withval/bin
if test -d $withval/lib64; then
- SQL_LIBDIR=$withval/lib64
+ SQLITE_LIBDIR=$withval/lib64
else
- SQL_LIBDIR=$withval/lib
+ SQLITE_LIBDIR=$withval/lib
fi
else
AC_MSG_RESULT(no)
AC_MSG_ERROR(Unable to find sqlite in standard locations)
fi
- if test -f $SQL_LIBDIR/libsqlite3.so; then
- DB_PROG_LIB=$SQL_LIBDIR/libsqlite3.so
+ if test -f $SQLITE_LIBDIR/libsqlite3.so; then
+ DB_PROG_LIB=$SQLITE_LIBDIR/libsqlite3.so
else
- DB_PROG_LIB=$SQL_LIBDIR/libsqlite3.a
+ DB_PROG_LIB=$SQLITE_LIBDIR/libsqlite3.a
fi
;;
*)
],[
AC_MSG_RESULT(no)
])
-AC_SUBST(SQL_BINDIR)
+
+AC_SUBST(MYSQL_BINDIR)
+AC_SUBST(POSTGRESQL_BINDIR)
+AC_SUBST(SQLITE_BINDIR)
AC_SUBST(DB_PROG)
AC_SUBST(DB_PROG_LIB)
AC_DEFUN([BA_CHECK_MYSQL_DB],
[
-db_found=no
AC_MSG_CHECKING(for MySQL support)
AC_ARG_WITH(mysql,
AC_HELP_STRING([--with-mysql@<:@=DIR@:>@], [Include MySQL support. DIR is the MySQL base install directory, default is to search through a number of common places for the MySQL files.]),
AC_MSG_ERROR(Invalid MySQL directory $withval - unable to find mysql.h under $withval)
fi
fi
- SQL_INCLUDE=-I$MYSQL_INCDIR
- if test -f $MYSQL_LIBDIR/libmysqlclient_r.a \
- -o -f $MYSQL_LIBDIR/libmysqlclient_r.so; then
- SQL_LFLAGS="-L$MYSQL_LIBDIR -lmysqlclient_r -lz"
- AC_DEFINE(HAVE_THREAD_SAFE_MYSQL)
- fi
- SQL_BINDIR=$MYSQL_BINDIR
- SQL_LIB=$MYSQL_LIBDIR/libmysqlclient_r.a
+ MYSQL_INCLUDE=-I$MYSQL_INCDIR
+ if test -f $MYSQL_LIBDIR/libmysqlclient_r.a \
+ -o -f $MYSQL_LIBDIR/libmysqlclient_r.so; then
+ MYSQL_LIBS="-R $MYSQL_LIBDIR -L$MYSQL_LIBDIR -lmysqlclient_r -lz"
+ AC_DEFINE(HAVE_THREAD_SAFE_MYSQL, 1, [Set if Thread Safe MySQL can be checked using mysql_thread_safe])
+ DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}"
+ fi
+ MYSQL_LIB=$MYSQL_LIBDIR/libmysqlclient_r.a
- AC_DEFINE(HAVE_MYSQL, 1, [Set if you have an MySQL Database])
- AC_MSG_RESULT(yes)
- db_found=yes
- support_mysql=yes
- db_type=MySQL
- DB_TYPE=mysql
+ AC_DEFINE(HAVE_MYSQL, 1, [Set if you have a MySQL Database])
+ AC_MSG_RESULT(yes)
+ if test -z "${db_backends}" ; then
+ db_backends="MySQL"
+ else
+ db_backends="${db_backends} MySQL"
+ fi
+ if test -z "${DB_BACKENDS}" ; then
+ DB_BACKENDS="mysql"
+ else
+ DB_BACKENDS="${DB_BACKENDS} mysql"
+ fi
else
- AC_MSG_RESULT(no)
+ AC_MSG_RESULT(no)
fi
]
)
AC_MSG_ERROR(Invalid MySQL directory $withval - unable to find mysql.h under $withval)
fi
fi
- SQL_INCLUDE=-I$MYSQL_INCDIR
- SQL_LFLAGS="-L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt"
- SQL_BINDIR=$MYSQL_BINDIR
- SQL_LIB=$MYSQL_LIBDIR/libmysqld.a
+ MYSQL_INCLUDE=-I$MYSQL_INCDIR
+ MYSQL_LIBS="-R $MYSQL_LIBDIR -L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt"
+ MYSQL_LIB=$MYSQL_LIBDIR/libmysqld.a
+ DB_LIBS="${DB_LIBS} ${MYSQL_LIBS}"
- AC_DEFINE(HAVE_MYSQL)
- AC_DEFINE(HAVE_EMBEDDED_MYSQL)
- AC_MSG_RESULT(yes)
- db_found=yes
- support_mysql=yes
- db_type=MySQL
- DB_TYPE=mysql
+ AC_DEFINE(HAVE_MYSQL, 1, [Set if you have a MySQL Database])
+ AC_DEFINE(HAVE_EMBEDDED_MYSQL, 1, [Set if you have an Embedded MySQL Database])
+ AC_MSG_RESULT(yes)
+ if test -z "${db_backends}"; then
+ db_backends="MySQL"
+ else
+ db_backends="${db_backends} MySQL"
+ fi
+ if test -z "${DB_BACKENDS}"; then
+ DB_BACKENDS="mysql"
+ else
+ DB_BACKENDS="${DB_BACKENDS} mysql"
+ fi
else
- AC_MSG_RESULT(no)
+ AC_MSG_RESULT(no)
fi
]
)
-
-AC_SUBST(SQL_LFLAGS)
-AC_SUBST(SQL_INCLUDE)
-AC_SUBST(SQL_BINDIR)
+AC_SUBST(MYSQL_LIBS)
+AC_SUBST(MYSQL_INCLUDE)
+AC_SUBST(MYSQL_BINDIR)
])
-
AC_DEFUN([BA_CHECK_INGRES_DB],
[
-db_found=no
AC_MSG_CHECKING(for Ingres support)
AC_ARG_WITH(ingres,
AC_HELP_STRING([--with-ingres@<:@=DIR@:>@], [Include Ingres support. DIR is the Ingres base install directory, default is to search through a number of common places for the Ingres files.]),
AC_MSG_ERROR(Invalid Ingres directory $withval - unable to find Ingres headers under $withval)
fi
fi
- SQL_INCLUDE=-I$INGRES_INCDIR
- SQL_LFLAGS="-L$INGRES_LIBDIR -lq.1 -lcompat.1 -lframe.1"
- SQL_BINDIR=$INGRES_BINDIR
- SQL_LIB=$INGRES_LIBDIR/libingres.a
+ INGRES_INCLUDE=-I$INGRES_INCDIR
+ INGRES_LIBS="-R $INGRES_LIBDIR -L$INGRES_LIBDIR -lq.1 -lcompat.1 -lframe.1"
+ DB_LIBS="${DB_LIBS} ${INGRES_LIBS}"
AC_DEFINE(HAVE_INGRES, 1, [Set if have Ingres Database])
AC_MSG_RESULT(yes)
- db_found=yes
- support_ingres=yes
- db_type=Ingres
- DB_TYPE=ingres
+ if test -z "${db_backends}"; then
+ db_backends="Ingres"
+ else
+ db_backends="${db_backends} Ingres"
+ fi
+ if test -z "${DB_BACKENDS}"; then
+ DB_BACKENDS="ingres"
+ else
+ DB_BACKENDS="${DB_BACKENDS} ingres"
+ fi
else
AC_MSG_RESULT(no)
fi
AC_MSG_RESULT(no)
])
-AC_SUBST(SQL_LFLAGS)
-AC_SUBST(SQL_INCLUDE)
-AC_SUBST(SQL_BINDIR)
+AC_SUBST(INGRES_LIBS)
+AC_SUBST(INGRES_INCLUDE)
+AC_SUBST(INGRES_BINDIR)
])
AC_DEFUN([BA_CHECK_SQLITE3_DB],
[
-db_found=no
AC_MSG_CHECKING(for SQLite3 support)
AC_ARG_WITH(sqlite3,
AC_HELP_STRING([--with-sqlite3@<:@=DIR@:>@], [Include SQLite3 support. DIR is the SQLite3 base install directory, default is to search through a number of common places for the SQLite3 files.]),
AC_MSG_ERROR(Invalid SQLite3 directory $withval - unable to find sqlite3.h under $withval)
fi
fi
- SQL_INCLUDE=-I$SQLITE_INCDIR
- SQL_LFLAGS="-L$SQLITE_LIBDIR -lsqlite3"
- SQL_BINDIR=$SQLITE_BINDIR
- SQL_LIB=$SQLITE_LIBDIR/libsqlite3.a
+ SQLITE_INCLUDE=-I$SQLITE_INCDIR
+ SQLITE_LIBS="-R $SQLITE_LIBDIR -L$SQLITE_LIBDIR -lsqlite3"
+ SQLITE_LIB=$SQLITE_LIBDIR/libsqlite3.a
+ DB_LIBS="${DB_LIBS} ${SQLITE_LIBS}"
- AC_DEFINE(HAVE_SQLITE3)
+ AC_DEFINE(HAVE_SQLITE3, 1, [Set if you have an SQLite3 Database])
AC_MSG_RESULT(yes)
- db_found=yes
- support_sqlite3=yes
- db_type=SQLite3
- DB_TYPE=sqlite3
+ if test -z "${db_backends}"; then
+ db_backends="SQLite3"
+ else
+ db_backends="${db_backends} SQLite3"
+ fi
+ if test -z "${DB_BACKENDS}"; then
+ DB_BACKENDS="sqlite3"
+ else
+ DB_BACKENDS="${DB_BACKENDS} sqlite3"
+ fi
else
AC_MSG_RESULT(no)
fi
],[
AC_MSG_RESULT(no)
])
-AC_SUBST(SQL_LFLAGS)
-AC_SUBST(SQL_INCLUDE)
-AC_SUBST(SQL_BINDIR)
+AC_SUBST(SQLITE_LIBS)
+AC_SUBST(SQLITE_INCLUDE)
+AC_SUBST(SQLITE_BINDIR)
])
AC_DEFUN([BA_CHECK_POSTGRESQL_DB],
[
-db_found=no
AC_MSG_CHECKING(for PostgreSQL support)
AC_ARG_WITH(postgresql,
AC_HELP_STRING([--with-postgresql@<:@=DIR@:>@], [Include PostgreSQL support. DIR is the PostgreSQL base install directory, @<:@default=/usr/local/pgsql@:>@]),
[
if test "$withval" != "no"; then
- if test "$db_found" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
if test "$withval" = "yes"; then
PG_CONFIG=`which pg_config`
- if test -n "$PG_CONFIG";then
+ if test -n "$PG_CONFIG"; then
POSTGRESQL_INCDIR=`"$PG_CONFIG" --includedir`
POSTGRESQL_LIBDIR=`"$PG_CONFIG" --libdir`
POSTGRESQL_BINDIR=`"$PG_CONFIG" --bindir`
POSTGRESQL_BINDIR=$withval/bin
elif test -f $withval/include/postgresql/libpq-fe.h; then
POSTGRESQL_INCDIR=$withval/include/postgresql
- if test -d $withval/lib64; then
- POSTGRESQL_LIBDIR=$withval/lib64
- else
- POSTGRESQL_LIBDIR=$withval/lib
- fi
+ if test -d $withval/lib64; then
+ POSTGRESQL_LIBDIR=$withval/lib64
+ else
+ POSTGRESQL_LIBDIR=$withval/lib
+ fi
POSTGRESQL_BINDIR=$withval/bin
else
AC_MSG_RESULT(no)
AC_MSG_ERROR(Invalid PostgreSQL directory $withval - unable to find libpq-fe.h under $withval)
fi
- AC_DEFINE(HAVE_POSTGRESQL)
- AC_MSG_RESULT(yes)
- POSTGRESQL_LFLAGS="-L$POSTGRESQL_LIBDIR -lpq"
- AC_CHECK_FUNC(crypt, , AC_CHECK_LIB(crypt, crypt, [POSTGRESQL_LFLAGS="$POSTGRESQL_LFLAGS -lcrypt"]))
- SQL_INCLUDE=-I$POSTGRESQL_INCDIR
- SQL_LFLAGS=$POSTGRESQL_LFLAGS
- SQL_BINDIR=$POSTGRESQL_BINDIR
- SQL_LIB=$POSTGRESQL_LIBDIR/libpq.a
-
- db_found=yes
- support_postgresql=yes
- db_type=PostgreSQL
- DB_TYPE=postgresql
- else
- AC_MSG_RESULT(no)
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(SQL_LFLAGS)
-AC_SUBST(SQL_INCLUDE)
-AC_SUBST(SQL_BINDIR)
-
-])
-
-
-
-AC_DEFUN([BA_CHECK_SQL_DB],
-[AC_MSG_CHECKING(Checking for various databases)
-dnl# --------------------------------------------------------------------------
-dnl# CHECKING FOR VARIOUS DATABASES (thanks to UdmSearch team)
-dnl# --------------------------------------------------------------------------
-dnl Check for some DBMS backend
-dnl NOTE: we can use only one backend at a time
-db_found=no
-DB_TYPE=none
-
-if test x$support_mysql = xyes; then
- cats=cats
-fi
-
-AC_MSG_CHECKING(for Berkeley DB support)
-AC_ARG_WITH(berkeleydb,
-AC_HELP_STRING([--with-berkeleydb@<:@=DIR@:>@], [Include Berkeley DB support. DIR is the Berkeley DB base install directory, default is to search through a number of common places for the DB files.]),
-[
- if test "$withval" != "no"; then
- if test "$withval" = "yes"; then
- if test -f /usr/include/db.h; then
- BERKELEYDB_INCDIR=/usr/include
- if test -d /usr/lib64; then
- BERKELEYDB_LIBDIR=/usr/lib64
- else
- BERKELEYDB_LIBDIR=/usr/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid Berkeley DB directory - unable to find db.h)
- fi
- else
- if test -f $withval/include/db.h; then
- BERKELEYDB_INCDIR=$withval/include
- if test -d $withval/lib64; then
- BERKELEYDB_LIBDIR=$withval/lib64
- else
- BERKELEYDB_LIBDIR=$withval/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid Berkeley DB directory - unable to find db.h under $withval)
- fi
- fi
- SQL_INCLUDE=-I$BERKELEYDB_INCDIR
- SQL_LFLAGS="-L$BERKELEYDB_LIBDIR -ldb"
-
- AC_DEFINE(HAVE_BERKELEY_DB)
- AC_MSG_RESULT(yes)
- have_db=yes
- support_mysql=yes
- DB_TYPE=BerkelyDB
-
- else
- AC_MSG_RESULT(no)
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(SQL_LFLAGS)
-AC_SUBST(SQL_INCLUDE)
-
-if test x$support_berkleydb = xyes; then
- cats=cats
-fi
-
-
-
-
-AC_MSG_CHECKING(for mSQL support)
-AC_ARG_WITH(msql,
-AC_HELP_STRING([--with-msql@<:@=DIR@:>@], [Include mSQL support. DIR is the mSQL base install directory @<:@default=/usr/local/Hughes@:>@]),
-[
- if test "$withval" != "no"; then
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
-
- if test "$withval" = "yes"; then
- MSQL_INCDIR=/usr/local/Hughes/include
- if test -d /usr/local/Hughes/lib64; then
- MSQL_LIBDIR=/usr/local/Hughes/lib64
- else
- MSQL_LIBDIR=/usr/local/Hughes/lib
- fi
- else
- MSQL_INCDIR=$withval/include
- if test -d $withval/lib64; then
- MSQL_LIBDIR=$withval/lib64
- else
- MSQL_LIBDIR=$withval/lib
- fi
- fi
- MSQL_INCLUDE=-I$MSQL_INCDIR
- MSQL_LFLAGS="-L$MSQL_LIBDIR -lmsql"
-
- AC_DEFINE(HAVE_MSQL)
- AC_MSG_RESULT(yes)
- have_db=yes
- else
- AC_MSG_RESULT(no)
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(MSQL_LFLAGS)
-AC_SUBST(MSQL_INCLUDE)
-
-
-AC_MSG_CHECKING(for iODBC support)
-AC_ARG_WITH(iodbc,
-AC_HELP_STRING([--with-iodbc@<:@=DIR@:>], [Include iODBC support. DIR is the iODBC base install directory @<:@default=/usr/local@:>@]),
-[
- if test "$withval" != "no"; then
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
- fi
-
- if test "$withval" = "yes"; then
- withval=/usr/local
- fi
-
- if test "$withval" != "no"; then
- if test -f $withval/include/isql.h; then
- IODBC_INCDIR=$withval/include
- if test -d $withval/lib64; then
- IODBC_LIBDIR=$withval/lib64
- else
- IODBC_LIBDIR=$withval/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid iODBC directory - unable to find isql.h)
- fi
- IODBC_LFLAGS="-L$IODBC_LIBDIR -liodbc"
- IODBC_INCLUDE=-I$IODBC_INCDIR
- AC_DEFINE(HAVE_IODBC)
- AC_MSG_RESULT(yes)
- have_db=yes
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(IODBC_LFLAGS)
-AC_SUBST(IODBC_INCLUDE)
-
-
-AC_MSG_CHECKING(for unixODBC support)
-AC_ARG_WITH(unixODBC,
-AC_HELP_STRING([--with-unixODBC@<:@=DIR@:>], [Include unixODBC support. DIR is the unixODBC base install directory @<:@default=/usr/local@:>@]),
-[
- if test "$withval" != "no"; then
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
- fi
-
- if test "$withval" = "yes"; then
- withval=/usr/local
- fi
-
- if test "$withval" != "no"; then
- if test -f $withval/include/sql.h; then
- UNIXODBC_INCDIR=$withval/include
- if test -d $withval/lib64; then
- UNIXODBC_LIBDIR=$withval/lib64
- else
- UNIXODBC_LIBDIR=$withval/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid unixODBC directory - unable to find sql.h)
- fi
- UNIXODBC_LFLAGS="-L$UNIXODBC_LIBDIR -lodbc"
- UNIXODBC_INCLUDE=-I$UNIXODBC_INCDIR
- AC_DEFINE(HAVE_UNIXODBC)
- AC_MSG_RESULT(yes)
- have_db=yes
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(UNIXODBC_LFLAGS)
-AC_SUBST(UNIXODBC_INCLUDE)
-
-
-AC_MSG_CHECKING(for Solid support)
-AC_ARG_WITH(solid,
-AC_HELP_STRING([--with-solid@<:@=DIR@:>], [Include Solid support. DIR is the Solid base install directory @<:@default=/usr/local@:>@]),
-[
- if test "$withval" != "no"; then
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
- fi
-
- if test "$withval" = "yes"; then
- withval=/usr/local
- fi
-
- if test "$withval" != "no"; then
- if test -f $withval/include/cli0cli.h; then
- SOLID_INCDIR=$withval/include
- if test -d $withval/lib64; then
- SOLID_LIBDIR=$withval/lib64
- else
- SOLID_LIBDIR=$withval/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid Solid directory - unable to find cli0cli.h)
- fi
- SOLID_LFLAGS="-L$SOLID_LIBDIR -lsolcli"
- SOLID_INCLUDE="-I$SOLID_INCDIR"
- AC_DEFINE(HAVE_SOLID)
- AC_MSG_RESULT(yes)
- have_db=yes
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(SOLID_LFLAGS)
-AC_SUBST(SOLID_INCLUDE)
-
-AC_MSG_CHECKING(for OpenLink ODBC support)
-AC_ARG_WITH(openlink,
-AC_HELP_STRING([--with-openlink@<:@=DIR@:>], [Include OpenLink ODBC support. DIR is the base OpenLink ODBC install directory]),
-[
- if test "$withval" != "no"; then
- if test "$withval" = "yes"; then
-
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
-
- if test -f /usr/local/virtuoso-ent/odbcsdk/include/isql.h; then
- VIRT_INCDIR=/usr/local/virtuoso-ent/odbcsdk/include/
- if test -d /usr/local/virtuoso-ent/odbcsdk/lib64/; then
- VIRT_LIBDIR=/usr/local/virtuoso-ent/odbcsdk/lib64/
- else
- VIRT_LIBDIR=/usr/local/virtuoso-ent/odbcsdk/lib/
- fi
- elif test -f /usr/local/virtuoso-lite/odbcsdk/include/isql.h; then
- VIRT_INCDIR=/usr/local/virtuoso-lite/odbcsdk/include/
- if test -d /usr/local/virtuoso-lite/odbcsdk/lib64/; then
- VIRT_LIBDIR=/usr/local/virtuoso-lite/odbcsdk/lib64/
- else
- VIRT_LIBDIR=/usr/local/virtuoso-lite/odbcsdk/lib/
- fi
- elif test -f /usr/local/virtuoso/odbcsdk/include/isql.h; then
- VIRT_INCDIR=/usr/local/virtuoso/odbcsdk/include/
- if test -d /usr/local/virtuoso/odbcsdk/lib64/; then
- VIRT_LIBDIR=/usr/local/virtuoso/odbcsdk/lib64/
- else
- VIRT_LIBDIR=/usr/local/virtuoso/odbcsdk/lib/
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid OpenLink ODBC directory - unable to find isql.h)
- fi
- else
- if test -f $withval/odbcsdk/include/isql.h; then
- VIRT_INCDIR=$withval/odbcsdk/include/
- if test -d $withval/odbcsdk/lib64/; then
- VIRT_LIBDIR=$withval/odbcsdk/lib64/
- else
- VIRT_LIBDIR=$withval/odbcsdk/lib/
- fi
- elif test -f $withval/include/isql.h; then
- VIRT_INCDIR=$withval/include/
- if test -d $withval/lib64/; then
- VIRT_LIBDIR=$withval/lib64/
- else
- VIRT_LIBDIR=$withval/lib/
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid OpenLink ODBC directory - unable to find isql.h under $withval)
- fi
- fi
- VIRT_INCLUDE=-I$VIRT_INCDIR
- VIRT_LFLAGS="-L$VIRT_LIBDIR -liodbc"
-
- AC_DEFINE(HAVE_VIRT)
- AC_MSG_RESULT(yes)
- have_db=yes
-
- else
- AC_MSG_RESULT(no)
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(VIRT_LFLAGS)
-AC_SUBST(VIRT_INCLUDE)
-
-
-AC_MSG_CHECKING(for EasySoft ODBC support)
-AC_ARG_WITH(easysoft,
-AC_HELP_STRING([--with-easysoft@<:@=DIR@:>], [Include EasySoft ODBC support. DIR is the base EasySoft ODBC install directory]),
-[
- if test "$withval" != "no"; then
- if test "$withval" = "yes"; then
-
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
-
- if test -f /usr/local/easysoft/oob/client/include/sql.h; then
- EASYSOFT_INCDIR=/usr/local/easysoft/oob/client/include/
- if test -d /usr/local/easysoft/oob/client/lib64/; then
- EASYSOFT_LFLAGS="-L/usr/local/easysoft/oob/client/lib64/ -L/usr/local/easysoft/lib64"
- else
- EASYSOFT_LFLAGS="-L/usr/local/easysoft/oob/client/lib/ -L/usr/local/easysoft/lib"
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid EasySoft ODBC directory - unable to find sql.h)
- fi
- else
- if test -f $withval/easysoft/oob/client/include/sql.h; then
- EASYSOFT_INCDIR=$withval/easysoft/oob/client/include/
- if test -d $withval/easysoft/oob/client/lib64/; then
- EASYSOFT_LFLAGS="-L$withval/easysoft/oob/client/lib64/ -L$withval/easysoft/lib64"
- else
- EASYSOFT_LFLAGS="-L$withval/easysoft/oob/client/lib/ -L$withval/easysoft/lib"
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid EasySoft ODBC directory - unable to find sql.h under $withval)
- fi
- fi
- EASYSOFT_INCLUDE=-I$EASYSOFT_INCDIR
- EASYSOFT_LFLAGS="$EASYSOFT_LFLAGS -lesoobclient -lesrpc -lsupport -lextra"
+ AC_DEFINE(HAVE_POSTGRESQL, 1, [Set if you have a PostgreSQL Database])
+ AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_EASYSOFT)
- AC_MSG_RESULT(yes)
- have_db=yes
+ POSTGRESQL_INCLUDE=-I$POSTGRESQL_INCDIR
+ POSTGRESQL_LIBS="-R $POSTGRESQL_LIBDIR -L$POSTGRESQL_LIBDIR -lpq"
+ AC_CHECK_FUNC(crypt, , AC_CHECK_LIB(crypt, crypt, [POSTGRESQL_LIBS="$POSTGRESQL_LIBS -lcrypt"]))
+ POSTGRESQL_LIB=$POSTGRESQL_LIBDIR/libpq.a
+ DB_LIBS="${DB_LIBS} ${POSTGRESQL_LIBS}"
+ if test -z "${db_backends}"; then
+ db_backends="PostgreSQL"
+ else
+ db_backends="${db_backends} PostgreSQL"
+ fi
+ if test -z "${DB_BACKENDS}"; then
+ DB_BACKENDS="postgresql"
+ else
+ DB_BACKENDS="${DB_BACKENDS} postgresql"
+ fi
else
- AC_MSG_RESULT(no)
+ AC_MSG_RESULT(no)
fi
],[
AC_MSG_RESULT(no)
])
-AC_SUBST(EASYSOFT_LFLAGS)
-AC_SUBST(EASYSOFT_INCLUDE)
-
-
-
-AC_MSG_CHECKING(for InterBase support)
-AC_ARG_WITH(ibase,
-AC_HELP_STRING([--with-ibase@<:@=DIR@:>@], [Include InterBase support. DIR is the InterBase install directory @<:@default=/usr/interbase@:>@]),
-[
- if test "$withval" != "no"; then
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
- fi
-
- if test "$withval" = "yes"; then
- withval=/usr/interbase
- fi
-
- if test "$withval" != "no"; then
- if test -f $withval/include/ibase.h; then
- IBASE_INCDIR=$withval/include
- if test -d $withval/lib64; then
- IBASE_LIBDIR=$withval/lib64
- else
- IBASE_LIBDIR=$withval/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid InterBase directory - unable to find ibase.h)
- fi
- IBASE_LFLAGS="-L$IBASE_LIBDIR -lgds"
- IBASE_INCLUDE=-I$IBASE_INCDIR
- AC_DEFINE(HAVE_IBASE)
- AC_MSG_RESULT(yes)
- have_db=yes
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(IBASE_LFLAGS)
-AC_SUBST(IBASE_INCLUDE)
-
-AC_MSG_CHECKING(for Oracle8 support)
-AC_ARG_WITH(oracle8,
-AC_HELP_STRING([--with-oracle8@<:@=DIR@:>@], [Include Oracle8 support. DIR is the Oracle home directory @<:@default=$ORACLE_HOME or /oracle8/app/oracle/product/8.0.5@:>@]),
-[
- if test "$withval" != "no"; then
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
- fi
-
- if test "$withval" = "yes"; then
- withval="$ORACLE_HOME"
- if test "$withval" = ""; then
- withval=/oracle8/app/oracle/product/8.0.5
- fi
- fi
-
- if test "$withval" != "no"; then
- if test -f $withval/rdbms/demo/oci.h; then
- ORACLE8_INCDIR1=$withval/rdbms/demo/
- ORACLE8_INCDIR2=$withval/rdbms/public/:
- ORACLE8_INCDIR3=$withval/network/public/
- ORACLE8_INCDIR4=$withval/plsql/public/
- if test -d $withval/lib64; then
- ORACLE8_LIBDIR1=$withval/lib64
- else
- ORACLE8_LIBDIR1=$withval/lib
- fi
- if test -d $withval/rdbms/lib64; then
- ORACLE8_LIBDIR2=$withval/rdbms/lib64
- else
- ORACLE8_LIBDIR2=$withval/rdbms/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid ORACLE directory - unable to find oci.h)
- fi
- if test -f $withval/lib64/libclntsh.so; then
- ORACLE8_LFLAGS="-L$ORACLE8_LIBDIR1 -L$ORACLE8_LIBDIR2 $withval/lib64/libclntsh.so -lmm -lepc -lclient -lvsn -lcommon -lgeneric -lcore4 -lnlsrtl3 -lnsl -lm -ldl -lnetv2 -lnttcp -lnetwork -lncr -lsql"
- else
- ORACLE8_LFLAGS="-L$ORACLE8_LIBDIR1 -L$ORACLE8_LIBDIR2 $withval/lib/libclntsh.so -lmm -lepc -lclient -lvsn -lcommon -lgeneric -lcore4 -lnlsrtl3 -lnsl -lm -ldl -lnetv2 -lnttcp -lnetwork -lncr -lsql"
- fi
- ORACLE8_INCLUDE="-I$ORACLE8_INCDIR1 -I$ORACLE8_INCDIR2 -I$ORACLE8_INCDIR3 -I$ORACLE8_INCDIR4"
- AC_DEFINE(HAVE_ORACLE8)
- AC_MSG_RESULT(yes)
- have_db=yes
- fi
-],[
- AC_MSG_RESULT(no)
-])
-AC_SUBST(ORACLE8_LFLAGS)
-AC_SUBST(ORACLE8_INCLUDE)
-
-
-AC_MSG_CHECKING(for Oracle7 support)
-AC_ARG_WITH(oracle7,
-AC_HELP_STRING([--with-oracle7@<:@=DIR@:>@], [Include Oracle 7.3 support. DIR is the Oracle home directory @<:@default=$ORACLE_HOME@:>@]),
-[
- if test "$withval" != "no"; then
- if test "$have_db" = "yes"; then
- AC_MSG_RESULT(error)
- AC_MSG_ERROR("You can configure for only one database.");
- fi
- fi
+AC_SUBST(POSTGRESQL_LIBS)
+AC_SUBST(POSTGRESQL_INCLUDE)
+AC_SUBST(POSTGRESQL_BINDIR)
- if test "$withval" = "yes"; then
- withval="$ORACLE_HOME"
- fi
-
- if test "$withval" != "no"; then
- if test -f $withval/rdbms/demo/ocidfn.h; then
- ORACLE7_INCDIR=$withval/rdbms/demo/
- if test -d $withval/lib64; then
- ORACLE7_LIBDIR1=$withval/lib64
- else
- ORACLE7_LIBDIR1=$withval/lib
- fi
- if test -d $withval/rdbms/lib64; then
- ORACLE7_LIBDIR2=$withval/rdbms/lib64
- else
- ORACLE7_LIBDIR2=$withval/rdbms/lib
- fi
- else
- AC_MSG_RESULT(no)
- AC_MSG_ERROR(Invalid ORACLE directory - unable to find ocidfn.h)
- fi
-
- ORACLEINST_TOP=$withval
- if test -f "$ORACLEINST_TOP/rdbms/lib/sysliblist"
- then
- ORA_SYSLIB="`cat $ORACLEINST_TOP/rdbms/lib/sysliblist`"
- elif test -f "$ORACLEINST_TOP/rdbms/lib64/sysliblist"
- then
- ORA_SYSLIB="`cat $ORACLEINST_TOP/rdbms/lib64/sysliblist`"
- elif test -f "$ORACLEINST_TOP/lib/sysliblist"
- then
- ORA_SYSLIB="`cat $ORACLEINST_TOP/lib/sysliblist`"
- elif test -f "$ORACLEINST_TOP/lib64/sysliblist"
- then
- ORA_SYSLIB="`cat $ORACLEINST_TOP/lib64/sysliblist`"
- else
- ORA_SYSLIB="-lm"
- fi
-
- ORACLE7_LFLAGS="-L$ORACLE7_LIBDIR1 -L$ORACLE7_LIBDIR2 \
- -lclient -lsqlnet -lncr -lsqlnet -lclient -lcommon \
- -lgeneric -lsqlnet -lncr -lsqlnet -lclient -lcommon -lgeneric \
- -lepc -lnlsrtl3 -lc3v6 -lcore3 -lnlsrtl3 -lcore3 -lnlsrtl3 \
- $ORA_SYSLIB -lcore3 $ORA_SYSLIB"
- ORACLE7_INCLUDE="-I$ORACLE7_INCDIR "
- AC_DEFINE(HAVE_ORACLE7)
- AC_MSG_RESULT(yes)
- have_db=yes
- fi
-],[
- AC_MSG_RESULT(no)
])
-AC_SUBST(ORACLE7_LFLAGS)
-AC_SUBST(ORACLE7_INCLUDE)
-])
-
AC_DEFUN([AM_CONDITIONAL],
[AC_SUBST($1_TRUE)
--- /dev/null
+AC_DEFUN([BA_CHECK_DBI_DB],
+[
+db_found=no
+AC_MSG_CHECKING(for DBI support)
+AC_ARG_WITH(dbi,
+AC_HELP_STRING([--with-dbi@<:@=DIR@:>@], [Include DBI support. DIR is the DBD base install directory, default is to search through a number of common places for the DBI files.]),
+[
+ if test "$withval" != "no"; then
+ if test "$withval" = "yes"; then
+ if test -f /usr/local/include/dbi/dbi.h; then
+ DBI_INCDIR=/usr/local/dbi/include
+ if test -d /usr/local/lib64; then
+ DBI_LIBDIR=/usr/local/lib64
+ else
+ DBI_LIBDIR=/usr/local/lib
+ fi
+ DBI_BINDIR=/usr/local/bin
+ elif test -f /usr/include/dbi/dbi.h; then
+ DBI_INCDIR=/usr/include
+ if test -d /usr/lib64; then
+ DBI_LIBDIR=/usr/lib64
+ else
+ DBI_LIBDIR=/usr/lib
+ fi
+ DBI_BINDIR=/usr/bin
+ elif test -f $prefix/include/dbi/dbi.h; then
+ DBI_INCDIR=$prefix/include
+ if test -d $prefix/lib64; then
+ DBI_LIBDIR=$prefix/lib64
+ else
+ DBI_LIBDIR=$prefix/lib
+ fi
+ DBI_BINDIR=$prefix/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find dbi.h in standard locations)
+ fi
+ if test -d /usr/local/lib/dbd; then
+ DRIVERDIR=/usr/local/lib/dbd
+ if test -d /usr/local/lib64/dbd; then
+ DRIVERDIR=/usr/local/lib64/dbd
+ else
+ DRIVERDIR=/usr/local/lib/dbd
+ fi
+ elif test -d /usr/lib/dbd; then
+ DRIVERDIR=/usr/lib/dbd
+ if test -d /usr/lib64/dbd; then
+ DRIVERDIR=/usr/lib64/dbd
+ else
+ DRIVERDIR=/usr/lib/dbd
+ fi
+ elif test -d $prefix/lib/dbd; then
+ if test -d $prefix/lib64/dbd; then
+ DRIVERDIR=$prefix/lib64/dbd
+ else
+ DRIVERDIR=$prefix/lib/dbd
+ fi
+ elif test -d /usr/local/lib64/dbd; then
+ DRIVERDIR=/usr/local/lib64/dbd
+ elif test -d /usr/lib64/dbd; then
+ DRIVERDIR=/usr/lib64/dbd
+ elif test -d $prefix/lib64/dbd; then
+ DRIVERDIR=$prefix/lib64/dbd
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find DBD drivers in standard locations)
+ fi
+ else
+ if test -f $withval/dbi.h; then
+ DBI_INCDIR=$withval
+ DBI_LIBDIR=$withval
+ DBI_BINDIR=$withval
+ elif test -f $withval/include/dbi/dbi.h; then
+ DBI_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ DBI_LIBDIR=$withval/lib64
+ else
+ DBI_LIBDIR=$withval/lib
+ fi
+ DBI_BINDIR=$withval/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid DBI directory $withval - unable to find dbi.h under $withval)
+ fi
+ if test -d $withval/dbd; then
+ DRIVERDIR=$withval/dbd
+ elif test -d $withval/lib/; then
+ if test -d $withval/lib64/dbd; then
+ DRIVERDIR=$withval/lib64/dbd
+ else
+ DRIVERDIR=$withval/lib/dbd
+ fi
+ elif test -d $withval/lib64/dbd; then
+ DRIVERDIR=$withval/lib64/dbd
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid DBD driver directory $withval - unable to find DBD drivers under $withval)
+ fi
+ fi
+ SQL_INCLUDE=-I$DBI_INCDIR
+ SQL_LFLAGS="-L$DBI_LIBDIR -ldbi"
+ SQL_BINDIR=$DBI_BINDIR
+ SQL_LIB=$DBI_LIBDIR/libdbi.a
+ DBI_DBD_DRIVERDIR="-D DBI_DRIVER_DIR=\\\"$DRIVERDIR\\\""
+
+ AC_DEFINE(HAVE_DBI, 1, [Set if you have the DBI driver])
+ AC_MSG_RESULT(yes)
+ db_found=yes
+ support_dbi=yes
+ db_type=DBI
+ DB_TYPE=dbi
+ uncomment_dbi=" "
+
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(SQL_LFLAGS)
+AC_SUBST(SQL_INCLUDE)
+AC_SUBST(SQL_BINDIR)
+AC_SUBST(DBI_DBD_DRIVERDIR)
+AC_SUBST(uncomment_dbi)
+
+])
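+
+dnl Usage sketch (illustrative only): configure.in simply invokes the macro,
+dnl e.g.
+dnl   BA_CHECK_DBI_DB
+dnl and the user selects libdbi at configure time:
+dnl   ./configure --with-dbi            (probe the standard locations above)
+dnl   ./configure --with-dbi=DIR        (use dbi.h and the dbd/ drivers under DIR)
+dnl On success HAVE_DBI is defined and SQL_INCLUDE, SQL_LFLAGS, SQL_BINDIR and
+dnl DBI_DBD_DRIVERDIR are substituted for the Makefiles.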
+
+AC_DEFUN([BA_CHECK_DBI_DRIVER],
+[
+db_found=no
+db_prog=no
+AC_MSG_CHECKING(for DBI drivers support)
+AC_ARG_WITH(dbi-driver,
+AC_HELP_STRING([--with-dbi-driver@<:@=DRIVER@:>@], [Support for a DBI driver. DRIVER is one DBI driver such as mysql, postgresql or sqlite3. Default is to not configure any driver.]),
+[
+ if test "$withval" != "no"; then
+ case $withval in
+ "mysql")
+ db_prog="mysql"
+ if test -f /usr/local/mysql/bin/mysql; then
+ SQL_BINDIR=/usr/local/mysql/bin
+ if test -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.a \
+ -o -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.so; then
+ SQL_LIBDIR=/usr/local/mysql/lib64/mysql
+ else
+ SQL_LIBDIR=/usr/local/mysql/lib/mysql
+ fi
+ elif test -f /usr/bin/mysql; then
+ SQL_BINDIR=/usr/bin
+ if test -f /usr/lib64/mysql/libmysqlclient_r.a \
+ -o -f /usr/lib64/mysql/libmysqlclient_r.so; then
+ SQL_LIBDIR=/usr/lib64/mysql
+ elif test -f /usr/lib/mysql/libmysqlclient_r.a \
+ -o -f /usr/lib/mysql/libmysqlclient_r.so; then
+ SQL_LIBDIR=/usr/lib/mysql
+ else
+ SQL_LIBDIR=/usr/lib
+ fi
+ elif test -f /usr/local/bin/mysql; then
+ SQL_BINDIR=/usr/local/bin
+ if test -f /usr/local/lib64/mysql/libmysqlclient_r.a \
+ -o -f /usr/local/lib64/mysql/libmysqlclient_r.so; then
+ SQL_LIBDIR=/usr/local/lib64/mysql
+ elif test -f /usr/local/lib/mysql/libmysqlclient_r.a \
+ -o -f /usr/local/lib/mysql/libmysqlclient_r.so; then
+ SQL_LIBDIR=/usr/local/lib/mysql
+ else
+ SQL_LIBDIR=/usr/local/lib
+ fi
+ elif test -f $withval/bin/mysql; then
+ SQL_BINDIR=$withval/bin
+ if test -f $withval/lib64/mysql/libmysqlclient_r.a \
+ -o -f $withval/lib64/mysql/libmysqlclient_r.so; then
+ SQL_LIBDIR=$withval/lib64/mysql
+ elif test -f $withval/lib64/libmysqlclient_r.a \
+ -o -f $withval/lib64/libmysqlclient_r.so; then
+ SQL_LIBDIR=$withval/lib64
+ elif test -f $withval/lib/libmysqlclient_r.a \
+ -o -f $withval/lib/libmysqlclient_r.so; then
+ SQL_LIBDIR=$withval/lib/
+ else
+ SQL_LIBDIR=$withval/lib/mysql
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find mysql in standard locations)
+ fi
+ if test -f $SQL_LIBDIR/libmysqlclient_r.so; then
+ DB_PROG_LIB=$SQL_LIBDIR/libmysqlclient_r.so
+ else
+ DB_PROG_LIB=$SQL_LIBDIR/libmysqlclient_r.a
+ fi
+ ;;
+ "postgresql")
+ db_prog="postgresql"
+ PG_CONFIG=`which pg_config`
+ if test -n "$PG_CONFIG"; then
+ SQL_BINDIR=`"$PG_CONFIG" --bindir`
+ SQL_LIBDIR=`"$PG_CONFIG" --libdir`
+ elif test -f /usr/local/bin/psql; then
+ SQL_BINDIR=/usr/local/bin
+ if test -d /usr/local/lib64; then
+ SQL_LIBDIR=/usr/local/lib64
+ else
+ SQL_LIBDIR=/usr/local/lib
+ fi
+ elif test -f /usr/bin/psql; then
+ SQL_BINDIR=/usr/local/bin
+ if test -d /usr/lib64/postgresql; then
+ SQL_LIBDIR=/usr/lib64/postgresql
+ elif test -d /usr/lib/postgresql; then
+ SQL_LIBDIR=/usr/lib/postgresql
+ elif test -d /usr/lib64; then
+ SQL_LIBDIR=/usr/lib64
+ else
+ SQL_LIBDIR=/usr/lib
+ fi
+ elif test -f $withval/bin/psql; then
+ SQL_BINDIR=$withval/bin
+ if test -d $withval/lib64; then
+ SQL_LIBDIR=$withval/lib64
+ else
+ SQL_LIBDIR=$withval/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find psql in standard locations)
+ fi
+ if test -f $SQL_LIBDIR/libpq.so; then
+ DB_PROG_LIB=$SQL_LIBDIR/libpq.so
+ else
+ DB_PROG_LIB=$SQL_LIBDIR/libpq.a
+ fi
+ ;;
+ "sqlite")
+ db_prog="sqlite"
+ if test -f /usr/local/bin/sqlite; then
+ SQL_BINDIR=/usr/local/bin
+ if test -d /usr/local/lib64; then
+ SQL_LIBDIR=/usr/local/lib64
+ else
+ SQL_LIBDIR=/usr/local/lib
+ fi
+ elif test -f /usr/bin/sqlite; then
+ SQL_BINDIR=/usr/bin
+ if test -d /usr/lib64; then
+ SQL_LIBDIR=/usr/lib64
+ else
+ SQL_LIBDIR=/usr/lib
+ fi
+ elif test -f $withval/bin/sqlite; then
+ SQL_BINDIR=$withval/bin
+ if test -d $withval/lib64; then
+ SQL_LIBDIR=$withval/lib64
+ else
+ SQL_LIBDIR=$withval/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find sqlite in standard locations)
+ fi
+ if test -f $SQL_LIBDIR/libsqlite.so; then
+ DB_PROG_LIB=$SQL_LIBDIR/libsqlite.so
+ else
+ DB_PROG_LIB=$SQL_LIBDIR/libsqlite.a
+ fi
+ ;;
+ "sqlite3")
+ db_prog="sqlite3"
+ if test -f /usr/local/bin/sqlite3; then
+ SQL_BINDIR=/usr/local/bin
+ if test -d /usr/local/lib64; then
+ SQL_LIBDIR=/usr/local/lib64
+ else
+ SQL_LIBDIR=/usr/local/lib
+ fi
+ elif test -f /usr/bin/sqlite3; then
+ SQL_BINDIR=/usr/bin
+ if test -d /usr/lib64; then
+ SQL_LIBDIR=/usr/lib64
+ else
+ SQL_LIBDIR=/usr/lib
+ fi
+ elif test -f $withval/bin/sqlite3; then
+ SQL_BINDIR=$withval/bin
+ if test -d $withval/lib64; then
+ SQL_LIBDIR=$withval/lib64
+ else
+ SQL_LIBDIR=$withval/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find sqlite in standard locations)
+ fi
+ if test -f $SQL_LIBDIR/libsqlite3.so; then
+ DB_PROG_LIB=$SQL_LIBDIR/libsqlite3.so
+ else
+ DB_PROG_LIB=$SQL_LIBDIR/libsqlite3.a
+ fi
+ ;;
+ *)
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to set DBI driver. $withval is not supported)
+ ;;
+ esac
+
+ AC_MSG_RESULT(yes)
+ DB_PROG=$db_prog
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(SQL_BINDIR)
+AC_SUBST(DB_PROG)
+AC_SUBST(DB_PROG_LIB)
+
+])
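+
+dnl Usage sketch (illustrative only): the driver name must be one of the case
+dnl labels above, e.g.
+dnl   BA_CHECK_DBI_DRIVER
+dnl   ./configure --with-dbi-driver=postgresql
+dnl which sets DB_PROG=postgresql and points DB_PROG_LIB at libpq.so
+dnl (or the static libpq.a when no shared library is found).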
+
+
+AC_DEFUN([BA_CHECK_MYSQL_DB],
+[
+db_found=no
+AC_MSG_CHECKING(for MySQL support)
+AC_ARG_WITH(mysql,
+AC_HELP_STRING([--with-mysql@<:@=DIR@:>@], [Include MySQL support. DIR is the MySQL base install directory, default is to search through a number of common places for the MySQL files.]),
+[
+ if test "$withval" != "no"; then
+ if test "$withval" = "yes"; then
+ if test -f /usr/local/mysql/include/mysql/mysql.h; then
+ MYSQL_INCDIR=/usr/local/mysql/include/mysql
+ if test -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.a \
+ -o -f /usr/local/mysql/lib64/mysql/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql
+ else
+ MYSQL_LIBDIR=/usr/local/mysql/lib/mysql
+ fi
+ MYSQL_BINDIR=/usr/local/mysql/bin
+ elif test -f /usr/include/mysql/mysql.h; then
+ MYSQL_INCDIR=/usr/include/mysql
+ if test -f /usr/lib64/mysql/libmysqlclient_r.a \
+ -o -f /usr/lib64/mysql/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=/usr/lib64/mysql
+ elif test -f /usr/lib64/libmysqlclient_r.a \
+ -o -f /usr/lib64/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=/usr/lib64
+ elif test -f /usr/lib/mysql/libmysqlclient_r.a \
+ -o -f /usr/lib/mysql/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=/usr/lib/mysql
+ else
+ MYSQL_LIBDIR=/usr/lib
+ fi
+ MYSQL_BINDIR=/usr/bin
+ elif test -f /usr/include/mysql.h; then
+ MYSQL_INCDIR=/usr/include
+ if test -f /usr/lib64/libmysqlclient_r.a \
+ -o -f /usr/lib64/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=/usr/lib64
+ else
+ MYSQL_LIBDIR=/usr/lib
+ fi
+ MYSQL_BINDIR=/usr/bin
+ elif test -f /usr/local/include/mysql/mysql.h; then
+ MYSQL_INCDIR=/usr/local/include/mysql
+ if test -f /usr/local/lib64/mysql/libmysqlclient_r.a \
+ -o -f /usr/local/lib64/mysql/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=/usr/local/lib64/mysql
+ else
+ MYSQL_LIBDIR=/usr/local/lib/mysql
+ fi
+ MYSQL_BINDIR=/usr/local/bin
+ elif test -f /usr/local/include/mysql.h; then
+ MYSQL_INCDIR=/usr/local/include
+ if test -f /usr/local/lib64/libmysqlclient_r.a \
+ -o -f /usr/local/lib64/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=/usr/local/lib64
+ else
+ MYSQL_LIBDIR=/usr/local/lib
+ fi
+ MYSQL_BINDIR=/usr/local/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find mysql.h in standard locations)
+ fi
+ else
+ if test -f $withval/include/mysql/mysql.h; then
+ MYSQL_INCDIR=$withval/include/mysql
+ if test -f $withval/lib64/mysql/libmysqlclient_r.a \
+ -o -f $withval/lib64/mysql/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=$withval/lib64/mysql
+ elif test -f $withval/lib64/libmysqlclient_r.a \
+ -o -f $withval/lib64/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=$withval/lib64
+ elif test -f $withval/lib/libmysqlclient_r.a \
+ -o -f $withval/lib/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=$withval/lib
+ else
+ MYSQL_LIBDIR=$withval/lib/mysql
+ fi
+ MYSQL_BINDIR=$withval/bin
+ elif test -f $withval/include/mysql.h; then
+ MYSQL_INCDIR=$withval/include
+ if test -f $withval/lib64/libmysqlclient_r.a \
+ -o -f $withval/lib64/libmysqlclient_r.so; then
+ MYSQL_LIBDIR=$withval/lib64
+ else
+ MYSQL_LIBDIR=$withval/lib
+ fi
+ MYSQL_BINDIR=$withval/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid MySQL directory $withval - unable to find mysql.h under $withval)
+ fi
+ fi
+ SQL_INCLUDE=-I$MYSQL_INCDIR
+ if test -f $MYSQL_LIBDIR/libmysqlclient_r.a \
+ -o -f $MYSQL_LIBDIR/libmysqlclient_r.so; then
+ SQL_LFLAGS="-L$MYSQL_LIBDIR -lmysqlclient_r -lz"
+ AC_DEFINE(HAVE_THREAD_SAFE_MYSQL)
+ fi
+ SQL_BINDIR=$MYSQL_BINDIR
+ SQL_LIB=$MYSQL_LIBDIR/libmysqlclient_r.a
+
+ AC_DEFINE(HAVE_MYSQL, 1, [Set if you have a MySQL Database])
+ AC_MSG_RESULT(yes)
+ db_found=yes
+ support_mysql=yes
+ db_type=MySQL
+ DB_TYPE=mysql
+
+ else
+ AC_MSG_RESULT(no)
+ fi
+]
+)
+
+AC_ARG_WITH(embedded-mysql,
+AC_HELP_STRING([--with-embedded-mysql@<:@=DIR@:>@], [Include embedded MySQL support. DIR is the MySQL base install directory, default is to search through a number of common places for the MySQL files.]),
+[
+ if test "$withval" != "no"; then
+ if test "$withval" = "yes"; then
+ if test -f /usr/local/mysql/include/mysql/mysql.h; then
+ MYSQL_INCDIR=/usr/local/mysql/include/mysql
+ if test -d /usr/local/mysql/lib64/mysql; then
+ MYSQL_LIBDIR=/usr/local/mysql/lib64/mysql
+ else
+ MYSQL_LIBDIR=/usr/local/mysql/lib/mysql
+ fi
+ MYSQL_BINDIR=/usr/local/mysql/bin
+ elif test -f /usr/include/mysql/mysql.h; then
+ MYSQL_INCDIR=/usr/include/mysql
+ if test -d /usr/lib64/mysql; then
+ MYSQL_LIBDIR=/usr/lib64/mysql
+ else
+ MYSQL_LIBDIR=/usr/lib/mysql
+ fi
+ MYSQL_BINDIR=/usr/bin
+ elif test -f /usr/include/mysql.h; then
+ MYSQL_INCDIR=/usr/include
+ if test -d /usr/lib64; then
+ MYSQL_LIBDIR=/usr/lib64
+ else
+ MYSQL_LIBDIR=/usr/lib
+ fi
+ MYSQL_BINDIR=/usr/bin
+ elif test -f /usr/local/include/mysql/mysql.h; then
+ MYSQL_INCDIR=/usr/local/include/mysql
+ if test -d /usr/local/lib64/mysql; then
+ MYSQL_LIBDIR=/usr/local/lib64/mysql
+ else
+ MYSQL_LIBDIR=/usr/local/lib/mysql
+ fi
+ MYSQL_BINDIR=/usr/local/bin
+ elif test -f /usr/local/include/mysql.h; then
+ MYSQL_INCDIR=/usr/local/include
+ if test -d /usr/local/lib64; then
+ MYSQL_LIBDIR=/usr/local/lib64
+ else
+ MYSQL_LIBDIR=/usr/local/lib
+ fi
+ MYSQL_BINDIR=/usr/local/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find mysql.h in standard locations)
+ fi
+ else
+ if test -f $withval/include/mysql/mysql.h; then
+ MYSQL_INCDIR=$withval/include/mysql
+ if test -d $withval/lib64/mysql; then
+ MYSQL_LIBDIR=$withval/lib64/mysql
+ else
+ MYSQL_LIBDIR=$withval/lib/mysql
+ fi
+ MYSQL_BINDIR=$withval/bin
+ elif test -f $withval/include/mysql.h; then
+ MYSQL_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ MYSQL_LIBDIR=$withval/lib64
+ else
+ MYSQL_LIBDIR=$withval/lib
+ fi
+ MYSQL_BINDIR=$withval/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid MySQL directory $withval - unable to find mysql.h under $withval)
+ fi
+ fi
+ SQL_INCLUDE=-I$MYSQL_INCDIR
+ SQL_LFLAGS="-L$MYSQL_LIBDIR -lmysqld -lz -lm -lcrypt"
+ SQL_BINDIR=$MYSQL_BINDIR
+ SQL_LIB=$MYSQL_LIBDIR/libmysqld.a
+
+ AC_DEFINE(HAVE_MYSQL)
+ AC_DEFINE(HAVE_EMBEDDED_MYSQL)
+ AC_MSG_RESULT(yes)
+ db_found=yes
+ support_mysql=yes
+ db_type=MySQL
+ DB_TYPE=mysql
+
+ else
+ AC_MSG_RESULT(no)
+ fi
+]
+)
+
+
+AC_SUBST(SQL_LFLAGS)
+AC_SUBST(SQL_INCLUDE)
+AC_SUBST(SQL_BINDIR)
+
+])
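+
+dnl Usage sketch (illustrative only):
+dnl   BA_CHECK_MYSQL_DB
+dnl   ./configure --with-mysql=DIR      (or plain --with-mysql to probe the
+dnl                                      standard locations above)
+dnl defines HAVE_MYSQL and fills SQL_INCLUDE, SQL_LFLAGS and SQL_BINDIR from
+dnl the detected client library; --with-embedded-mysql links the embedded
+dnl server (libmysqld) instead and also defines HAVE_EMBEDDED_MYSQL.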
+
+
+AC_DEFUN([BA_CHECK_INGRES_DB],
+[
+db_found=no
+AC_MSG_CHECKING(for Ingres support)
+AC_ARG_WITH(ingres,
+AC_HELP_STRING([--with-ingres@<:@=DIR@:>@], [Include Ingres support. DIR is the Ingres base install directory, default is to search through a number of common places for the Ingres files.]),
+[
+ if test "$withval" != "no"; then
+ if test "$withval" = "yes"; then
+ if test -f ${II_SYSTEM}/files/eqdefc.h; then
+ INGRES_INCDIR=${II_SYSTEM}/files
+ INGRES_LIBDIR=${II_SYSTEM}/lib
+ INGRES_BINDIR=${II_SYSTEM}/bin
+ elif test -f ${II_SYSTEM}/ingres/files/eqdefc.h; then
+ INGRES_INCDIR=${II_SYSTEM}/ingres/files
+ INGRES_LIBDIR=${II_SYSTEM}/ingres/lib
+ INGRES_BINDIR=${II_SYSTEM}/ingres/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find eqdefc.h in standard locations)
+ fi
+ else
+ if test -f $withval/files/eqdefc.h; then
+ INGRES_INCDIR=$withval/files
+ INGRES_LIBDIR=$withval/lib
+ INGRES_BINDIR=$withval/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid Ingres directory $withval - unable to find Ingres headers under $withval)
+ fi
+ fi
+ SQL_INCLUDE=-I$INGRES_INCDIR
+ SQL_LFLAGS="-L$INGRES_LIBDIR -lq.1 -lcompat.1 -lframe.1"
+ SQL_BINDIR=$INGRES_BINDIR
+ SQL_LIB=$INGRES_LIBDIR/libingres.a
+ AC_DEFINE(HAVE_INGRES, 1, [Set if you have an Ingres Database])
+ AC_MSG_RESULT(yes)
+ db_found=yes
+ support_ingres=yes
+ db_type=Ingres
+ DB_TYPE=ingres
+
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+
+AC_SUBST(SQL_LFLAGS)
+AC_SUBST(SQL_INCLUDE)
+AC_SUBST(SQL_BINDIR)
+])
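+
+dnl Usage sketch (illustrative only): with a bare --with-ingres the macro
+dnl locates eqdefc.h under ${II_SYSTEM}, otherwise DIR is used directly:
+dnl   BA_CHECK_INGRES_DB
+dnl   ./configure --with-ingres[=DIR]
+dnl On success HAVE_INGRES is defined and the -lq.1/-lcompat.1/-lframe.1
+dnl link flags are exported through SQL_LFLAGS.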
+
+AC_DEFUN([BA_CHECK_SQLITE3_DB],
+[
+db_found=no
+AC_MSG_CHECKING(for SQLite3 support)
+AC_ARG_WITH(sqlite3,
+AC_HELP_STRING([--with-sqlite3@<:@=DIR@:>@], [Include SQLite3 support. DIR is the SQLite3 base install directory, default is to search through a number of common places for the SQLite3 files.]),
+[
+ if test "$withval" != "no"; then
+ if test "$withval" = "yes"; then
+ if test -f /usr/local/include/sqlite3.h; then
+ SQLITE_INCDIR=/usr/local/include
+ if test -d /usr/local/lib64; then
+ SQLITE_LIBDIR=/usr/local/lib64
+ else
+ SQLITE_LIBDIR=/usr/local/lib
+ fi
+ SQLITE_BINDIR=/usr/local/bin
+ elif test -f /usr/include/sqlite3.h; then
+ SQLITE_INCDIR=/usr/include
+ if test -d /usr/lib64; then
+ SQLITE_LIBDIR=/usr/lib64
+ else
+ SQLITE_LIBDIR=/usr/lib
+ fi
+ SQLITE_BINDIR=/usr/bin
+ elif test -f $prefix/include/sqlite3.h; then
+ SQLITE_INCDIR=$prefix/include
+ if test -d $prefix/lib64; then
+ SQLITE_LIBDIR=$prefix/lib64
+ else
+ SQLITE_LIBDIR=$prefix/lib
+ fi
+ SQLITE_BINDIR=$prefix/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find sqlite3.h in standard locations)
+ fi
+ else
+ if test -f $withval/sqlite3.h; then
+ SQLITE_INCDIR=$withval
+ SQLITE_LIBDIR=$withval
+ SQLITE_BINDIR=$withval
+ elif test -f $withval/include/sqlite3.h; then
+ SQLITE_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ SQLITE_LIBDIR=$withval/lib64
+ else
+ SQLITE_LIBDIR=$withval/lib
+ fi
+ SQLITE_BINDIR=$withval/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid SQLite3 directory $withval - unable to find sqlite3.h under $withval)
+ fi
+ fi
+ SQL_INCLUDE=-I$SQLITE_INCDIR
+ SQL_LFLAGS="-L$SQLITE_LIBDIR -lsqlite3"
+ SQL_BINDIR=$SQLITE_BINDIR
+ SQL_LIB=$SQLITE_LIBDIR/libsqlite3.a
+
+ AC_DEFINE(HAVE_SQLITE3)
+ AC_MSG_RESULT(yes)
+ db_found=yes
+ support_sqlite3=yes
+ db_type=SQLite3
+ DB_TYPE=sqlite3
+
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(SQL_LFLAGS)
+AC_SUBST(SQL_INCLUDE)
+AC_SUBST(SQL_BINDIR)
+
+])
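+
+dnl Usage sketch (illustrative only):
+dnl   BA_CHECK_SQLITE3_DB
+dnl   ./configure --with-sqlite3[=DIR]
+dnl locates sqlite3.h (under DIR or in the standard locations above), defines
+dnl HAVE_SQLITE3 and links with -lsqlite3 through SQL_LFLAGS.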
+
+
+AC_DEFUN([BA_CHECK_POSTGRESQL_DB],
+[
+db_found=no
+AC_MSG_CHECKING(for PostgreSQL support)
+AC_ARG_WITH(postgresql,
+AC_HELP_STRING([--with-postgresql@<:@=DIR@:>@], [Include PostgreSQL support. DIR is the PostgreSQL base install directory, @<:@default=/usr/local/pgsql@:>@]),
+[
+ if test "$withval" != "no"; then
+ if test "$db_found" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+ if test "$withval" = "yes"; then
+ PG_CONFIG=`which pg_config`
+ if test -n "$PG_CONFIG";then
+ POSTGRESQL_INCDIR=`"$PG_CONFIG" --includedir`
+ POSTGRESQL_LIBDIR=`"$PG_CONFIG" --libdir`
+ POSTGRESQL_BINDIR=`"$PG_CONFIG" --bindir`
+ elif test -f /usr/local/include/libpq-fe.h; then
+ POSTGRESQL_INCDIR=/usr/local/include
+ if test -d /usr/local/lib64; then
+ POSTGRESQL_LIBDIR=/usr/local/lib64
+ else
+ POSTGRESQL_LIBDIR=/usr/local/lib
+ fi
+ POSTGRESQL_BINDIR=/usr/local/bin
+ elif test -f /usr/include/libpq-fe.h; then
+ POSTGRESQL_INCDIR=/usr/include
+ if test -d /usr/lib64; then
+ POSTGRESQL_LIBDIR=/usr/lib64
+ else
+ POSTGRESQL_LIBDIR=/usr/lib
+ fi
+ POSTGRESQL_BINDIR=/usr/bin
+ elif test -f /usr/include/pgsql/libpq-fe.h; then
+ POSTGRESQL_INCDIR=/usr/include/pgsql
+ if test -d /usr/lib64/pgsql; then
+ POSTGRESQL_LIBDIR=/usr/lib64/pgsql
+ else
+ POSTGRESQL_LIBDIR=/usr/lib/pgsql
+ fi
+ POSTGRESQL_BINDIR=/usr/bin
+ elif test -f /usr/include/postgresql/libpq-fe.h; then
+ POSTGRESQL_INCDIR=/usr/include/postgresql
+ if test -d /usr/lib64/postgresql; then
+ POSTGRESQL_LIBDIR=/usr/lib64/postgresql
+ else
+ POSTGRESQL_LIBDIR=/usr/lib/postgresql
+ fi
+ POSTGRESQL_BINDIR=/usr/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Unable to find libpq-fe.h in standard locations)
+ fi
+ elif test -f $withval/include/libpq-fe.h; then
+ POSTGRESQL_INCDIR=$withval/include
+ POSTGRESQL_LIBDIR=$withval/lib
+ POSTGRESQL_BINDIR=$withval/bin
+ elif test -f $withval/include/postgresql/libpq-fe.h; then
+ POSTGRESQL_INCDIR=$withval/include/postgresql
+ if test -d $withval/lib64; then
+ POSTGRESQL_LIBDIR=$withval/lib64
+ else
+ POSTGRESQL_LIBDIR=$withval/lib
+ fi
+ POSTGRESQL_BINDIR=$withval/bin
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid PostgreSQL directory $withval - unable to find libpq-fe.h under $withval)
+ fi
+ AC_DEFINE(HAVE_POSTGRESQL)
+ AC_MSG_RESULT(yes)
+ POSTGRESQL_LFLAGS="-L$POSTGRESQL_LIBDIR -lpq"
+ AC_CHECK_FUNC(crypt, , AC_CHECK_LIB(crypt, crypt, [POSTGRESQL_LFLAGS="$POSTGRESQL_LFLAGS -lcrypt"]))
+ SQL_INCLUDE=-I$POSTGRESQL_INCDIR
+ SQL_LFLAGS=$POSTGRESQL_LFLAGS
+ SQL_BINDIR=$POSTGRESQL_BINDIR
+ SQL_LIB=$POSTGRESQL_LIBDIR/libpq.a
+
+ db_found=yes
+ support_postgresql=yes
+ db_type=PostgreSQL
+ DB_TYPE=postgresql
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(SQL_LFLAGS)
+AC_SUBST(SQL_INCLUDE)
+AC_SUBST(SQL_BINDIR)
+
+])
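+
+dnl Usage sketch (illustrative only):
+dnl   BA_CHECK_POSTGRESQL_DB
+dnl   ./configure --with-postgresql[=DIR]
+dnl prefers pg_config for the include, lib and bin directories and falls back
+dnl to probing the locations above; -lcrypt is appended to the link flags only
+dnl when crypt() is not already available without it.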
+
+
+
+AC_DEFUN([BA_CHECK_SQL_DB],
+[AC_MSG_CHECKING(for various databases)
+dnl# --------------------------------------------------------------------------
+dnl# CHECKING FOR VARIOUS DATABASES (thanks to UdmSearch team)
+dnl# --------------------------------------------------------------------------
+dnl Check for some DBMS backend
+dnl NOTE: we can use only one backend at a time
+db_found=no
+DB_TYPE=none
+
+if test x$support_mysql = xyes; then
+ cats=cats
+fi
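+
+dnl Most of the backend checks below enforce the one-backend rule with the
+dnl same guard before doing any probing:
+dnl   if test "$have_db" = "yes"; then
+dnl     AC_MSG_ERROR("You can configure for only one database.");
+dnl   fi
+dnl and each sets have_db=yes once its headers and libraries are found.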
+
+AC_MSG_CHECKING(for Berkeley DB support)
+AC_ARG_WITH(berkeleydb,
+AC_HELP_STRING([--with-berkeleydb@<:@=DIR@:>@], [Include Berkeley DB support. DIR is the Berkeley DB base install directory, default is to search through a number of common places for the DB files.]),
+[
+ if test "$withval" != "no"; then
+ if test "$withval" = "yes"; then
+ if test -f /usr/include/db.h; then
+ BERKELEYDB_INCDIR=/usr/include
+ if test -d /usr/lib64; then
+ BERKELEYDB_LIBDIR=/usr/lib64
+ else
+ BERKELEYDB_LIBDIR=/usr/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid Berkeley DB directory - unable to find db.h)
+ fi
+ else
+ if test -f $withval/include/db.h; then
+ BERKELEYDB_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ BERKELEYDB_LIBDIR=$withval/lib64
+ else
+ BERKELEYDB_LIBDIR=$withval/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid Berkeley DB directory - unable to find db.h under $withval)
+ fi
+ fi
+ SQL_INCLUDE=-I$BERKELEYDB_INCDIR
+ SQL_LFLAGS="-L$BERKELEYDB_LIBDIR -ldb"
+
+ AC_DEFINE(HAVE_BERKELEY_DB)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+ support_berkeleydb=yes
+ DB_TYPE=BerkeleyDB
+
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(SQL_LFLAGS)
+AC_SUBST(SQL_INCLUDE)
+
+if test x$support_berkeleydb = xyes; then
+ cats=cats
+fi
+
+
+
+
+AC_MSG_CHECKING(for mSQL support)
+AC_ARG_WITH(msql,
+AC_HELP_STRING([--with-msql@<:@=DIR@:>@], [Include mSQL support. DIR is the mSQL base install directory @<:@default=/usr/local/Hughes@:>@]),
+[
+ if test "$withval" != "no"; then
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+
+ if test "$withval" = "yes"; then
+ MSQL_INCDIR=/usr/local/Hughes/include
+ if test -d /usr/local/Hughes/lib64; then
+ MSQL_LIBDIR=/usr/local/Hughes/lib64
+ else
+ MSQL_LIBDIR=/usr/local/Hughes/lib
+ fi
+ else
+ MSQL_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ MSQL_LIBDIR=$withval/lib64
+ else
+ MSQL_LIBDIR=$withval/lib
+ fi
+ fi
+ MSQL_INCLUDE=-I$MSQL_INCDIR
+ MSQL_LFLAGS="-L$MSQL_LIBDIR -lmsql"
+
+ AC_DEFINE(HAVE_MSQL)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(MSQL_LFLAGS)
+AC_SUBST(MSQL_INCLUDE)
+
+
+AC_MSG_CHECKING(for iODBC support)
+AC_ARG_WITH(iodbc,
+AC_HELP_STRING([--with-iodbc@<:@=DIR@:>@], [Include iODBC support. DIR is the iODBC base install directory @<:@default=/usr/local@:>@]),
+[
+ if test "$withval" != "no"; then
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+ fi
+
+ if test "$withval" = "yes"; then
+ withval=/usr/local
+ fi
+
+ if test "$withval" != "no"; then
+ if test -f $withval/include/isql.h; then
+ IODBC_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ IODBC_LIBDIR=$withval/lib64
+ else
+ IODBC_LIBDIR=$withval/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid iODBC directory - unable to find isql.h)
+ fi
+ IODBC_LFLAGS="-L$IODBC_LIBDIR -liodbc"
+ IODBC_INCLUDE=-I$IODBC_INCDIR
+ AC_DEFINE(HAVE_IODBC)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(IODBC_LFLAGS)
+AC_SUBST(IODBC_INCLUDE)
+
+
+AC_MSG_CHECKING(for unixODBC support)
+AC_ARG_WITH(unixODBC,
+AC_HELP_STRING([--with-unixODBC@<:@=DIR@:>@], [Include unixODBC support. DIR is the unixODBC base install directory @<:@default=/usr/local@:>@]),
+[
+ if test "$withval" != "no"; then
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+ fi
+
+ if test "$withval" = "yes"; then
+ withval=/usr/local
+ fi
+
+ if test "$withval" != "no"; then
+ if test -f $withval/include/sql.h; then
+ UNIXODBC_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ UNIXODBC_LIBDIR=$withval/lib64
+ else
+ UNIXODBC_LIBDIR=$withval/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid unixODBC directory - unable to find sql.h)
+ fi
+ UNIXODBC_LFLAGS="-L$UNIXODBC_LIBDIR -lodbc"
+ UNIXODBC_INCLUDE=-I$UNIXODBC_INCDIR
+ AC_DEFINE(HAVE_UNIXODBC)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(UNIXODBC_LFLAGS)
+AC_SUBST(UNIXODBC_INCLUDE)
+
+
+AC_MSG_CHECKING(for Solid support)
+AC_ARG_WITH(solid,
+AC_HELP_STRING([--with-solid@<:@=DIR@:>@], [Include Solid support. DIR is the Solid base install directory @<:@default=/usr/local@:>@]),
+[
+ if test "$withval" != "no"; then
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+ fi
+
+ if test "$withval" = "yes"; then
+ withval=/usr/local
+ fi
+
+ if test "$withval" != "no"; then
+ if test -f $withval/include/cli0cli.h; then
+ SOLID_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ SOLID_LIBDIR=$withval/lib64
+ else
+ SOLID_LIBDIR=$withval/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid Solid directory - unable to find cli0cli.h)
+ fi
+ SOLID_LFLAGS="-L$SOLID_LIBDIR -lsolcli"
+ SOLID_INCLUDE="-I$SOLID_INCDIR"
+ AC_DEFINE(HAVE_SOLID)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(SOLID_LFLAGS)
+AC_SUBST(SOLID_INCLUDE)
+
+AC_MSG_CHECKING(for OpenLink ODBC support)
+AC_ARG_WITH(openlink,
+AC_HELP_STRING([--with-openlink@<:@=DIR@:>@], [Include OpenLink ODBC support. DIR is the base OpenLink ODBC install directory]),
+[
+ if test "$withval" != "no"; then
+ if test "$withval" = "yes"; then
+
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+
+ if test -f /usr/local/virtuoso-ent/odbcsdk/include/isql.h; then
+ VIRT_INCDIR=/usr/local/virtuoso-ent/odbcsdk/include/
+ if test -d /usr/local/virtuoso-ent/odbcsdk/lib64/; then
+ VIRT_LIBDIR=/usr/local/virtuoso-ent/odbcsdk/lib64/
+ else
+ VIRT_LIBDIR=/usr/local/virtuoso-ent/odbcsdk/lib/
+ fi
+ elif test -f /usr/local/virtuoso-lite/odbcsdk/include/isql.h; then
+ VIRT_INCDIR=/usr/local/virtuoso-lite/odbcsdk/include/
+ if test -d /usr/local/virtuoso-lite/odbcsdk/lib64/; then
+ VIRT_LIBDIR=/usr/local/virtuoso-lite/odbcsdk/lib64/
+ else
+ VIRT_LIBDIR=/usr/local/virtuoso-lite/odbcsdk/lib/
+ fi
+ elif test -f /usr/local/virtuoso/odbcsdk/include/isql.h; then
+ VIRT_INCDIR=/usr/local/virtuoso/odbcsdk/include/
+ if test -d /usr/local/virtuoso/odbcsdk/lib64/; then
+ VIRT_LIBDIR=/usr/local/virtuoso/odbcsdk/lib64/
+ else
+ VIRT_LIBDIR=/usr/local/virtuoso/odbcsdk/lib/
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid OpenLink ODBC directory - unable to find isql.h)
+ fi
+ else
+ if test -f $withval/odbcsdk/include/isql.h; then
+ VIRT_INCDIR=$withval/odbcsdk/include/
+ if test -d $withval/odbcsdk/lib64/; then
+ VIRT_LIBDIR=$withval/odbcsdk/lib64/
+ else
+ VIRT_LIBDIR=$withval/odbcsdk/lib/
+ fi
+ elif test -f $withval/include/isql.h; then
+ VIRT_INCDIR=$withval/include/
+ if test -d $withval/lib64/; then
+ VIRT_LIBDIR=$withval/lib64/
+ else
+ VIRT_LIBDIR=$withval/lib/
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid OpenLink ODBC directory - unable to find isql.h under $withval)
+ fi
+ fi
+ VIRT_INCLUDE=-I$VIRT_INCDIR
+ VIRT_LFLAGS="-L$VIRT_LIBDIR -liodbc"
+
+ AC_DEFINE(HAVE_VIRT)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(VIRT_LFLAGS)
+AC_SUBST(VIRT_INCLUDE)
+
+
+AC_MSG_CHECKING(for EasySoft ODBC support)
+AC_ARG_WITH(easysoft,
+AC_HELP_STRING([--with-easysoft@<:@=DIR@:>@], [Include EasySoft ODBC support. DIR is the base EasySoft ODBC install directory]),
+[
+ if test "$withval" != "no"; then
+ if test "$withval" = "yes"; then
+
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+
+ if test -f /usr/local/easysoft/oob/client/include/sql.h; then
+ EASYSOFT_INCDIR=/usr/local/easysoft/oob/client/include/
+ if test -d /usr/local/easysoft/oob/client/lib64/; then
+ EASYSOFT_LFLAGS="-L/usr/local/easysoft/oob/client/lib64/ -L/usr/local/easysoft/lib64"
+ else
+ EASYSOFT_LFLAGS="-L/usr/local/easysoft/oob/client/lib/ -L/usr/local/easysoft/lib"
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid EasySoft ODBC directory - unable to find sql.h)
+ fi
+ else
+ if test -f $withval/easysoft/oob/client/include/sql.h; then
+ EASYSOFT_INCDIR=$withval/easysoft/oob/client/include/
+ if test -d $withval/easysoft/oob/client/lib64/; then
+ EASYSOFT_LFLAGS="-L$withval/easysoft/oob/client/lib64/ -L$withval/easysoft/lib64"
+ else
+ EASYSOFT_LFLAGS="-L$withval/easysoft/oob/client/lib/ -L$withval/easysoft/lib"
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid EasySoft ODBC directory - unable to find sql.h under $withval)
+ fi
+ fi
+ EASYSOFT_INCLUDE=-I$EASYSOFT_INCDIR
+ EASYSOFT_LFLAGS="$EASYSOFT_LFLAGS -lesoobclient -lesrpc -lsupport -lextra"
+
+ AC_DEFINE(HAVE_EASYSOFT)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+
+ else
+ AC_MSG_RESULT(no)
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(EASYSOFT_LFLAGS)
+AC_SUBST(EASYSOFT_INCLUDE)
+
+
+
+AC_MSG_CHECKING(for InterBase support)
+AC_ARG_WITH(ibase,
+AC_HELP_STRING([--with-ibase@<:@=DIR@:>@], [Include InterBase support. DIR is the InterBase install directory @<:@default=/usr/interbase@:>@]),
+[
+ if test "$withval" != "no"; then
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+ fi
+
+ if test "$withval" = "yes"; then
+ withval=/usr/interbase
+ fi
+
+ if test "$withval" != "no"; then
+ if test -f $withval/include/ibase.h; then
+ IBASE_INCDIR=$withval/include
+ if test -d $withval/lib64; then
+ IBASE_LIBDIR=$withval/lib64
+ else
+ IBASE_LIBDIR=$withval/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid InterBase directory - unable to find ibase.h)
+ fi
+ IBASE_LFLAGS="-L$IBASE_LIBDIR -lgds"
+ IBASE_INCLUDE=-I$IBASE_INCDIR
+ AC_DEFINE(HAVE_IBASE)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(IBASE_LFLAGS)
+AC_SUBST(IBASE_INCLUDE)
+
+AC_MSG_CHECKING(for Oracle8 support)
+AC_ARG_WITH(oracle8,
+AC_HELP_STRING([--with-oracle8@<:@=DIR@:>@], [Include Oracle8 support. DIR is the Oracle home directory @<:@default=$ORACLE_HOME or /oracle8/app/oracle/product/8.0.5@:>@]),
+[
+ if test "$withval" != "no"; then
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+ fi
+
+ if test "$withval" = "yes"; then
+ withval="$ORACLE_HOME"
+ if test "$withval" = ""; then
+ withval=/oracle8/app/oracle/product/8.0.5
+ fi
+ fi
+
+ if test "$withval" != "no"; then
+ if test -f $withval/rdbms/demo/oci.h; then
+ ORACLE8_INCDIR1=$withval/rdbms/demo/
+        ORACLE8_INCDIR2=$withval/rdbms/public/
+ ORACLE8_INCDIR3=$withval/network/public/
+ ORACLE8_INCDIR4=$withval/plsql/public/
+ if test -d $withval/lib64; then
+ ORACLE8_LIBDIR1=$withval/lib64
+ else
+ ORACLE8_LIBDIR1=$withval/lib
+ fi
+ if test -d $withval/rdbms/lib64; then
+ ORACLE8_LIBDIR2=$withval/rdbms/lib64
+ else
+ ORACLE8_LIBDIR2=$withval/rdbms/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid ORACLE directory - unable to find oci.h)
+ fi
+ if test -f $withval/lib64/libclntsh.so; then
+ ORACLE8_LFLAGS="-L$ORACLE8_LIBDIR1 -L$ORACLE8_LIBDIR2 $withval/lib64/libclntsh.so -lmm -lepc -lclient -lvsn -lcommon -lgeneric -lcore4 -lnlsrtl3 -lnsl -lm -ldl -lnetv2 -lnttcp -lnetwork -lncr -lsql"
+ else
+ ORACLE8_LFLAGS="-L$ORACLE8_LIBDIR1 -L$ORACLE8_LIBDIR2 $withval/lib/libclntsh.so -lmm -lepc -lclient -lvsn -lcommon -lgeneric -lcore4 -lnlsrtl3 -lnsl -lm -ldl -lnetv2 -lnttcp -lnetwork -lncr -lsql"
+ fi
+ ORACLE8_INCLUDE="-I$ORACLE8_INCDIR1 -I$ORACLE8_INCDIR2 -I$ORACLE8_INCDIR3 -I$ORACLE8_INCDIR4"
+ AC_DEFINE(HAVE_ORACLE8)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(ORACLE8_LFLAGS)
+AC_SUBST(ORACLE8_INCLUDE)
+
+
+AC_MSG_CHECKING(for Oracle7 support)
+AC_ARG_WITH(oracle7,
+AC_HELP_STRING([--with-oracle7@<:@=DIR@:>@], [Include Oracle 7.3 support. DIR is the Oracle home directory @<:@default=$ORACLE_HOME@:>@]),
+[
+ if test "$withval" != "no"; then
+ if test "$have_db" = "yes"; then
+ AC_MSG_RESULT(error)
+ AC_MSG_ERROR("You can configure for only one database.");
+ fi
+ fi
+
+ if test "$withval" = "yes"; then
+ withval="$ORACLE_HOME"
+ fi
+
+ if test "$withval" != "no"; then
+ if test -f $withval/rdbms/demo/ocidfn.h; then
+ ORACLE7_INCDIR=$withval/rdbms/demo/
+ if test -d $withval/lib64; then
+ ORACLE7_LIBDIR1=$withval/lib64
+ else
+ ORACLE7_LIBDIR1=$withval/lib
+ fi
+ if test -d $withval/rdbms/lib64; then
+ ORACLE7_LIBDIR2=$withval/rdbms/lib64
+ else
+ ORACLE7_LIBDIR2=$withval/rdbms/lib
+ fi
+ else
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR(Invalid ORACLE directory - unable to find ocidfn.h)
+ fi
+
+ ORACLEINST_TOP=$withval
+ if test -f "$ORACLEINST_TOP/rdbms/lib/sysliblist"
+ then
+ ORA_SYSLIB="`cat $ORACLEINST_TOP/rdbms/lib/sysliblist`"
+ elif test -f "$ORACLEINST_TOP/rdbms/lib64/sysliblist"
+ then
+ ORA_SYSLIB="`cat $ORACLEINST_TOP/rdbms/lib64/sysliblist`"
+ elif test -f "$ORACLEINST_TOP/lib/sysliblist"
+ then
+ ORA_SYSLIB="`cat $ORACLEINST_TOP/lib/sysliblist`"
+ elif test -f "$ORACLEINST_TOP/lib64/sysliblist"
+ then
+ ORA_SYSLIB="`cat $ORACLEINST_TOP/lib64/sysliblist`"
+ else
+ ORA_SYSLIB="-lm"
+ fi
+
+ ORACLE7_LFLAGS="-L$ORACLE7_LIBDIR1 -L$ORACLE7_LIBDIR2 \
+ -lclient -lsqlnet -lncr -lsqlnet -lclient -lcommon \
+ -lgeneric -lsqlnet -lncr -lsqlnet -lclient -lcommon -lgeneric \
+ -lepc -lnlsrtl3 -lc3v6 -lcore3 -lnlsrtl3 -lcore3 -lnlsrtl3 \
+ $ORA_SYSLIB -lcore3 $ORA_SYSLIB"
+ ORACLE7_INCLUDE="-I$ORACLE7_INCDIR "
+ AC_DEFINE(HAVE_ORACLE7)
+ AC_MSG_RESULT(yes)
+ have_db=yes
+ fi
+],[
+ AC_MSG_RESULT(no)
+])
+AC_SUBST(ORACLE7_LFLAGS)
+AC_SUBST(ORACLE7_INCLUDE)
+])
+
+
+AC_DEFUN([AM_CONDITIONAL],
+[AC_SUBST($1_TRUE)
+AC_SUBST($1_FALSE)
+if $2; then
+ $1_TRUE=
+ $1_FALSE='#'
+else
+ $1_TRUE='#'
+ $1_FALSE=
+fi])
+++ /dev/null
-dnl # Copyright © 2008 Tim Toolan <toolan@ele.uri.edu>
-dnl #
-dnl # Copying and distribution of this file, with or without modification,
-dnl # are permitted in any medium without royalty provided the copyright notice
-dnl # and this notice are preserved.
-
-dnl #########################################################################
-AC_DEFUN([AX_PATH_BDB], [
- dnl # Used to indicate success or failure of this function.
- ax_path_bdb_ok=no
-
- # Add --with-bdb-dir option to configure.
- AC_ARG_WITH([bdb-dir],
- [AC_HELP_STRING([--with-bdb-dir=DIR],
- [Berkeley DB installation directory])])
-
- # Check if --with-bdb-dir was specified.
- if test "x$with_bdb_dir" = "x" ; then
- # No option specified, so just search the system.
- AX_PATH_BDB_NO_OPTIONS([$1], [HIGHEST], [
- ax_path_bdb_ok=yes
- ])
- else
- # Set --with-bdb-dir option.
- ax_path_bdb_INC="$with_bdb_dir/include"
- ax_path_bdb_LIB="$with_bdb_dir/lib"
-
- dnl # Save previous environment, and modify with new stuff.
- ax_path_bdb_save_CPPFLAGS="$CPPFLAGS"
- CPPFLAGS="-I$ax_path_bdb_INC $CPPFLAGS"
-
- ax_path_bdb_save_LDFLAGS=$LDFLAGS
- LDFLAGS="-L$ax_path_bdb_LIB $LDFLAGS"
-
- # Check for specific header file db.h
- AC_MSG_CHECKING([db.h presence in $ax_path_bdb_INC])
- if test -f "$ax_path_bdb_INC/db.h" ; then
- AC_MSG_RESULT([yes])
- # Check for library
- AX_PATH_BDB_NO_OPTIONS([$1], [ENVONLY], [
- ax_path_bdb_ok=yes
- BDB_CPPFLAGS="-I$ax_path_bdb_INC"
- BDB_LDFLAGS="-L$ax_path_bdb_LIB"
- ])
- else
- AC_MSG_RESULT([no])
- AC_MSG_NOTICE([no usable Berkeley DB not found])
- fi
-
- dnl # Restore the environment.
- CPPFLAGS="$ax_path_bdb_save_CPPFLAGS"
- LDFLAGS="$ax_path_bdb_save_LDFLAGS"
-
- fi
-
- dnl # Execute ACTION-IF-FOUND / ACTION-IF-NOT-FOUND.
- if test "$ax_path_bdb_ok" = "yes" ; then
- m4_ifvaln([$2],[$2],[:])dnl
- m4_ifvaln([$3],[else $3])dnl
- fi
-
-]) dnl AX_PATH_BDB
-
-dnl #########################################################################
-dnl Check for berkeley DB of at least MINIMUM-VERSION on system.
-dnl
-dnl The OPTION argument determines how the checks occur, and can be one of:
-dnl
-dnl HIGHEST - Check both the environment and the default installation
-dnl directories for Berkeley DB and choose the version that
-dnl is highest. (default)
-dnl ENVFIRST - Check the environment first, and if no satisfactory
-dnl library is found there check the default installation
-dnl directories for Berkeley DB which is /usr/local/BerkeleyDB*
-dnl ENVONLY - Check the current environment only.
-dnl
-dnl Requires AX_PATH_BDB_PATH_GET_VERSION, AX_PATH_BDB_PATH_FIND_HIGHEST,
-dnl AX_PATH_BDB_ENV_CONFIRM_LIB, AX_PATH_BDB_ENV_GET_VERSION, and
-dnl AX_COMPARE_VERSION macros.
-dnl
-dnl Result: sets ax_path_bdb_no_options_ok to yes or no
-dnl sets BDB_LIBS, BDB_CPPFLAGS, BDB_LDFLAGS, BDB_VERSION
-dnl
-dnl AX_PATH_BDB_NO_OPTIONS([MINIMUM-VERSION], [OPTION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
-AC_DEFUN([AX_PATH_BDB_NO_OPTIONS], [
- dnl # Used to indicate success or failure of this function.
- ax_path_bdb_no_options_ok=no
-
- # Values to add to environment to use Berkeley DB.
- BDB_VERSION=''
- BDB_LIBS=''
- BDB_CPPFLAGS=''
- BDB_LDFLAGS=''
-
- # Check cross compilation here.
- if test "x$cross_compiling" = "xyes" ; then
- # If cross compiling, can't use AC_RUN_IFELSE so do these tests.
- # The AC_PREPROC_IFELSE confirms that db.h is preprocessable,
- # and extracts the version number from it.
- AC_MSG_CHECKING([for db.h])
-
- AS_VAR_PUSHDEF([HEADER_VERSION],[ax_path_bdb_no_options_HEADER_VERSION])dnl
- HEADER_VERSION=''
- AC_PREPROC_IFELSE([
- AC_LANG_SOURCE([[
-#include <db.h>
-AX_PATH_BDB_STUFF DB_VERSION_MAJOR,DB_VERSION_MINOR,DB_VERSION_PATCH
- ]])
- ],[
- # Extract version from preprocessor output.
- HEADER_VERSION=`eval "$ac_cpp conftest.$ac_ext" 2> /dev/null \
- | grep AX_PATH_BDB_STUFF | sed 's/[[^0-9,]]//g;s/,/./g;1q'`
- ],[])
-
- if test "x$HEADER_VERSION" = "x" ; then
- AC_MSG_RESULT([no])
- else
- AC_MSG_RESULT([$HEADER_VERSION])
-
- # Check that version is high enough.
- AX_COMPARE_VERSION([$HEADER_VERSION],[ge],[$1],[
- # get major and minor version numbers
- AS_VAR_PUSHDEF([MAJ],[ax_path_bdb_no_options_MAJOR])dnl
- MAJ=`echo $HEADER_VERSION | sed 's,\..*,,'`
- AS_VAR_PUSHDEF([MIN],[ax_path_bdb_no_options_MINOR])dnl
- MIN=`echo $HEADER_VERSION | sed 's,^[[0-9]]*\.,,;s,\.[[0-9]]*$,,'`
-
- dnl # Save LIBS.
- ax_path_bdb_no_options_save_LIBS="$LIBS"
-
- # Check that we can link with the library.
- AC_SEARCH_LIBS([db_version],
- [db db-$MAJ.$MIN db$MAJ.$MIN db$MAJ$MIN db-$MAJ db$MAJ],[
- # Sucessfully found library.
- ax_path_bdb_no_options_ok=yes
- BDB_VERSION=$HEADER_VERSION
-
- # Extract library from LIBS
- ax_path_bdb_no_options_LEN=` \
- echo "x$ax_path_bdb_no_options_save_LIBS" \
- | awk '{print(length)}'`
- BDB_LIBS=`echo "x$LIBS " \
- | sed "s/.\{$ax_path_bdb_no_options_LEN\}\$//;s/^x//;s/ //g"`
- ],[])
-
- dnl # Restore LIBS
- LIBS="$ax_path_bdb_no_options_save_LIBS"
-
- AS_VAR_POPDEF([MAJ])dnl
- AS_VAR_POPDEF([MIN])dnl
- ])
- fi
-
- AS_VAR_POPDEF([HEADER_VERSION])dnl
- else
- # Not cross compiling.
- # Check version of Berkeley DB in the current environment.
- AX_PATH_BDB_ENV_GET_VERSION([
- AX_COMPARE_VERSION([$ax_path_bdb_env_get_version_VERSION],[ge],[$1],[
- # Found acceptable version in current environment.
- ax_path_bdb_no_options_ok=yes
- BDB_VERSION="$ax_path_bdb_env_get_version_VERSION"
- BDB_LIBS="$ax_path_bdb_env_get_version_LIBS"
- ])
- ])
-
- # Determine if we need to search /usr/local/BerkeleyDB*
- ax_path_bdb_no_options_DONE=no
- if test "x$2" = "xENVONLY" ; then
- ax_path_bdb_no_options_DONE=yes
- elif test "x$2" = "xENVFIRST" ; then
- ax_path_bdb_no_options_DONE=$ax_path_bdb_no_options_ok
- fi
-
- if test "$ax_path_bdb_no_options_DONE" = "no" ; then
- # Check for highest in /usr/local/BerkeleyDB*
- AX_PATH_BDB_PATH_FIND_HIGHEST([
- if test "$ax_path_bdb_no_options_ok" = "yes" ; then
- # If we already have an acceptable version use this if higher.
- AX_COMPARE_VERSION(
- [$ax_path_bdb_path_find_highest_VERSION],[gt],[$BDB_VERSION])
- else
- # Since we didn't have an acceptable version check if this one is.
- AX_COMPARE_VERSION(
- [$ax_path_bdb_path_find_highest_VERSION],[ge],[$1])
- fi
- ])
-
- dnl # If result from _AX_COMPARE_VERSION is true we want this version.
- if test "$ax_compare_version" = "true" ; then
- ax_path_bdb_no_options_ok=yes
- BDB_LIBS="-ldb"
- if test "x$ax_path_bdb_path_find_highest_DIR" != x ; then
- BDB_CPPFLAGS="-I$ax_path_bdb_path_find_highest_DIR/include"
- BDB_LDFLAGS="-L$ax_path_bdb_path_find_highest_DIR/lib"
- fi
- BDB_VERSION="$ax_path_bdb_path_find_highest_VERSION"
- fi
- fi
- fi
-
- dnl # Execute ACTION-IF-FOUND / ACTION-IF-NOT-FOUND.
- if test "$ax_path_bdb_no_options_ok" = "yes" ; then
- AC_MSG_NOTICE([using Berkeley DB version $BDB_VERSION])
- AC_DEFINE([HAVE_DB_H],[1],
- [Define to 1 if you have the <db.h> header file.])
- m4_ifvaln([$3],[$3])dnl
- else
- AC_MSG_NOTICE([no Berkeley DB version $1 or higher found])
- m4_ifvaln([$4],[$4])dnl
- fi
-]) dnl AX_PATH_BDB_NO_OPTIONS
-
-dnl #########################################################################
-dnl Check the default installation directory for Berkeley DB which is
-dnl of the form /usr/local/BerkeleyDB* for the highest version.
-dnl
-dnl Result: sets ax_path_bdb_path_find_highest_ok to yes or no,
-dnl sets ax_path_bdb_path_find_highest_VERSION to version,
-dnl sets ax_path_bdb_path_find_highest_DIR to directory.
-dnl
-dnl AX_PATH_BDB_PATH_FIND_HIGHEST([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
-AC_DEFUN([AX_PATH_BDB_PATH_FIND_HIGHEST], [
- dnl # Used to indicate success or failure of this function.
- ax_path_bdb_path_find_highest_ok=no
-
- AS_VAR_PUSHDEF([VERSION],[ax_path_bdb_path_find_highest_VERSION])dnl
- VERSION=''
-
- ax_path_bdb_path_find_highest_DIR=''
-
- # find highest verison in default install directory for Berkeley DB
- AS_VAR_PUSHDEF([CURDIR],[ax_path_bdb_path_find_highest_CURDIR])dnl
- AS_VAR_PUSHDEF([CUR_VERSION],[ax_path_bdb_path_get_version_VERSION])dnl
-
- for CURDIR in `ls -d /usr/local/BerkeleyDB* 2> /dev/null`
- do
- AX_PATH_BDB_PATH_GET_VERSION([$CURDIR],[
- AX_COMPARE_VERSION([$CUR_VERSION],[gt],[$VERSION],[
- ax_path_bdb_path_find_highest_ok=yes
- ax_path_bdb_path_find_highest_DIR="$CURDIR"
- VERSION="$CUR_VERSION"
- ])
- ])
- done
-
- AS_VAR_POPDEF([VERSION])dnl
- AS_VAR_POPDEF([CUR_VERSION])dnl
- AS_VAR_POPDEF([CURDIR])dnl
-
- dnl # Execute ACTION-IF-FOUND / ACTION-IF-NOT-FOUND.
- if test "$ax_path_bdb_path_find_highest_ok" = "yes" ; then
- m4_ifvaln([$1],[$1],[:])dnl
- m4_ifvaln([$2],[else $2])dnl
- fi
-
-]) dnl AX_PATH_BDB_PATH_FIND_HIGHEST
-
-dnl #########################################################################
-dnl Checks for Berkeley DB in specified directory's lib and include
-dnl subdirectories.
-dnl
-dnl Result: sets ax_path_bdb_path_get_version_ok to yes or no,
-dnl sets ax_path_bdb_path_get_version_VERSION to version.
-dnl
-dnl AX_PATH_BDB_PATH_GET_VERSION(BDB-DIR, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
-AC_DEFUN([AX_PATH_BDB_PATH_GET_VERSION], [
- dnl # Used to indicate success or failure of this function.
- ax_path_bdb_path_get_version_ok=no
-
- # Indicate status of checking for Berkeley DB header.
- AC_MSG_CHECKING([in $1/include for db.h])
- ax_path_bdb_path_get_version_got_header=no
- test -f "$1/include/db.h" && ax_path_bdb_path_get_version_got_header=yes
- AC_MSG_RESULT([$ax_path_bdb_path_get_version_got_header])
-
- # Indicate status of checking for Berkeley DB library.
- AC_MSG_CHECKING([in $1/lib for library -ldb])
-
- ax_path_bdb_path_get_version_VERSION=''
-
- if test -d "$1/include" && test -d "$1/lib" &&
- test "$ax_path_bdb_path_get_version_got_header" = "yes" ; then
- dnl # save and modify environment
- ax_path_bdb_path_get_version_save_CPPFLAGS="$CPPFLAGS"
- CPPFLAGS="-I$1/include $CPPFLAGS"
-
- ax_path_bdb_path_get_version_save_LIBS="$LIBS"
- LIBS="$LIBS -ldb"
-
- ax_path_bdb_path_get_version_save_LDFLAGS="$LDFLAGS"
- LDFLAGS="-L$1/lib $LDFLAGS"
-
- # Compile and run a program that compares the version defined in
- # the header file with a version defined in the library function
- # db_version.
- AC_RUN_IFELSE([
- AC_LANG_SOURCE([[
-#include <stdio.h>
-#include <db.h>
-int main(int argc,char **argv)
-{
- int major,minor,patch;
- (void) argv;
- db_version(&major,&minor,&patch);
- if (argc > 1)
- printf("%d.%d.%d\n",DB_VERSION_MAJOR,DB_VERSION_MINOR,DB_VERSION_PATCH);
- if (DB_VERSION_MAJOR == major && DB_VERSION_MINOR == minor &&
- DB_VERSION_PATCH == patch)
- return 0;
- else
- return 1;
-}
- ]])
- ],[
- # Program compiled and ran, so get version by adding argument.
- ax_path_bdb_path_get_version_VERSION=`./conftest$ac_exeext x`
- ax_path_bdb_path_get_version_ok=yes
- ],[],[])
-
- dnl # restore environment
- CPPFLAGS="$ax_path_bdb_path_get_version_save_CPPFLAGS"
- LIBS="$ax_path_bdb_path_get_version_save_LIBS"
- LDFLAGS="$ax_path_bdb_path_get_version_save_LDFLAGS"
- fi
-
- dnl # Finally, execute ACTION-IF-FOUND / ACTION-IF-NOT-FOUND.
- if test "$ax_path_bdb_path_get_version_ok" = "yes" ; then
- AC_MSG_RESULT([$ax_path_bdb_path_get_version_VERSION])
- m4_ifvaln([$2],[$2])dnl
- else
- AC_MSG_RESULT([no])
- m4_ifvaln([$3],[$3])dnl
- fi
-]) dnl AX_PATH_BDB_PATH_GET_VERSION
-
-#############################################################################
-dnl Checks if version of library and header match specified version.
-dnl Only meant to be used by AX_PATH_BDB_ENV_GET_VERSION macro.
-dnl
-dnl Requires AX_COMPARE_VERSION macro.
-dnl
-dnl Result: sets ax_path_bdb_env_confirm_lib_ok to yes or no.
-dnl
-dnl AX_PATH_BDB_ENV_CONFIRM_LIB(VERSION, [LIBNAME])
-AC_DEFUN([AX_PATH_BDB_ENV_CONFIRM_LIB], [
- dnl # Used to indicate success or failure of this function.
- ax_path_bdb_env_confirm_lib_ok=no
-
- dnl # save and modify environment to link with library LIBNAME
- ax_path_bdb_env_confirm_lib_save_LIBS="$LIBS"
- LIBS="$LIBS $2"
-
- # Compile and run a program that compares the version defined in
- # the header file with a version defined in the library function
- # db_version.
- AC_RUN_IFELSE([
- AC_LANG_SOURCE([[
-#include <stdio.h>
-#include <db.h>
-int main(int argc,char **argv)
-{
- int major,minor,patch;
- (void) argv;
- db_version(&major,&minor,&patch);
- if (argc > 1)
- printf("%d.%d.%d\n",DB_VERSION_MAJOR,DB_VERSION_MINOR,DB_VERSION_PATCH);
- if (DB_VERSION_MAJOR == major && DB_VERSION_MINOR == minor &&
- DB_VERSION_PATCH == patch)
- return 0;
- else
- return 1;
-}
- ]])
- ],[
- # Program compiled and ran, so get version by giving an argument,
- # which will tell the program to print the output.
- ax_path_bdb_env_confirm_lib_VERSION=`./conftest$ac_exeext x`
-
- # If the versions all match up, indicate success.
- AX_COMPARE_VERSION([$ax_path_bdb_env_confirm_lib_VERSION],[eq],[$1],[
- ax_path_bdb_env_confirm_lib_ok=yes
- ])
- ],[],[])
-
- dnl # restore environment
- LIBS="$ax_path_bdb_env_confirm_lib_save_LIBS"
-
-]) dnl AX_PATH_BDB_ENV_CONFIRM_LIB
-
-#############################################################################
-dnl Finds the version and library name for Berkeley DB in the
-dnl current environment. Tries many different names for library.
-dnl
-dnl Requires AX_PATH_BDB_ENV_CONFIRM_LIB macro.
-dnl
-dnl Result: set ax_path_bdb_env_get_version_ok to yes or no,
-dnl set ax_path_bdb_env_get_version_VERSION to the version found,
-dnl and ax_path_bdb_env_get_version_LIBNAME to the library name.
-dnl
-dnl AX_PATH_BDB_ENV_GET_VERSION([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
-AC_DEFUN([AX_PATH_BDB_ENV_GET_VERSION], [
- dnl # Used to indicate success or failure of this function.
- ax_path_bdb_env_get_version_ok=no
-
- ax_path_bdb_env_get_version_VERSION=''
- ax_path_bdb_env_get_version_LIBS=''
-
- AS_VAR_PUSHDEF([HEADER_VERSION],[ax_path_bdb_env_get_version_HEADER_VERSION])dnl
- AS_VAR_PUSHDEF([TEST_LIBNAME],[ax_path_bdb_env_get_version_TEST_LIBNAME])dnl
-
- # Indicate status of checking for Berkeley DB library.
- AC_MSG_CHECKING([for db.h])
-
- # Compile and run a program that determines the Berkeley DB version
- # in the header file db.h.
- HEADER_VERSION=''
- AC_RUN_IFELSE([
- AC_LANG_SOURCE([[
-#include <stdio.h>
-#include <db.h>
-int main(int argc,char **argv)
-{
- (void) argv;
- if (argc > 1)
- printf("%d.%d.%d\n",DB_VERSION_MAJOR,DB_VERSION_MINOR,DB_VERSION_PATCH);
- return 0;
-}
- ]])
- ],[
- # Program compiled and ran, so get version by adding an argument.
- HEADER_VERSION=`./conftest$ac_exeext x`
- AC_MSG_RESULT([$HEADER_VERSION])
- ],[AC_MSG_RESULT([no])],[AC_MSG_RESULT([no])])
-
- # Have header version, so try to find corresponding library.
- # Looks for library names in the order:
- # nothing, db, db-X.Y, dbX.Y, dbXY, db-X, dbX
- # and stops when it finds the first one that matches the version
- # of the header file.
- if test "x$HEADER_VERSION" != "x" ; then
- AC_MSG_CHECKING([for library containing Berkeley DB $HEADER_VERSION])
-
- AS_VAR_PUSHDEF([MAJOR],[ax_path_bdb_env_get_version_MAJOR])dnl
- AS_VAR_PUSHDEF([MINOR],[ax_path_bdb_env_get_version_MINOR])dnl
-
- # get major and minor version numbers
- MAJOR=`echo $HEADER_VERSION | sed 's,\..*,,'`
- MINOR=`echo $HEADER_VERSION | sed 's,^[[0-9]]*\.,,;s,\.[[0-9]]*$,,'`
-
- # see if it is already specified in LIBS
- TEST_LIBNAME=''
- AX_PATH_BDB_ENV_CONFIRM_LIB([$HEADER_VERSION], [$TEST_LIBNAME])
-
- if test "$ax_path_bdb_env_confirm_lib_ok" = "no" ; then
- # try format "db"
- TEST_LIBNAME='-ldb'
- AX_PATH_BDB_ENV_CONFIRM_LIB([$HEADER_VERSION], [$TEST_LIBNAME])
- fi
-
- if test "$ax_path_bdb_env_confirm_lib_ok" = "no" ; then
- # try format "db-X.Y"
- TEST_LIBNAME="-ldb-${MAJOR}.$MINOR"
- AX_PATH_BDB_ENV_CONFIRM_LIB([$HEADER_VERSION], [$TEST_LIBNAME])
- fi
-
- if test "$ax_path_bdb_env_confirm_lib_ok" = "no" ; then
- # try format "dbX.Y"
- TEST_LIBNAME="-ldb${MAJOR}.$MINOR"
- AX_PATH_BDB_ENV_CONFIRM_LIB([$HEADER_VERSION], [$TEST_LIBNAME])
- fi
-
- if test "$ax_path_bdb_env_confirm_lib_ok" = "no" ; then
- # try format "dbXY"
- TEST_LIBNAME="-ldb$MAJOR$MINOR"
- AX_PATH_BDB_ENV_CONFIRM_LIB([$HEADER_VERSION], [$TEST_LIBNAME])
- fi
-
- if test "$ax_path_bdb_env_confirm_lib_ok" = "no" ; then
- # try format "db-X"
- TEST_LIBNAME="-ldb-$MAJOR"
- AX_PATH_BDB_ENV_CONFIRM_LIB([$HEADER_VERSION], [$TEST_LIBNAME])
- fi
-
- if test "$ax_path_bdb_env_confirm_lib_ok" = "no" ; then
- # try format "dbX"
- TEST_LIBNAME="-ldb$MAJOR"
- AX_PATH_BDB_ENV_CONFIRM_LIB([$HEADER_VERSION], [$TEST_LIBNAME])
- fi
-
- dnl # Found a valid library.
- if test "$ax_path_bdb_env_confirm_lib_ok" = "yes" ; then
- if test "x$TEST_LIBNAME" = "x" ; then
- AC_MSG_RESULT([none required])
- else
- AC_MSG_RESULT([$TEST_LIBNAME])
- fi
- ax_path_bdb_env_get_version_VERSION="$HEADER_VERSION"
- ax_path_bdb_env_get_version_LIBS="$TEST_LIBNAME"
- ax_path_bdb_env_get_version_ok=yes
- else
- AC_MSG_RESULT([no])
- fi
-
- AS_VAR_POPDEF([MAJOR])dnl
- AS_VAR_POPDEF([MINOR])dnl
- fi
-
- AS_VAR_POPDEF([HEADER_VERSION])dnl
- AS_VAR_POPDEF([TEST_LIBNAME])dnl
-
- dnl # Execute ACTION-IF-FOUND / ACTION-IF-NOT-FOUND.
- if test "$ax_path_bdb_env_confirm_lib_ok" = "yes" ; then
- m4_ifvaln([$1],[$1],[:])dnl
- m4_ifvaln([$2],[else $2])dnl
- fi
-
-]) dnl BDB_ENV_GET_VERSION
-
/* Define if you want to use embedded MySQL */
#undef HAVE_EMBEDDED_MYSQL
-/* Define if you want to use SQLite */
-#undef HAVE_SQLITE
-
/* Define if you want to use SQLite3 */
#undef HAVE_SQLITE3
-/* Define if you want to use Berkeley DB */
-#undef HAVE_BERKELEY_DB
-
-/* Define if you want to use mSQL */
-#undef HAVE_MSQL
-
-/* Define if you want to use iODBC */
-#undef HAVE_IODBC
-
-/* Define if you want to use unixODBC */
-#undef HAVE_UNIXODBC
-
-/* Define if you want to use Solid SQL Server */
-#undef HAVE_SOLID
-
-/* Define if you want to use OpenLink ODBC (Virtuoso) */
-#undef HAVE_VIRT
-
-/* Define if you want to use EasySoft ODBC */
-#undef HAVE_EASYSOFT
-
-/* Define if you want to use Interbase SQL Server */
-#undef HAVE_IBASE
-
-/* Define if you want to use Oracle 8 SQL Server */
-#undef HAVE_ORACLE8
-
-/* Define if you want to use Oracle 7 SQL Server */
-#undef HAVE_ORACLE7
-
-
/* ------------------------------------------------------------------------- */
/* -- CONFIGURE DETECTED FEATURES -- */
/* ------------------------------------------------------------------------- */
/* Define to 1 if utime.h exists and declares struct utimbuf. */
#undef HAVE_UTIME_H
-#if (HAVE_MYSQL||HAVE_POSTGRESQL||HAVE_MSQL||HAVE_IODBC||HAVE_UNIXODBC||HAVE_SOLID||HAVE_VIRT||HAVE_IBASE||HAVE_ORACLE8||HAVE_ORACLE7||HAVE_EASYSOFT)
-#define HAVE_SQL
-#endif
-
/* Data types */
#undef HAVE_U_INT
#undef HAVE_INTXX_T
/* Set if Bacula bat Qt4 GUI support enabled */
#undef HAVE_BAT
-/* Set if DB batch insert code enabled */
-#undef HAVE_BATCH_FILE_INSERT
-
/* Big Endian */
#undef HAVE_BIG_ENDIAN
/* Set if you have the DBI driver */
#undef HAVE_DBI
+/* Set if DBI DB batch insert code enabled */
+#undef HAVE_DBI_BATCH_FILE_INSERT
+
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
#undef HAVE_DCGETTEXT
/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */
#undef HAVE_DOPRNT
+/* Set if you have an Embedded MySQL Database */
+#undef HAVE_EMBEDDED_MYSQL
+
/* Define to 1 if you have the 'extattr_get_file' function. */
#undef HAVE_EXTATTR_GET_FILE
/* Set if you have an MySQL Database */
#undef HAVE_MYSQL
+/* Set if have mysql_thread_safe */
+#undef HAVE_MYSQL_THREAD_SAFE
+
/* Define to 1 if you have the `nanosleep' function. */
#undef HAVE_NANOSLEEP
/* Define if your printf() function supports format strings with positions. */
#undef HAVE_POSIX_PRINTF
+/* Set if you have an PostgreSQL Database */
+#undef HAVE_POSTGRESQL
+
+/* Set if PostgreSQL DB batch insert code enabled */
+#undef HAVE_POSTGRESQL_BATCH_FILE_INSERT
+
/* Set if have PQisthreadsafe */
#undef HAVE_PQISTHREADSAFE
/* Set if socklen_t exists */
#undef HAVE_SOCKLEN_T
+/* Set if you have an SQLite3 Database */
+#undef HAVE_SQLITE3
+
/* Set if have sqlite3_threadsafe */
#undef HAVE_SQLITE3_THREADSAFE
/* Define to 1 if you have the <term.h> header file. */
#undef HAVE_TERM_H
+/* Set if Thread Safe MySQL can be checked using mysql_thread_safe */
+#undef HAVE_THREAD_SAFE_MYSQL
+
/* Define if TLS support should be enabled */
#undef HAVE_TLS
/* Define to 1 if your <sys/time.h> declares `struct tm'. */
#undef TM_IN_SYS_TIME
+/* Set if DB batch insert code enabled */
+#undef USE_BATCH_FILE_INSERT
+
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
/* ------------------------------------------------------------------------- */
/* -- CONFIGURE SPECIFIED FEATURES -- */
/* ------------------------------------------------------------------------- */
-
-/* Define if you want to use the lock manager */
-#undef _USE_LOCKMGR
/* Define if you want to use MySQL as Catalog database */
#undef USE_MYSQL_DB
/* Define if you want to use embedded MySQL */
#undef HAVE_EMBEDDED_MYSQL
-/* Define if you want to use SQLite */
-#undef HAVE_SQLITE
-
/* Define if you want to use SQLite3 */
#undef HAVE_SQLITE3
-/* Define if you want to use Berkeley DB */
-#undef HAVE_BERKELEY_DB
-
-/* Define if you want to use mSQL */
-#undef HAVE_MSQL
-
-/* Define if you want to use iODBC */
-#undef HAVE_IODBC
-
-/* Define if you want to use unixODBC */
-#undef HAVE_UNIXODBC
-
-/* Define if you want to use Solid SQL Server */
-#undef HAVE_SOLID
-
-/* Define if you want to use OpenLink ODBC (Virtuoso) */
-#undef HAVE_VIRT
-
-/* Define if you want to use EasySoft ODBC */
-#undef HAVE_EASYSOFT
-
-/* Define if you want to use Interbase SQL Server */
-#undef HAVE_IBASE
-
-/* Define if you want to use Oracle 8 SQL Server */
-#undef HAVE_ORACLE8
-
-/* Define if you want to use Oracle 7 SQL Server */
-#undef HAVE_ORACLE7
-
-
/* ------------------------------------------------------------------------- */
/* -- CONFIGURE DETECTED FEATURES -- */
/* ------------------------------------------------------------------------- */
#undef HAVE_GCC
/* Define if you have the Andrew File System. */
-#undef AFS
+#undef HAVE_AFS
/* Define If you want find -nouser and -nogroup to make tables of
used UIDs and GIDs at startup instead of using getpwuid or
/* Define to 1 if utime.h exists and declares struct utimbuf. */
#undef HAVE_UTIME_H
-#if (HAVE_MYSQL||HAVE_POSTGRESQL||HAVE_MSQL||HAVE_IODBC||HAVE_UNIXODBC||HAVE_SOLID||HAVE_VIRT||HAVE_IBASE||HAVE_ORACLE8||HAVE_ORACLE7||HAVE_EASYSOFT)
-#define HAVE_SQL
-#endif
-
/* Data types */
#undef HAVE_U_INT
#undef HAVE_INTXX_T
/* Define if you have zlib */
#undef HAVE_LIBZ
-/* Defines if your system have the sys/acl.h header file */
-#undef HAVE_SYS_ACL_H
-
/* Define if you have libacl */
#undef HAVE_ACL
-/* Define if you have extended acls */
-#undef HAVE_EXTENDED_ACL
-
-/* Defines if your system have the sys/xattr.h header file */
-#undef HAVE_SYS_XATTR_H
-
-/* Define if you have extended attributes */
-#undef HAVE_XATTR
-
-/* Define when you have extended attributes functions starting with l (like lstat) */
-#undef HAVE_LLISTXATTR
-#undef HAVE_LGETXATTR
-#undef HAVE_LSETXATTR
-
-/* Define when you have extended attributes functions not starting with l (like stat) */
-#undef HAVE_LISTXATTR
-#undef HAVE_GETXATTR
-#undef HAVE_SETXATTR
-
/* General libs */
#undef LIBS
#undef HAVE_OLD_SOCKOPT
-#undef HAVE_BIGENDIAN
-
/* Defined if Gtk+-2.4 or greater is present */
#undef HAVE_GTK_2_4
/* Define if language support is enabled */
#undef ENABLE_NLS
-
/* Define to 1 if the `closedir' function returns void instead of `int'. */
#undef CLOSEDIR_VOID
language is requested. */
#undef ENABLE_NLS
+/* Normal acl support */
+#undef HAVE_ACL
+
/* Define to 1 if you have `alloca', as a function or macro. */
#undef HAVE_ALLOCA
/* Define to 1 if you have the <assert.h> header file. */
#undef HAVE_ASSERT_H
+/* Defines if your system have the attr.h header file */
+#undef HAVE_ATTR_H
+
/* Set if Bacula bat Qt4 GUI support enabled */
#undef HAVE_BAT
-/* Set if DB batch insert code enabled */
-#undef HAVE_BATCH_FILE_INSERT
-
/* Define to 1 if you have the MacOS X function CFLocaleCopyCurrent in the
CoreFoundation framework. */
#undef HAVE_CFLOCALECOPYCURRENT
/* Set if you have the DBI driver */
#undef HAVE_DBI
+/* Set if DBI DB batch insert code enabled */
+#undef HAVE_DBI_BATCH_FILE_INSERT
+
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
#undef HAVE_DCGETTEXT
/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */
#undef HAVE_DOPRNT
+/* Define to 1 if you have the 'extattr_get_file' function. */
+#undef HAVE_EXTATTR_GET_FILE
+
+/* Define to 1 if you have the 'extattr_get_link' function. */
+#undef HAVE_EXTATTR_GET_LINK
+
+/* Define to 1 if you have the 'extattr_list_file' function. */
+#undef HAVE_EXTATTR_LIST_FILE
+
+/* Define to 1 if you have the 'extattr_list_link' function. */
+#undef HAVE_EXTATTR_LIST_LINK
+
+/* Define to 1 if you have the 'extattr_namespace_to_string' function. */
+#undef HAVE_EXTATTR_NAMESPACE_TO_STRING
+
+/* Define to 1 if you have the 'extattr_set_file' function. */
+#undef HAVE_EXTATTR_SET_FILE
+
+/* Define to 1 if you have the 'extattr_set_link' function. */
+#undef HAVE_EXTATTR_SET_LINK
+
+/* Define to 1 if you have the 'extattr_string_to_namespace' function. */
+#undef HAVE_EXTATTR_STRING_TO_NAMESPACE
+
+/* Extended acl support */
+#undef HAVE_EXTENDED_ACL
+
+/* AFS ACL support */
+#undef HAVE_AFS_ACL
+
/* Define to 1 if you have the `fchdir' function. */
#undef HAVE_FCHDIR
+/* Define to 1 if you have the 'fchownat' function. */
+#undef HAVE_FCHOWNAT
+
/* Define to 1 if you have the <fcntl.h> header file. */
#undef HAVE_FCNTL_H
/* Define to 1 if you have the `fseeko' function. */
#undef HAVE_FSEEKO
+/* Define to 1 if you have the 'fstatat' function. */
+#undef HAVE_FSTATAT
+
+/* Define to 1 if you have the 'futimesat' function. */
+#undef HAVE_FUTIMESAT
+
/* Define to 1 if you have the `fwprintf' function. */
#undef HAVE_FWPRINTF
/* Define to 1 if you have the `getuid' function. */
#undef HAVE_GETUID
+/* Define to 1 if you have the 'getxattr' function. */
+#undef HAVE_GETXATTR
+
/* Define to 1 if you have the <grp.h> header file. */
#undef HAVE_GRP_H
/* Define to 1 if you have the `inet_pton' function. */
#undef HAVE_INET_PTON
+/* Set if have Ingres Database */
+#undef HAVE_INGRES
+
/* Define if you have the 'intmax_t' type in <stdint.h> or <inttypes.h>. */
#undef HAVE_INTMAX_T
+/* Define to 1 if the system has the type `intptr_t'. */
+#undef HAVE_INTPTR_T
+
/* Define if <inttypes.h> exists and doesn't clash with <sys/types.h>. */
#undef HAVE_INTTYPES_H
declares uintmax_t. */
#undef HAVE_INTTYPES_H_WITH_UINTMAX
+/* Set if ioctl request is unsigned long int */
+#undef HAVE_IOCTL_ULINT_REQUEST
+
/* Whether to enable IPv6 support */
#undef HAVE_IPV6
/* Define if your <locale.h> file defines LC_MESSAGES. */
#undef HAVE_LC_MESSAGES
+/* Define to 1 if you have the 'lgetxattr' function. */
+#undef HAVE_LGETXATTR
+
+/* Define if you have libcap */
+#undef HAVE_LIBCAP
+
/* Define to 1 if you have the <libc.h> header file. */
#undef HAVE_LIBC_H
/* Define to 1 if you have the `util' library (-lutil). */
#undef HAVE_LIBUTIL
+/* Defines if your system have the libutil.h header file */
+#undef HAVE_LIBUTIL_H
+
/* Set to enable libwraper support */
#undef HAVE_LIBWRAP
/* Define to 1 if you have the <limits.h> header file. */
#undef HAVE_LIMITS_H
+/* Define to 1 if you have the 'listxattr' function. */
+#undef HAVE_LISTXATTR
+
+/* Define to 1 if you have the 'llistxattr' function. */
+#undef HAVE_LLISTXATTR
+
/* Define to 1 if you have the <locale.h> header file. */
#undef HAVE_LOCALE_H
/* Define if you have the 'long long' type. */
#undef HAVE_LONG_LONG
+/* Define to 1 if you have the 'lsetxattr' function. */
+#undef HAVE_LSETXATTR
+
/* Define to 1 if you have the `lstat' function. */
#undef HAVE_LSTAT
/* Define to 1 if you have the `munmap' function. */
#undef HAVE_MUNMAP
+/* Set if you have an MySQL Database */
+#undef HAVE_MYSQL
+
+/* Set if have mysql_thread_safe */
+#undef HAVE_MYSQL_THREAD_SAFE
+
/* Define to 1 if you have the `nanosleep' function. */
#undef HAVE_NANOSLEEP
/* Define to 1 if you have the <nl_types.h> header file. */
#undef HAVE_NL_TYPES_H
+/* Define to 1 if you have the 'nvlist_next_nvpair' function. */
+#undef HAVE_NVLIST_NEXT_NVPAIR
+
+/* Define to 1 if you have the 'openat' function. */
+#undef HAVE_OPENAT
+
/* Define if OpenSSL library is available */
#undef HAVE_OPENSSL
/* Define if the OpenSSL library is export-contrained to 128bit ciphers */
#undef HAVE_OPENSSL_EXPORT_LIBRARY
+/* Set if have OpenSSL version 1.x */
+#undef HAVE_OPENSSLv1
+
/* Define to 1 if you have the `posix_fadvise' function. */
#undef HAVE_POSIX_FADVISE
/* Define if your printf() function supports format strings with positions. */
#undef HAVE_POSIX_PRINTF
+/* Set if PostgreSQL DB batch insert code enabled */
+#undef HAVE_POSTGRESQL_BATCH_FILE_INSERT
+
+/* Set if have PQisthreadsafe */
+#undef HAVE_PQISTHREADSAFE
+
+/* Set if have PQputCopyData */
+#undef HAVE_PQ_COPY
+
+/* Define to 1 if you have the `prctl' function. */
+#undef HAVE_PRCTL
+
/* Define to 1 if you have the `putenv' function. */
#undef HAVE_PUTENV
/* Define to 1 if you have the <pwd.h> header file. */
#undef HAVE_PWD_H
-/* Set if bat QWT library found */
-#undef HAVE_QWT
-
/* Define to 1 if you have the `readdir_r' function. */
#undef HAVE_READDIR_R
/* Define to 1 if you have the <regex.h> header file. */
#undef HAVE_REGEX_H
-/* Define to 1 if you have the <resolv.h> header file. */
-#undef HAVE_RESOLV_H
-
/* Define if sa_len field exists in struct sockaddr */
#undef HAVE_SA_LEN
/* Define to 1 if you have the `setpgrp' function. */
#undef HAVE_SETPGRP
+/* Define to 1 if you have the `setreuid' function. */
+#undef HAVE_SETREUID
+
/* Define to 1 if you have the `setsid' function. */
#undef HAVE_SETSID
+/* Define to 1 if you have the 'setxattr' function. */
+#undef HAVE_SETXATTR
+
/* Define if the SHA-2 family of digest algorithms is available */
#undef HAVE_SHA2
/* Define to 1 if you have the `snprintf' function. */
#undef HAVE_SNPRINTF
+/* Set if socklen_t exists */
+#undef HAVE_SOCKLEN_T
+
+/* Set if have sqlite3_threadsafe */
+#undef HAVE_SQLITE3_THREADSAFE
+
/* Define to 1 if you have the <stdarg.h> header file. */
#undef HAVE_STDARG_H
`HAVE_STRUCT_STAT_ST_RDEV' instead. */
#undef HAVE_ST_RDEV
+/* Defines if your system have the sys/acl.h header file */
+#undef HAVE_SYS_ACL_H
+
+/* Defines if your system have the sys/attr.h header file */
+#undef HAVE_SYS_ATTR_H
+
/* Define to 1 if you have the <sys/bitypes.h> header file. */
#undef HAVE_SYS_BITYPES_H
/* Define to 1 if you have the <sys/byteorder.h> header file. */
#undef HAVE_SYS_BYTEORDER_H
+/* Define to 1 if you have the <sys/capability.h> header file. */
+#undef HAVE_SYS_CAPABILITY_H
+
/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
*/
#undef HAVE_SYS_DIR_H
+/* Defines if your system have the sys/extattr.h header file */
+#undef HAVE_SYS_EXTATTR_H
+
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#undef HAVE_SYS_IOCTL_H
*/
#undef HAVE_SYS_NDIR_H
+/* Defines if your system have the sys/nvpair.h header file */
+#undef HAVE_SYS_NVPAIR_H
+
/* Define to 1 if you have the <sys/param.h> header file. */
#undef HAVE_SYS_PARAM_H
+/* Define to 1 if you have the <sys/prctl.h> header file. */
+#undef HAVE_SYS_PRCTL_H
+
/* Define to 1 if you have the <sys/select.h> header file. */
#undef HAVE_SYS_SELECT_H
/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */
#undef HAVE_SYS_WAIT_H
+/* Defines if your system have the sys/xattr.h header file */
+#undef HAVE_SYS_XATTR_H
+
/* Define to 1 if you have the `tcgetattr' function. */
#undef HAVE_TCGETATTR
/* Define if you have the 'uintmax_t' type in <stdint.h> or <inttypes.h>. */
#undef HAVE_UINTMAX_T
+/* Define to 1 if the system has the type `uintptr_t'. */
+#undef HAVE_UINTPTR_T
+
/* Define to 1 if you have the <unistd.h> header file. */
#undef HAVE_UNISTD_H
+/* Define to 1 if you have the 'unlinkat' function. */
+#undef HAVE_UNLINKAT
+
/* Define if you have the 'unsigned long long' type. */
#undef HAVE_UNSIGNED_LONG_LONG
/* Define if you have the 'wint_t' type. */
#undef HAVE_WINT_T
+/* Extended Attributes support */
+#undef HAVE_XATTR
+
/* Define to 1 if you have the <zlib.h> header file. */
#undef HAVE_ZLIB_H
/* Define to 1 if your <sys/time.h> declares `struct tm'. */
#undef TM_IN_SYS_TIME
+/* Set if DB batch insert code enabled */
+#undef USE_BATCH_FILE_INSERT
+
/* Define to 1 if the X Window System is missing or not being used. */
#undef X_DISPLAY_MISSING
/* Define for large files, on AIX-style hosts. */
#undef _LARGE_FILES
+/* Set if you want Lock Manager enabled */
+#undef _USE_LOCKMGR
+
/* Define to empty if `const' does not conform to ANSI C. */
#undef const
/* Define to `unsigned long' if <sys/types.h> does not define. */
#undef ino_t
+/* Define to the type of a signed integer type wide enough to hold a pointer,
+ if such a type exists, and if the system does not define it. */
+#undef intptr_t
+
/* Define to `int' if <sys/types.h> does not define. */
#undef major_t
/* Define to unsigned long or unsigned long long if <stdint.h> and
<inttypes.h> don't define. */
#undef uintmax_t
+
+/* Define to the type of an unsigned integer type wide enough to hold a
+ pointer, if such a type exists, and if the system does not define it. */
+#undef uintptr_t
dnl src/cats
dnl can be overwritten by specific values from version.h
LIBBACSQL_LT_RELEASE=`sed -n -e 's/^#.*LIBBACSQL_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h`
+LIBBACCATS_LT_RELEASE=`sed -n -e 's/^#.*LIBBACCATS_LT_RELEASE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h`
LIBBACSQL_LT_RELEASE=${LIBBACSQL_LT_RELEASE:-$VERSION}
+LIBBACCATS_LT_RELEASE=${LIBBACCATS_LT_RELEASE:-$VERSION}
AC_SUBST(LIBBACSQL_LT_RELEASE)dnl
+AC_SUBST(LIBBACCATS_LT_RELEASE)dnl
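+dnl LIBBACCATS_LT_RELEASE mirrors the LIBBACSQL_LT_RELEASE handling above:
+dnl it is read from src/version.h when present and falls back to $VERSION.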
dnl src/findlib
dnl can be overwritten by specific values from version.h
AM_GNU_GETTEXT
fi
-support_mysql=no
-support_sqlite=no
-support_sqlite3=no
-support_postgresql=no
-support_ingres=no
-support_dbi=no
support_smartalloc=yes
support_readline=yes
support_conio=yes
build_client_only=no
build_dird=yes
build_stored=yes
-cats=
-db_type=Internal
+db_backends=""
support_lockmgr=no
-DB_TYPE=bdb
dnl --------------------------------------------------------------------------
dnl CHECKING COMMAND LINE OPTIONS
[
if test x$enableval = xyes; then
build_client_only=yes
- db_type=None
- DB_TYPE=none
+ db_backends="None"
+ DB_BACKENDS="none"
fi
]
)
dnl ------------------------------------------------
dnl Bacula check for various SQL database engines
dnl ------------------------------------------------
-SQL_LIB=
BA_CHECK_POSTGRESQL_DB
BA_CHECK_MYSQL_DB
-BA_CHECK_INGRES_DB
-
BA_CHECK_SQLITE3_DB
-# BA_CHECK_SQLITE_DB
+BA_CHECK_INGRES_DB
BA_CHECK_DBI_DB
BA_CHECK_DBI_DRIVER
-AC_SUBST(cats)
-AC_SUBST(DB_TYPE)
+dnl -------------------------------------------
+dnl Make sure at least one database backend is found
+dnl -------------------------------------------
+if test "x${db_backends}" = "x" ; then
+ echo " "
+ echo " "
+ echo "You have not specified either --enable-client-only or one of the"
+ echo "supported databases: MySQL, PostgreSQL, Ingres, SQLite3 or DBI."
+ echo "This is not permitted. Please reconfigure."
+ echo " "
+ echo "Aborting the configuration ..."
+ echo " "
+ echo " "
+ exit 1
+fi
+
+dnl -------------------------------------------
+dnl See how many catalog backends are configured.
+dnl -------------------------------------------
+case `echo $DB_BACKENDS | wc -w | sed -e 's/^ *//'` in
+ 1)
+ DEFAULT_DB_TYPE="${DB_BACKENDS}"
+ if test x$use_libtool = xno; then
+ SHARED_CATALOG_TARGETS=""
+ else
+ SHARED_CATALOG_TARGETS="libbaccats-${DEFAULT_DB_TYPE}.la"
+ fi
+ ;;
+ *)
+ dnl ------------------------------------------------
+ dnl Set the default backend to the first backend found
+ dnl ------------------------------------------------
+ DEFAULT_DB_TYPE=`echo ${DB_BACKENDS} | cut -d' ' -f1`
+
+ dnl ------------------------------------------------
+ dnl For multiple backend we need libtool support.
+ dnl ------------------------------------------------
+ if test x$use_libtool = xno; then
+ echo " "
+ echo " "
+ echo "You have specified two or more of the"
+ echo "supported databases: MySQL, PostgreSQL, Ingres, SQLite3 or DBI."
+        echo "This is not permitted when not using libtool. Please reconfigure."
+ echo " "
+ echo "Aborting the configuration ..."
+ echo " "
+ echo " "
+ exit 1
+ fi
+
+ SHARED_CATALOG_TARGETS=""
+ for db_type in ${DB_BACKENDS}
+ do
+ if test -z "${SHARED_CATALOG_TARGETS}"; then
+ SHARED_CATALOG_TARGETS="libbaccats-${db_type}.la"
+ else
+ SHARED_CATALOG_TARGETS="${SHARED_CATALOG_TARGETS} libbaccats-${db_type}.la"
+ fi
+ done
+ ;;
+esac
+
+dnl -------------------------------------------
+dnl When using libtool, unset DB_LIBS: each shared backend library is linked
+dnl against the right database library itself, so DB_LIBS is only needed for
+dnl the non-shared versions of the backends.
+dnl -------------------------------------------
+if test x$use_libtool = xyes; then
+ DB_LIBS=""
+fi
+
+AC_SUBST(DB_BACKENDS)
+AC_SUBST(DB_LIBS)
+AC_SUBST(DEFAULT_DB_TYPE)
+AC_SUBST(SHARED_CATALOG_TARGETS)
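+
+dnl -------------------------------------------
+dnl Illustration only (the exact option names and lowercase backend tags come
+dnl from the BA_CHECK_*_DB macros): selecting, say, the MySQL and PostgreSQL
+dnl backends in one configure run is expected to leave roughly
+dnl    DB_BACKENDS="mysql postgresql"
+dnl    DEFAULT_DB_TYPE="mysql"
+dnl    SHARED_CATALOG_TARGETS="libbaccats-mysql.la libbaccats-postgresql.la"
+dnl (two or more backends require libtool, as enforced above).
+dnl -------------------------------------------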
dnl -------------------------------------------
dnl enable batch attribute DB insert (default on)
dnl -------------------------------------------
-support_batch_insert=no
-A=`test -f $SQL_LIB && nm $SQL_LIB | grep pthread_mutex_lock`
-pkg=$?
-if test $pkg = 0; then
- support_batch_insert=yes
- AC_ARG_ENABLE(batch-insert,
- AC_HELP_STRING([--enable-batch-insert], [enable the DB batch insert code @<:@default=no@:>@]),
- [
- if test x$enableval = xno; then
- support_batch_insert=no
- else
- support_batch_insert=yes
- fi
- ]
- )
+support_batch_insert=yes
+AC_ARG_ENABLE(batch-insert,
+ AC_HELP_STRING([--enable-batch-insert], [enable the DB batch insert code @<:@default=yes@:>@]),
+ [
+ if test x$enableval = xno; then
+ support_batch_insert=no
+ fi
+ ]
+)
+
+if test x$support_batch_insert = xyes; then
+ AC_DEFINE(USE_BATCH_FILE_INSERT, 1, [Set if DB batch insert code enabled])
fi
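+
+dnl Usage sketch (option declared above): batch insert is now on by default
+dnl and can be disabled with ./configure --disable-batch-insert, which leaves
+dnl USE_BATCH_FILE_INSERT undefined.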
-if test x$support_batch_insert = xno; then
- if test x$DB_TYPE = xmysql; then
- A=`test -f $MYSQL_LIBDIR/libmysqlclient_r.so && nm -D $MYSQL_LIBDIR/libmysqlclient_r.so | grep pthread_mutex_lock`
- pkg=$?
- if test $pkg = 0; then
- support_batch_insert=yes
- AC_ARG_ENABLE(batch-insert,
- AC_HELP_STRING([--enable-batch-insert], [enable the DB batch insert code @<:@default=no@:>@]),
- [
- if test x$enableval = xno; then
- support_batch_insert=no
- else
- support_batch_insert=yes
- fi
- ]
- )
- fi
- fi
-fi
+dnl -------------------------------------------
+dnl Check if mysql supports batch mode
+dnl -------------------------------------------
+if test ! -z "$MYSQL_LIB"; then
+   AC_CHECK_LIB(mysqlclient_r, mysql_thread_safe, AC_DEFINE(HAVE_MYSQL_THREAD_SAFE, 1, [Set if have mysql_thread_safe]))
+fi
+
+dnl -------------------------------------------
+dnl Check if sqlite supports batch mode
+dnl -------------------------------------------
+if test ! -z "$SQLITE_LIB"; then
+ AC_CHECK_LIB(sqlite3, sqlite3_threadsafe, AC_DEFINE(HAVE_SQLITE3_THREADSAFE, 1, [Set if have sqlite3_threadsafe]))
+fi
-dnl For postgresql checking
-saved_LIBS="${LIBS}"
-LIBS="${saved_LIBS} ${SQL_LFLAGS}"
+dnl -------------------------------------------
+dnl Check if postgresql supports batch mode
+dnl -------------------------------------------
+if test ! -z "$POSTGRESQL_LIB"; then
+ dnl For postgresql checking
+ saved_LIBS="${LIBS}"
+ LIBS="${saved_LIBS} ${POSTGRESQL_LIB}"
-dnl Check if postgresql can support batch mode
-if test x$DB_TYPE = xpostgresql; then
- support_batch_insert=yes
AC_CHECK_LIB(pq, PQisthreadsafe, AC_DEFINE(HAVE_PQISTHREADSAFE, 1, [Set if have PQisthreadsafe]))
AC_CHECK_LIB(pq, PQputCopyData, AC_DEFINE(HAVE_PQ_COPY, 1, [Set if have PQputCopyData]))
- test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"
- pkg=$?
- if test $pkg = 0; then
- AC_ARG_ENABLE(batch-insert,
- AC_HELP_STRING([--enable-batch-insert], [enable the DB batch insert code @<:@default=no@:>@]),
- [
- if test x$enableval = xno; then
- support_batch_insert=no
- fi
- ]
- )
- else
- support_batch_insert=no
+ if test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"; then
+ if test $support_batch_insert = yes ; then
+ AC_DEFINE(HAVE_POSTGRESQL_BATCH_FILE_INSERT, 1, [Set if PostgreSQL DB batch insert code enabled])
+ fi
fi
+
+ if test x$ac_cv_lib_pq_PQisthreadsafe != xyes -a x$support_batch_insert = xyes
+ then
+      echo "WARNING: Your PostgreSQL client library is too old for configure"
+      echo "to detect whether it was built with --enable-thread-safety."
+      echo "Consider upgrading it to avoid problems with batch insert mode."
+ echo
+ fi
+
+ dnl Revert after postgresql checks
+ LIBS="${saved_LIBS}"
fi
-if test x$DB_TYPE = xdbi; then
- DB_TYPE=$DB_PROG
- db_type=$DB_PROG
- pkg=1
+dnl -------------------------------------------
+dnl Check if dbi supports batch mode
+dnl -------------------------------------------
+if test ! -z "$DBI_LIBS"; then
+ dnl -------------------------------------------
+ dnl Push the DB_PROG onto the stack of supported database backends for DBI
+ dnl -------------------------------------------
+ DB_BACKENDS="${DB_BACKENDS} ${DB_PROG}"
+
+ dnl -------------------------------------------
dnl Check for batch insert
+ dnl -------------------------------------------
if test $DB_PROG = postgresql; then
AC_CHECK_LIB(pq, PQisthreadsafe, AC_DEFINE(HAVE_PQISTHREADSAFE))
AC_CHECK_LIB(pq, PQputCopyData, AC_DEFINE(HAVE_PQ_COPY))
test "x$ac_cv_lib_pq_PQputCopyData" = "xyes"
pkg=$?
- fi
-
- if test $DB_PROG = mysql; then
- A=`test -f $SQL_LIB && nm $DB_PROG_LIB | grep pthread_mutex_lock`
- pkg=$?
- fi
-
- if test $DB_PROG = sqlite3; then
- A=`test -f $SQL_LIB && nm $DB_PROG_LIB | grep pthread_mutex_lock`
- pkg=$?
- AC_CHECK_LIB(sqlite3, sqlite3_threadsafe, AC_DEFINE(HAVE_SQLITE3_THREADSAFE, 1, [Set if have sqlite3_threadsafe]))
- fi
-
- if test $pkg = 0; then
- AC_ARG_ENABLE(batch-insert,
- AC_HELP_STRING([--enable-batch-insert], [enable the DB batch insert code @<:@default=no@:>@]),
- [
- if test x$enableval = xno; then
- support_batch_insert=no
- else
- support_batch_insert=yes
- fi
- ]
- )
+ if test $pkg = 0; then
+ if test $support_batch_insert = yes ; then
+ AC_DEFINE(HAVE_DBI_BATCH_FILE_INSERT, 1, [Set if DBI DB batch insert code enabled])
+ fi
+ fi
fi
else
+ dnl -------------------------------------------
dnl If dbi was not chosen, let the comment in file
+ dnl -------------------------------------------
uncomment_dbi="#"
fi
-dnl revert after postgresql checks
-LIBS="${saved_LIBS}"
-
AC_SUBST(uncomment_dbi)
-dnl For Ingres always enable batch inserts.
-if test x$DB_TYPE = xingres; then
- support_batch_insert=yes
-fi
-
-if test $support_batch_insert = yes ; then
- AC_DEFINE(HAVE_BATCH_FILE_INSERT, 1, [Set if DB batch insert code enabled])
-fi
-
AC_DEFINE(PROTOTYPES)
dnl --------------------------------------------------------------------------
CFLAGS="$CFLAGS -fno-strict-aliasing -fno-exceptions -fno-rtti"
fi
LDFLAGS=${LDFLAGS--O}
-DB_LIBS="${SQL_LFLAGS}"
CPPFLAGS="$CPPFLAGS"
CFLAGS="$CFLAGS"
AC_SUBST(DEBUG)
AC_SUBST(DEFS)
AC_SUBST(LIBS)
AC_SUBST(DLIB)
-AC_SUBST(DB_LIBS)
AC_SUBST(X_LIBS)
AC_SUBST(X_EXTRA_LIBS)
AC_SUBST(WCFLAGS)
echo " "
${MAKE:-make} clean
-if test "x${db_type}" = "xInternal" ; then
- echo " "
- echo " "
- echo "You have not specified either --enable-client-only or one of the"
- echo " supported databases: MySQL, PostgreSQL, Ingres, SQLite3 or DBI."
- echo " This is not permitted. Please reconfigure."
- echo " "
- echo "Aborting the configuration ..."
- echo " "
- echo " "
- exit 1
-fi
-
echo "
Configuration on `date`:
- Host: ${host}${post_host} -- ${DISTNAME} ${DISTVER}
- Bacula version: ${BACULA} ${VERSION} (${DATE})
- Source code location: ${srcdir}
- Install binaries: ${sbindir}
- Install libraries: ${libdir}
- Install config files: ${sysconfdir}
- Scripts directory: ${scriptdir}
- Archive directory: ${archivedir}
- Working directory: ${working_dir}
- PID directory: ${piddir}
- Subsys directory: ${subsysdir}
- Man directory: ${mandir}
- Data directory: ${datarootdir}
- Plugin directory: ${plugindir}
- C Compiler: ${CC} ${CCVERSION}
- C++ Compiler: ${CXX} ${CXXVERSION}
- Compiler flags: ${WCFLAGS} ${CFLAGS}
- Linker flags: ${WLDFLAGS} ${LDFLAGS}
- Libraries: ${LIBS}
- Statically Linked Tools: ${support_static_tools}
- Statically Linked FD: ${support_static_fd}
- Statically Linked SD: ${support_static_sd}
- Statically Linked DIR: ${support_static_dir}
- Statically Linked CONS: ${support_static_cons}
- Database type: ${db_type}
- Database port: ${db_port}
- Database lib: ${DB_LIBS}
- Database name: ${db_name}
- Database user: ${db_user}
-
- Job Output Email: ${job_email}
- Traceback Email: ${dump_email}
- SMTP Host Address: ${smtp_host}
+ Host: ${host}${post_host} -- ${DISTNAME} ${DISTVER}
+ Bacula version: ${BACULA} ${VERSION} (${DATE})
+ Source code location: ${srcdir}
+ Install binaries: ${sbindir}
+ Install libraries: ${libdir}
+ Install config files: ${sysconfdir}
+ Scripts directory: ${scriptdir}
+ Archive directory: ${archivedir}
+ Working directory: ${working_dir}
+ PID directory: ${piddir}
+ Subsys directory: ${subsysdir}
+ Man directory: ${mandir}
+ Data directory: ${datarootdir}
+ Plugin directory: ${plugindir}
+ C Compiler: ${CC} ${CCVERSION}
+ C++ Compiler: ${CXX} ${CXXVERSION}
+ Compiler flags: ${WCFLAGS} ${CFLAGS}
+ Linker flags: ${WLDFLAGS} ${LDFLAGS}
+ Libraries: ${LIBS}
+ Statically Linked Tools: ${support_static_tools}
+ Statically Linked FD: ${support_static_fd}
+ Statically Linked SD: ${support_static_sd}
+ Statically Linked DIR: ${support_static_dir}
+ Statically Linked CONS: ${support_static_cons}
+ Database backends: ${db_backends}
+ Database port: ${db_port}
+ Database name: ${db_name}
+ Database user: ${db_user}
- Director Port: ${dir_port}
- File daemon Port: ${fd_port}
- Storage daemon Port: ${sd_port}
+ Job Output Email: ${job_email}
+ Traceback Email: ${dump_email}
+ SMTP Host Address: ${smtp_host}
- Director User: ${dir_user}
- Director Group: ${dir_group}
- Storage Daemon User: ${sd_user}
- Storage DaemonGroup: ${sd_group}
- File Daemon User: ${fd_user}
- File Daemon Group: ${fd_group}
+ Director Port: ${dir_port}
+ File daemon Port: ${fd_port}
+ Storage daemon Port: ${sd_port}
- SQL binaries Directory ${SQL_BINDIR}
+ Director User: ${dir_user}
+ Director Group: ${dir_group}
+ Storage Daemon User: ${sd_user}
+ Storage DaemonGroup: ${sd_group}
+ File Daemon User: ${fd_user}
+ File Daemon Group: ${fd_group}
Large file support: $largefile_support
Bacula conio support: ${got_conio} ${CONS_LIBS}
chmod 755 scripts/bacula_config
cat config.out
-
-# Display a warning message if postgresql client lib is <= 8.1
-if test x$DB_TYPE = xpostgresql -a x$ac_cv_lib_pq_PQisthreadsafe != xyes \
- -a x$support_batch_insert = xyes
-then
- echo "WARNING: Your PostgreSQL client library is too old to detect "
- echo " if it was compiled with --enable-thread-safety, consider to"
- echo " upgrade it in order to avoid problems with Batch insert mode"
- echo
-fi
# this dir relative to top dir
thisdir = src/cats
-CPPFLAGS += -DBUILDING_CATS @DBI_DBD_DRIVERDIR@
+DEFS += -D_BDB_PRIV_INTERFACE_
+CPPFLAGS += @DBI_DBD_DRIVERDIR@
DEBUG=@DEBUG@
MKDIR=$(topdir)/autoconf/mkinstalldirs
-SQL_INC=@SQL_INCLUDE@
-
first_rule: all
dummy:
#
INCLUDE_FILES = cats.h protos.h sql_cmds.h
-LIBBACSQL_SRCS = mysql.c dbi.c \
- sql.c sql_cmds.c sql_create.c sql_delete.c sql_find.c \
- sql_get.c sql_list.c sql_update.c sqlite.c \
- postgresql.c \
- bvfs.c
+MYSQL_INCLUDE = @MYSQL_INCLUDE@
+MYSQL_LIBS = @MYSQL_LIBS@
+MYSQL_SRCS = mysql.c
+MYSQL_OBJS = $(MYSQL_SRCS:.c=.o)
+MYSQL_LOBJS = $(MYSQL_SRCS:.c=.lo)
+
+POSTGRESQL_INCLUDE = @POSTGRESQL_INCLUDE@
+POSTGRESQL_LIBS = @POSTGRESQL_LIBS@
+POSTGRESQL_SRCS = postgresql.c
+POSTGRESQL_OBJS = $(POSTGRESQL_SRCS:.c=.o)
+POSTGRESQL_LOBJS = $(POSTGRESQL_SRCS:.c=.lo)
+
+SQLITE_INCLUDE = @SQLITE_INCLUDE@
+SQLITE_LIBS = @SQLITE_LIBS@
+SQLITE_SRCS = sqlite.c
+SQLITE_OBJS = $(SQLITE_SRCS:.c=.o)
+SQLITE_LOBJS = $(SQLITE_SRCS:.c=.lo)
+
+INGRES_INCLUDE = @INGRES_INCLUDE@
+INGRES_LIBS = @INGRES_LIBS@
+INGRES_SRCS = ingres.c myingres.c
+INGRES_OBJS = $(INGRES_SRCS:.c=.o)
+INGRES_LOBJS = $(INGRES_SRCS:.c=.lo)
+
+DBI_INCLUDE = @DBI_INCLUDE@
+DBI_LIBS = @DBI_LIBS@
+DBI_SRCS = dbi.c
+DBI_OBJS = $(DBI_SRCS:.c=.o)
+DBI_LOBJS = $(DBI_SRCS:.c=.lo)
+
+DB_LIBS=@DB_LIBS@
+
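+# CATS_SRCS holds the per-backend driver sources; LIBBACSQL_SRCS is the
+# backend-independent catalog code that goes into libbacsql.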
+CATS_SRCS = mysql.c postgresql.c sqlite.c dbi.c ingres.c myingres.c
+LIBBACSQL_SRCS = bvfs.c cats.c sql.c sql_cmds.c sql_create.c sql_delete.c \
+ sql_find.c sql_get.c sql_glue.c sql_list.c sql_update.c
LIBBACSQL_OBJS = $(LIBBACSQL_SRCS:.c=.o)
+LIBBACCATS_OBJS = $(CATS_SRCS:.c=.o)
LIBBACSQL_LOBJS = $(LIBBACSQL_SRCS:.c=.lo)
LIBBACSQL_LT_RELEASE = @LIBBACSQL_LT_RELEASE@
+LIBBACCATS_LT_RELEASE = @LIBBACCATS_LT_RELEASE@
.SUFFIXES: .c .o .lo
.PHONY:
# inference rules
.c.o:
@echo "Compiling $<"
- $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(SQL_INC) $(DINCLUDE) $(CFLAGS) $<
+ $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $<
.c.lo:
@echo "Compiling $<"
- $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(SQL_INC) $(DINCLUDE) $(CFLAGS) $<
+ $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $<
+
+$(MYSQL_LOBJS):
+ @echo "Compiling $(@:.lo=.c)"
+ $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(MYSQL_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c)
+
+$(POSTGRESQL_LOBJS):
+ @echo "Compiling $(@:.lo=.c)"
+ $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(POSTGRESQL_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c)
+
+$(SQLITE_LOBJS):
+ @echo "Compiling $(@:.lo=.c)"
+ $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(SQLITE_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c)
+
+$(DBI_LOBJS):
+ @echo "Compiling $(@:.lo=.c)"
+ $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DBI_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c)
+
+$(INGRES_LOBJS):
+ @echo "Compiling $(@:.lo=.c)"
+ $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(INGRES_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.lo=.c)
+
+$(MYSQL_OBJS):
+ @echo "Compiling $(@:.o=.c)"
+ $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(MYSQL_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c)
+$(POSTGRESQL_OBJS):
+ @echo "Compiling $(@:.o=.c)"
+ $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(POSTGRESQL_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c)
+
+$(SQLITE_OBJS):
+ @echo "Compiling $(@:.o=.c)"
+ $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(SQLITE_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c)
+
+$(INGRES_OBJS):
+ @echo "Compiling $(@:.o=.c)"
+ $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(INGRES_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c)
+
+$(DBI_OBJS):
+ @echo "Compiling $(@:.o=.c)"
+ $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DBI_INCLUDE) $(DINCLUDE) $(CFLAGS) $(@:.o=.c)
#-------------------------------------------------------------------------
-all: Makefile libbacsql$(DEFAULT_ARCHIVE_TYPE)
- @echo "==== Make of sqllib is good ===="
+all: Makefile libbacsql$(DEFAULT_ARCHIVE_TYPE) libbaccats$(DEFAULT_ARCHIVE_TYPE) @SHARED_CATALOG_TARGETS@
+ @echo "==== Make of sqllibs is good ===="
@echo " "
esql:
$(NO_ECHO)$(II_SYSTEM)/ingres/bin/esqlcc -extension=h myingres.sh
libbacsql.a: $(LIBBACSQL_OBJS)
- @echo "Making $@ ..."
+ @echo "Making $@ ..."
$(AR) rc $@ $(LIBBACSQL_OBJS)
$(RANLIB) $@
+libbaccats.a: $(LIBBACCATS_OBJS)
+ @echo "Making $@ ..."
+ $(AR) rc $@ $(LIBBACCATS_OBJS)
+ $(RANLIB) $@
+
libbacsql.la: Makefile $(LIBBACSQL_LOBJS)
@echo "Making $@ ..."
$(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(LIBBACSQL_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACSQL_LT_RELEASE) $(DB_LIBS)
+libbaccats.la: Makefile cats_dummy.lo
+ @echo "Making $@ ..."
+ $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ cats_dummy.lo -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE)
+
+libbaccats-mysql.la: Makefile $(MYSQL_LOBJS)
+ @echo "Making $@ ..."
+ $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(MYSQL_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) $(MYSQL_LIBS)
+
+libbaccats-postgresql.la: Makefile $(POSTGRESQL_LOBJS)
+ @echo "Making $@ ..."
+ $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(POSTGRESQL_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) $(POSTGRESQL_LIBS)
+
+libbaccats-sqlite3.la: Makefile $(SQLITE_LOBJS)
+ @echo "Making $@ ..."
+ $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(SQLITE_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) $(SQLITE_LIBS)
+
+libbaccats-ingres.la: Makefile $(INGRES_LOBJS)
+ @echo "Making $@ ..."
+ $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(INGRES_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) $(INGRES_LIBS)
+
+libbaccats-dbi.la: Makefile $(DBI_LOBJS)
+ @echo "Making $@ ..."
+ $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -o $@ $(DBI_LOBJS) -export-dynamic -rpath $(libdir) -release $(LIBBACCATS_LT_RELEASE) $(DBI_LIBS)
+
Makefile: $(srcdir)/Makefile.in $(topdir)/config.status
cd $(topdir) \
&& CONFIG_FILES=$(thisdir)/$@ CONFIG_HEADERS= $(SHELL) ./config.status
libtool-install: all
$(MKDIR) $(DESTDIR)$(libdir)
- $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbacsql$(DEFAULT_ARCHIVE_TYPE) $(DESTDIR)$(libdir)
+ $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbacsql.la $(DESTDIR)$(libdir)
+ $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbaccats.la $(DESTDIR)$(libdir)
+ for db_type in @DB_BACKENDS@; do \
+ $(LIBTOOL_INSTALL_FINISH) $(INSTALL_LIB) libbaccats-$${db_type}.la $(DESTDIR)$(libdir); \
+ done
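+# Provide a generic libbaccats shared object that is a copy of the
+# configured default backend (@DEFAULT_DB_TYPE@).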
+ $(CP) $(DESTDIR)$(libdir)/libbaccats-@DEFAULT_DB_TYPE@-$(LIBBACCATS_LT_RELEASE).so \
+ $(DESTDIR)$(libdir)/libbaccats-$(LIBBACCATS_LT_RELEASE).so
libtool-uninstall:
$(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbacsql.la
+ $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbaccats.la
+ for db_type in @DB_BACKENDS@; do \
+	   $(LIBTOOL_UNINSTALL) $(RMF) $(DESTDIR)$(libdir)/libbaccats-$${db_type}.la; \
+ done
install: @LIBTOOL_INSTALL_TARGET@ @INCLUDE_INSTALL_TARGET@
- $(INSTALL_SCRIPT) create_@DB_TYPE@_database $(DESTDIR)$(scriptdir)/create_@DB_TYPE@_database
- $(INSTALL_SCRIPT) update_@DB_TYPE@_tables $(DESTDIR)$(scriptdir)/update_@DB_TYPE@_tables
- $(INSTALL_SCRIPT) make_@DB_TYPE@_tables $(DESTDIR)$(scriptdir)/make_@DB_TYPE@_tables
- $(INSTALL_SCRIPT) grant_@DB_TYPE@_privileges $(DESTDIR)$(scriptdir)/grant_@DB_TYPE@_privileges
- $(INSTALL_SCRIPT) drop_@DB_TYPE@_tables $(DESTDIR)$(scriptdir)/drop_@DB_TYPE@_tables
- $(INSTALL_SCRIPT) drop_@DB_TYPE@_database $(DESTDIR)$(scriptdir)/drop_@DB_TYPE@_database
+ for db_type in @DB_BACKENDS@; do \
+ if [ -f create_$${db_type}_database ]; then \
+ $(INSTALL_SCRIPT) create_$${db_type}_database $(DESTDIR)$(scriptdir)/create_$${db_type}_database; \
+ $(INSTALL_SCRIPT) update_$${db_type}_tables $(DESTDIR)$(scriptdir)/update_$${db_type}_tables; \
+ $(INSTALL_SCRIPT) make_$${db_type}_tables $(DESTDIR)$(scriptdir)/make_$${db_type}_tables; \
+ $(INSTALL_SCRIPT) grant_$${db_type}_privileges $(DESTDIR)$(scriptdir)/grant_$${db_type}_privileges; \
+ $(INSTALL_SCRIPT) drop_$${db_type}_tables $(DESTDIR)$(scriptdir)/drop_$${db_type}_tables; \
+ $(INSTALL_SCRIPT) drop_$${db_type}_database $(DESTDIR)$(scriptdir)/drop_$${db_type}_database; \
+ fi; \
+ done
$(INSTALL_SCRIPT) create_bacula_database $(DESTDIR)$(scriptdir)/create_bacula_database
$(INSTALL_SCRIPT) update_bacula_tables $(DESTDIR)$(scriptdir)/update_bacula_tables
$(INSTALL_SCRIPT) $$filename $(DESTDIR)$(scriptdir)/$$destname
uninstall: @LIBTOOL_UNINSTALL_TARGET@ @INCLUDE_UNINSTALL_TARGET@
- (cd $(DESTDIR)$(scriptdir); $(RMF) create_@DB_TYPE@_database)
- (cd $(DESTDIR)$(scriptdir); $(RMF) update_@DB_TYPE@_tables)
- (cd $(DESTDIR)$(scriptdir); $(RMF) make_@DB_TYPE@_tables)
- (cd $(DESTDIR)$(scriptdir); $(RMF) grant_@DB_TYPE@_privileges)
- (cd $(DESTDIR)$(scriptdir); $(RMF) drop_@DB_TYPE@_tables)
- (cd $(DESTDIR)$(scriptdir); $(RMF) drop_@DB_TYPE@_database)
+ @for db_type in @DB_BACKENDS@; do \
+ (cd $(DESTDIR)$(scriptdir); $(RMF) create_$${db_type}_database); \
+ (cd $(DESTDIR)$(scriptdir); $(RMF) update_$${db_type}_tables); \
+ (cd $(DESTDIR)$(scriptdir); $(RMF) make_$${db_type}_tables); \
+ (cd $(DESTDIR)$(scriptdir); $(RMF) grant_$${db_type}_privileges); \
+ (cd $(DESTDIR)$(scriptdir); $(RMF) drop_$${db_type}_tables); \
+ (cd $(DESTDIR)$(scriptdir); $(RMF) drop_$${db_type}_database); \
+ done
(cd $(DESTDIR)$(scriptdir); $(RMF) create_bacula_database)
(cd $(DESTDIR)$(scriptdir); $(RMF) update_bacula_tables)
@$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile
@$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile
@for src in $(LIBBACSQL_SRCS); do \
- $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) -I$(srcdir) -I$(basedir) $(SQL_INC) $$src >> Makefile; \
+ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $$src >> Makefile; \
+ done
+ @for src in $(MYSQL_SRCS); do \
+ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(MYSQL_INCLUDE) $$src >> Makefile; \
+ done
+ @for src in $(POSTGRESQL_SRCS); do \
+ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(POSTGRESQL_INCLUDE) $$src >> Makefile; \
+ done
+ @for src in $(SQLITE_SRCS); do \
+ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(SQLITE_INCLUDE) $$src >> Makefile; \
+ done
+ @for src in $(DBI_SRCS); do \
+ $(CXX) -S -M -MT `basename $$src .c`$(DEFAULT_OBJECT_TYPE) $(CPPFLAGS) $(XINC) $(DEFS) -I$(srcdir) -I$(basedir) $(DBI_INCLUDE) $$src >> Makefile; \
done
@if test -f Makefile ; then \
$(RMF) Makefile.bak; \
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+#ifndef __BDB_DBI_H_
+#define __BDB_DBI_H_ 1
+
+struct DBI_FIELD_GET {
+ dlink link;
+ char *value;
+};
+
+class B_DB_DBI: public B_DB_PRIV {
+private:
+ dbi_inst m_instance;
+ dbi_conn *m_db_handle;
+ dbi_result *m_result;
+ DBI_FIELD_GET *m_field_get;
+
+public:
+ B_DB_DBI(JCR *jcr, const char *db_driver, const char *db_name,
+ const char *db_user, const char *db_password,
+ const char *db_address, int db_port, const char *db_socket,
+ bool mult_db_connections, bool disable_batch_insert);
+ ~B_DB_DBI();
+
+ /* low level operations */
+ bool db_open_database(JCR *jcr);
+ void db_close_database(JCR *jcr);
+ void db_thread_cleanup(void);
+ void db_escape_string(JCR *jcr, char *snew, char *old, int len);
+ char *db_escape_object(JCR *jcr, char *old, int len);
+ void db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *len);
+ void db_start_transaction(JCR *jcr);
+ void db_end_transaction(JCR *jcr);
+ bool db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx);
+ void sql_free_result(void);
+ SQL_ROW sql_fetch_row(void);
+ bool sql_query(const char *query, int flags=0);
+ const char *sql_strerror(void);
+ int sql_num_rows(void);
+ void sql_data_seek(int row);
+ int sql_affected_rows(void);
+ uint64_t sql_insert_autokey_record(const char *query, const char *table_name);
+ void sql_field_seek(int field);
+ SQL_FIELD *sql_fetch_field(void);
+ int sql_num_fields(void);
+ bool sql_field_is_not_null(int field_type);
+ bool sql_field_is_numeric(int field_type);
+ bool sql_batch_start(JCR *jcr);
+ bool sql_batch_end(JCR *jcr, const char *error);
+ bool sql_batch_insert(JCR *jcr, ATTR_DBR *ar);
+};
+
+#endif /* __BDB_DBI_H_ */
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+#ifndef __BDB_INGRES_H_
+#define __BDB_INGRES_H_ 1
+
+class B_DB_INGRES: public B_DB_PRIV {
+private:
+ INGconn *m_db_handle;
+ INGresult *m_result;
+ bool m_explicit_commit;
+ int m_session_id;
+ alist *m_query_filters;
+
+public:
+ B_DB_INGRES(JCR *jcr, const char *db_driver, const char *db_name,
+ const char *db_user, const char *db_password,
+ const char *db_address, int db_port, const char *db_socket,
+ bool mult_db_connections, bool disable_batch_insert);
+ ~B_DB_INGRES();
+
+ /* low level operations */
+ bool db_open_database(JCR *jcr);
+ void db_close_database(JCR *jcr);
+ void db_thread_cleanup(void);
+ void db_escape_string(JCR *jcr, char *snew, char *old, int len);
+ char *db_escape_object(JCR *jcr, char *old, int len);
+ void db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *len);
+ void db_start_transaction(JCR *jcr);
+ void db_end_transaction(JCR *jcr);
+ bool db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx);
+ void sql_free_result(void);
+ SQL_ROW sql_fetch_row(void);
+ bool sql_query(const char *query, int flags=0);
+ const char *sql_strerror(void);
+ int sql_num_rows(void);
+ void sql_data_seek(int row);
+ int sql_affected_rows(void);
+ uint64_t sql_insert_autokey_record(const char *query, const char *table_name);
+ void sql_field_seek(int field);
+ SQL_FIELD *sql_fetch_field(void);
+ int sql_num_fields(void);
+ bool sql_field_is_not_null(int field_type);
+ bool sql_field_is_numeric(int field_type);
+ bool sql_batch_start(JCR *jcr);
+ bool sql_batch_end(JCR *jcr, const char *error);
+ bool sql_batch_insert(JCR *jcr, ATTR_DBR *ar);
+};
+
+#endif /* __BDB_INGRES_H_ */
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+#ifndef __BDB_MYSQL_H_
+#define __BDB_MYSQL_H_ 1
+
+class B_DB_MYSQL: public B_DB_PRIV {
+private:
+ MYSQL *m_db_handle;
+ MYSQL m_instance;
+ MYSQL_RES *m_result;
+
+public:
+ B_DB_MYSQL(JCR *jcr, const char *db_driver, const char *db_name,
+ const char *db_user, const char *db_password,
+ const char *db_address, int db_port, const char *db_socket,
+ bool mult_db_connections, bool disable_batch_insert);
+ ~B_DB_MYSQL();
+
+ /* low level operations */
+ bool db_open_database(JCR *jcr);
+ void db_close_database(JCR *jcr);
+ void db_thread_cleanup(void);
+ void db_escape_string(JCR *jcr, char *snew, char *old, int len);
+ char *db_escape_object(JCR *jcr, char *old, int len);
+ void db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *len);
+ void db_start_transaction(JCR *jcr);
+ void db_end_transaction(JCR *jcr);
+ bool db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx);
+ void sql_free_result(void);
+ SQL_ROW sql_fetch_row(void);
+ bool sql_query(const char *query, int flags=0);
+ const char *sql_strerror(void);
+ int sql_num_rows(void);
+ void sql_data_seek(int row);
+ int sql_affected_rows(void);
+ uint64_t sql_insert_autokey_record(const char *query, const char *table_name);
+ void sql_field_seek(int field);
+ SQL_FIELD *sql_fetch_field(void);
+ int sql_num_fields(void);
+ bool sql_field_is_not_null(int field_type);
+ bool sql_field_is_numeric(int field_type);
+ bool sql_batch_start(JCR *jcr);
+ bool sql_batch_end(JCR *jcr, const char *error);
+ bool sql_batch_insert(JCR *jcr, ATTR_DBR *ar);
+};
+
+#endif /* __BDB_MYSQL_H_ */
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+#ifndef __BDB_POSTGRESQL_H_
+#define __BDB_POSTGRESQL_H_ 1
+
+class B_DB_POSTGRESQL: public B_DB_PRIV {
+private:
+ PGconn *m_db_handle;
+ PGresult *m_result;
+
+public:
+ B_DB_POSTGRESQL(JCR *jcr, const char *db_driver, const char *db_name,
+ const char *db_user, const char *db_password,
+ const char *db_address, int db_port, const char *db_socket,
+ bool mult_db_connections, bool disable_batch_insert);
+ ~B_DB_POSTGRESQL();
+
+ /* low level operations */
+ bool db_open_database(JCR *jcr);
+ void db_close_database(JCR *jcr);
+ void db_thread_cleanup(void);
+ void db_escape_string(JCR *jcr, char *snew, char *old, int len);
+ char *db_escape_object(JCR *jcr, char *old, int len);
+ void db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *len);
+ void db_start_transaction(JCR *jcr);
+ void db_end_transaction(JCR *jcr);
+ bool db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx);
+ void sql_free_result(void);
+ SQL_ROW sql_fetch_row(void);
+ bool sql_query(const char *query, int flags=0);
+ const char *sql_strerror(void);
+ int sql_num_rows(void);
+ void sql_data_seek(int row);
+ int sql_affected_rows(void);
+ uint64_t sql_insert_autokey_record(const char *query, const char *table_name);
+ void sql_field_seek(int field);
+ SQL_FIELD *sql_fetch_field(void);
+ int sql_num_fields(void);
+ bool sql_field_is_not_null(int field_type);
+ bool sql_field_is_numeric(int field_type);
+ bool sql_batch_start(JCR *jcr);
+ bool sql_batch_end(JCR *jcr, const char *error);
+ bool sql_batch_insert(JCR *jcr, ATTR_DBR *ar);
+};
+
+#endif /* __BDB_POSTGRESQL_H_ */
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2011-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+#ifndef __BDB_PRIV_H_
+#define __BDB_PRIV_H_ 1
+
+#ifndef _BDB_PRIV_INTERFACE_
+#error "Illegal inclusion of catalog private interface"
+#endif
+
+/*
+ * Generic definition of a sql_row.
+ */
+typedef char ** SQL_ROW;
+
+/*
+ * Generic definition of a sql_field.
+ */
+typedef struct sql_field {
+ char *name; /* name of column */
+ int max_length; /* max length */
+ uint32_t type; /* type */
+ uint32_t flags; /* flags */
+} SQL_FIELD;
+
+class B_DB_PRIV: public B_DB {
+protected:
+ int m_status; /* status */
+ int m_num_rows; /* number of rows returned by last query */
+ int m_num_fields; /* number of fields returned by last query */
+ int m_rows_size; /* size of malloced rows */
+ int m_fields_size; /* size of malloced fields */
+ int m_row_number; /* row number from xx_data_seek */
+ int m_field_number; /* field number from sql_field_seek */
+ SQL_ROW m_rows; /* defined rows */
+ SQL_FIELD *m_fields; /* defined fields */
+ bool m_allow_transactions; /* transactions allowed */
+ bool m_transaction; /* transaction started */
+
+public:
+ /* methods */
+ B_DB_PRIV() {};
+ virtual ~B_DB_PRIV() {};
+
+ int sql_num_rows(void) { return m_num_rows; };
+ void sql_field_seek(int field) { m_field_number = field; };
+ int sql_num_fields(void) { return m_num_fields; };
+ virtual void sql_free_result(void) = 0;
+ virtual SQL_ROW sql_fetch_row(void) = 0;
+ virtual bool sql_query(const char *query, int flags=0) = 0;
+ virtual const char *sql_strerror(void) = 0;
+ virtual void sql_data_seek(int row) = 0;
+ virtual int sql_affected_rows(void) = 0;
+ virtual uint64_t sql_insert_autokey_record(const char *query, const char *table_name) = 0;
+ virtual SQL_FIELD *sql_fetch_field(void) = 0;
+ virtual bool sql_field_is_not_null(int field_type) = 0;
+ virtual bool sql_field_is_numeric(int field_type) = 0;
+ virtual bool sql_batch_start(JCR *jcr) = 0;
+ virtual bool sql_batch_end(JCR *jcr, const char *error) = 0;
+ virtual bool sql_batch_insert(JCR *jcr, ATTR_DBR *ar) = 0;
+};
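+
+/*
+ * Each catalog backend (B_DB_MYSQL, B_DB_POSTGRESQL, B_DB_SQLITE,
+ * B_DB_INGRES and B_DB_DBI) derives from B_DB_PRIV and supplies the
+ * pure virtual methods declared above.
+ */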
+
+#endif /* __BDB_PRIV_H_ */
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+#ifndef __BDB_SQLITE_H_
+#define __BDB_SQLITE_H_ 1
+
+class B_DB_SQLITE: public B_DB_PRIV {
+private:
+ struct sqlite3 *m_db_handle;
+ char **m_result;
+ char *m_sqlite_errmsg;
+
+public:
+ B_DB_SQLITE(JCR *jcr, const char *db_driver, const char *db_name,
+ const char *db_user, const char *db_password,
+ const char *db_address, int db_port, const char *db_socket,
+ bool mult_db_connections, bool disable_batch_insert);
+ ~B_DB_SQLITE();
+
+ /* low level operations */
+ bool db_open_database(JCR *jcr);
+ void db_close_database(JCR *jcr);
+ void db_thread_cleanup(void);
+ void db_escape_string(JCR *jcr, char *snew, char *old, int len);
+ char *db_escape_object(JCR *jcr, char *old, int len);
+ void db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *len);
+ void db_start_transaction(JCR *jcr);
+ void db_end_transaction(JCR *jcr);
+ bool db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx);
+ void sql_free_result(void);
+ SQL_ROW sql_fetch_row(void);
+ bool sql_query(const char *query, int flags=0);
+ const char *sql_strerror(void);
+ int sql_num_rows(void);
+ void sql_data_seek(int row);
+ int sql_affected_rows(void);
+ uint64_t sql_insert_autokey_record(const char *query, const char *table_name);
+ void sql_field_seek(int field);
+ SQL_FIELD *sql_fetch_field(void);
+ int sql_num_fields(void);
+ bool sql_field_is_not_null(int field_type);
+ bool sql_field_is_numeric(int field_type);
+ bool sql_batch_start(JCR *jcr);
+ bool sql_batch_end(JCR *jcr, const char *error);
+ bool sql_batch_insert(JCR *jcr, ATTR_DBR *ar);
+};
+
+#endif /* __BDB_SQLITE_H_ */
#define __SQL_C /* indicate that this is sql.c */
#include "bacula.h"
-#include "cats/cats.h"
+
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+
+#include "cats.h"
+#include "bdb_priv.h"
+#include "sql_glue.h"
#include "lib/htable.h"
#include "bvfs.h"
int len = strlen(pattern);
query.check_size(len*2+1);
db_escape_string(jcr, db, query.c_str(), pattern, len);
- Mmsg(filter, " AND Path2.Path %s '%s' ", SQL_MATCH, query.c_str());
+ Mmsg(filter, " AND Path2.Path %s '%s' ", match_query[db_get_type_index(db)], query.c_str());
}
if (!dir_filenameid) {
db_lock(db);
db_sql_query(db, query.c_str(), path_handler, this);
- nb_record = db->num_rows;
+ nb_record = sql_num_rows(db);
db_unlock(db);
return nb_record == limit;
const char *JobId, const char *PathId,
const char *filter, int64_t limit, int64_t offset)
{
- if (db_type == SQL_TYPE_POSTGRESQL) {
- Mmsg(query, sql_bvfs_list_files[db_type],
+ if (db_get_type_index(db) == SQL_TYPE_POSTGRESQL) {
+ Mmsg(query, sql_bvfs_list_files[db_get_type_index(db)],
JobId, PathId, JobId, PathId,
filter, limit, offset);
} else {
- Mmsg(query, sql_bvfs_list_files[db_type],
+ Mmsg(query, sql_bvfs_list_files[db_get_type_index(db)],
JobId, PathId, JobId, PathId,
limit, offset, filter, JobId, JobId);
}
int len = strlen(pattern);
query.check_size(len*2+1);
db_escape_string(jcr, db, query.c_str(), pattern, len);
- Mmsg(filter, " AND Filename.Name %s '%s' ", SQL_MATCH, query.c_str());
+ Mmsg(filter, " AND Filename.Name %s '%s' ", match_query[db_get_type_index(db)], query.c_str());
}
build_ls_files_query(db, query,
db_lock(db);
db_sql_query(db, query.c_str(), list_entries, user_data);
- nb_record = db->num_rows;
+ nb_record = sql_num_rows(db);
db_unlock(db);
return nb_record == limit;
}
/* TODO: handle basejob and SQLite3 */
- Mmsg(query, sql_bvfs_select[db_type], output_table, output_table);
+ Mmsg(query, sql_bvfs_select[db_get_type_index(db)], output_table, output_table);
/* TODO: handle jobid filter */
Dmsg1(dbglevel_sql, "q=%s\n", query.c_str());
}
/* MySQL need it */
- if (db_type == SQL_TYPE_MYSQL) {
+ if (db_get_type_index(db) == SQL_TYPE_MYSQL) {
Mmsg(query, "CREATE INDEX idx_%s ON b2%s (JobId)",
output_table, output_table);
}
db_sql_query(db, query.c_str(), NULL, NULL);
return ret;
}
+
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2011-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+/*
+ * Generic catalog class methods.
+ *
+ * Written by Marco van Wieringen, January 2011
+ */
+
+#include "bacula.h"
+
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+
+#include "cats.h"
+#include "bdb_priv.h"
+#include "sql_glue.h"
+
+bool B_DB::db_match_database(const char *db_driver, const char *db_name,
+ const char *db_address, int db_port)
+{
+ if (bstrcmp(m_db_driver, db_driver) &&
+ bstrcmp(m_db_name, db_name) &&
+ bstrcmp(m_db_address, db_address) &&
+ m_db_port == db_port) {
+ return true;
+ }
+
+ return false;
+}
+
+B_DB *B_DB::db_clone_database_connection(JCR *jcr, bool mult_db_connections)
+{
+ /*
+    * See if it's a simple clone, e.g. with mult_db_connections set to
+    * false; in that case we just return the calling class pointer.
+ */
+ if (!mult_db_connections) {
+ m_ref_count++;
+ return this;
+ }
+
+ /*
+    * A bit more to do here: just open a new session to the database.
+ */
+ return db_init_database(jcr, m_db_driver, m_db_name, m_db_user, m_db_password,
+ m_db_address, m_db_port, m_db_socket, true, m_disabled_batch_insert);
+}
+
+const char *B_DB::db_get_type(void)
+{
+ switch (m_db_interface_type) {
+ case SQL_INTERFACE_TYPE_MYSQL:
+ return "MySQL";
+ case SQL_INTERFACE_TYPE_POSTGRESQL:
+ return "PostgreSQL";
+ case SQL_INTERFACE_TYPE_SQLITE3:
+ return "SQLite3";
+ case SQL_INTERFACE_TYPE_INGRES:
+ return "Ingres";
+ case SQL_INTERFACE_TYPE_DBI:
+ switch (m_db_type) {
+ case SQL_TYPE_MYSQL:
+ return "DBI:MySQL";
+ case SQL_TYPE_POSTGRESQL:
+ return "DBI:PostgreSQL";
+ case SQL_TYPE_SQLITE3:
+ return "DBI:SQLite3";
+ case SQL_TYPE_INGRES:
+ return "DBI:Ingres";
+ default:
+ return "DBI:Unknown";
+ }
+ default:
+ return "Unknown";
+ }
+}
+
+/*
+ * Lock the database. This can be called multiple times by the same
+ * thread without blocking, but must be unlocked the number of
+ * times it was locked using db_unlock().
+ */
+void B_DB::_db_lock(const char *file, int line)
+{
+ int errstat;
+
+ if ((errstat = rwl_writelock_p(&m_lock, file, line)) != 0) {
+ berrno be;
+ e_msg(file, line, M_FATAL, 0, "rwl_writelock failure. stat=%d: ERR=%s\n",
+ errstat, be.bstrerror(errstat));
+ }
+}
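+
+/*
+ * Usage sketch (assumed typical caller pattern): every db_lock() must be
+ * matched by a db_unlock() from the same thread, and the pair may nest:
+ *
+ *    db_lock(mdb);
+ *    db_sql_query(mdb, query, result_handler, ctx);
+ *    db_unlock(mdb);
+ */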
+
+/*
+ * Unlock the database. This can be called multiple times by the
+ * same thread up to the number of times that thread called
+ * db_lock()/
+ */
+void B_DB::_db_unlock(const char *file, int line)
+{
+ int errstat;
+
+ if ((errstat = rwl_writeunlock(&m_lock)) != 0) {
+ berrno be;
+ e_msg(file, line, M_FATAL, 0, "rwl_writeunlock failure. stat=%d: ERR=%s\n",
+ errstat, be.bstrerror(errstat));
+ }
+}
+
+bool B_DB::db_sql_query(const char *query, int flags)
+{
+ bool retval;
+
+ db_lock(this);
+ retval = ((B_DB_PRIV *)this)->sql_query(query, flags);
+ if (!retval) {
+ Mmsg(errmsg, _("Query failed: %s: ERR=%s\n"), query, ((B_DB_PRIV *)this)->sql_strerror());
+ }
+ db_unlock(this);
+ return retval;
+}
+
+void B_DB::print_lock_info(FILE *fp)
+{
+ if (m_lock.valid == RWLOCK_VALID) {
+ fprintf(fp, "\tRWLOCK=%p w_active=%i w_wait=%i\n", &m_lock, m_lock.w_active, m_lock.w_wait);
+ }
+}
+
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2011 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
Switzerland, email:ftf@fsfeurope.org.
*/
/*
- * SQL header file
+ * Catalog header file
*
- * by Kern E. Sibbald
- *
- * Anyone who accesses the database will need to include
- * this file.
- *
- * This file contains definitions common to sql.c and
- * the external world, and definitions destined only
- * for the external world. This is control with
- * the define __SQL_C, which is defined only in sql.c
+ * by Kern E. Sibbald
*
+ * Anyone who accesses the database will need to include
+ * this file.
*/
/*
am 100% sure there will be no more changes, the update script
will be copied to the updatedb directory with the correct name
(in the present case 8 to 9).
-
- Now, in principle, each of the different DB implementations
- can have a different version, but in practice they are all
- the same (simplifies things). The exception is the internal
- database, which is no longer used, and hence, no longer changes.
- */
-
-
-#ifndef __SQL_H_
-#define __SQL_H_ 1
-
-enum {
- SQL_TYPE_MYSQL = 0,
- SQL_TYPE_POSTGRESQL = 1,
- SQL_TYPE_SQLITE = 2,
- SQL_TYPE_SQLITE3 = 3,
- SQL_TYPE_INGRES = 4
-};
-
-
-typedef void (DB_LIST_HANDLER)(void *, const char *);
-typedef int (DB_RESULT_HANDLER)(void *, int, char **);
-
-#define db_lock(mdb) _db_lock(__FILE__, __LINE__, mdb)
-#define db_unlock(mdb) _db_unlock(__FILE__, __LINE__, mdb)
-
-#ifdef __SQL_C
-
-/* Current database version number for all drivers */
-#define BDB_VERSION 13
-
-
-#if defined(BUILDING_CATS)
-#ifdef HAVE_SQLITE
-#error "SQLite2 is now deprecated, use SQLite3 instead."
-
-#include <sqlite.h>
-
-/* Define opaque structure for sqlite */
-struct sqlite {
- char dummy;
-};
-
-#define IS_NUM(x) ((x) == 1)
-#define IS_NOT_NULL(x) ((x) == 1)
-
-typedef struct s_sql_field {
- char *name; /* name of column */
- int length; /* length */
- int max_length; /* max length */
- uint32_t type; /* type */
- uint32_t flags; /* flags */
-} SQL_FIELD;
-
-/*
- * This is the "real" definition that should only be
- * used inside sql.c and associated database interface
- * subroutines.
- * S Q L I T E
*/
-struct B_DB {
- dlink link; /* queue control */
- brwlock_t lock; /* transaction lock */
- struct sqlite *db;
- char **result;
- int status;
- int nrow; /* nrow returned from sqlite */
- int ncolumn; /* ncolum returned from sqlite */
- int num_rows; /* used by code */
- int row; /* seek row */
- int field; /* seek field */
- SQL_FIELD **fields; /* defined fields */
- int ref_count;
- char *db_name;
- char *db_user;
- char *db_address; /* host name address */
- char *db_socket; /* socket for local access */
- char *db_password;
- int db_port; /* port for host name address */
- bool connected; /* connection made to db */
- bool fields_defined; /* set when fields defined */
- char *sqlite_errmsg; /* error message returned by sqlite */
- POOLMEM *errmsg; /* nicely edited error message */
- POOLMEM *cmd; /* SQL command string */
- POOLMEM *cached_path; /* cached path name */
- int cached_path_len; /* length of cached path */
- uint32_t cached_path_id; /* cached path id */
- bool allow_transactions; /* transactions allowed */
- bool transaction; /* transaction started */
- int changes; /* changes during transaction */
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file name */
- POOLMEM *esc_path; /* Escaped path name */
- POOLMEM *esc_obj; /* Escaped restore object */
- int fnl; /* file name length */
- int pnl; /* path name length */
-};
-
-
-/*
- * "Generic" names for easier conversion
- *
- * S Q L I T E
- */
-#define sql_store_result(x) (x)->result
-#define sql_free_result(x) my_sqlite_free_table(x)
-#define sql_fetch_row(x) my_sqlite_fetch_row(x)
-#define sql_query(x, y) my_sqlite_query((x), (y))
-#define sql_insert_autokey_record(x, y, z) my_sqlite_insert_autokey_record((x), (y), (z))
-#ifdef HAVE_SQLITE3
-#define sql_close(x) sqlite3_close((x)->db)
-#define sql_affected_rows(x) sqlite3_changes((x)->db)
-#else
-#define sql_close(x) sqlite_close((x)->db)
-#define sql_affected_rows(x) 1
-#endif
-#define sql_strerror(x) (x)->sqlite_errmsg?(x)->sqlite_errmsg:"unknown"
-#define sql_num_rows(x) (x)->nrow
-#define sql_data_seek(x, i) (x)->row = (i)
-#define sql_field_seek(x, y) my_sqlite_field_seek((x), (y))
-#define sql_fetch_field(x) my_sqlite_fetch_field(x)
-#define sql_num_fields(x) ((x)->ncolumn)
-#define SQL_ROW char**
-#define SQL_MATCH "MATCH"
-
-#define sql_batch_start(x,y) my_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
-#define sql_batch_insert(x,y,z) my_batch_insert(x,y,z)
-#define sql_batch_lock_path_query my_sqlite_batch_lock_query
-#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
-#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
-#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
-#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
-
-/* In cats/sqlite.c */
-void my_sqlite_free_table(B_DB *mdb);
-SQL_ROW my_sqlite_fetch_row(B_DB *mdb);
-int my_sqlite_query(B_DB *mdb, const char *cmd);
-void my_sqlite_field_seek(B_DB *mdb, int field);
-SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb);
-uint64_t my_sqlite_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name);
-extern const char* my_sqlite_batch_lock_query;
-extern const char* my_sqlite_batch_unlock_query;
-extern const char* my_sqlite_batch_fill_filename_query;
-extern const char* my_sqlite_batch_fill_path_query;
-
-
-#else
-
-/* S Q L I T E 3 */
-
-
-#ifdef HAVE_SQLITE3
-
-#include <sqlite3.h>
-
-/* Define opaque structure for sqlite */
-struct sqlite3 {
- char dummy;
-};
-
-#define IS_NUM(x) ((x) == 1)
-#define IS_NOT_NULL(x) ((x) == 1)
-
-typedef struct s_sql_field {
- char *name; /* name of column */
- int length; /* length */
- int max_length; /* max length */
- uint32_t type; /* type */
- uint32_t flags; /* flags */
-} SQL_FIELD;
-
-/*
- * This is the "real" definition that should only be
- * used inside sql.c and associated database interface
- * subroutines.
- * S Q L I T E
- */
-struct B_DB {
- dlink link; /* queue control */
- brwlock_t lock; /* transaction lock */
- struct sqlite3 *db;
- char **result;
- int status;
- int nrow; /* nrow returned from sqlite */
- int ncolumn; /* ncolum returned from sqlite */
- int num_rows; /* used by code */
- int row; /* seek row */
- int field; /* seek field */
- SQL_FIELD **fields; /* defined fields */
- int ref_count;
- char *db_name;
- char *db_user;
- char *db_address; /* host name address */
- char *db_socket; /* socket for local access */
- char *db_password;
- int db_port; /* port for host name address */
- bool connected; /* connection made to db */
- bool fields_defined; /* set when fields defined */
- char *sqlite_errmsg; /* error message returned by sqlite */
- POOLMEM *errmsg; /* nicely edited error message */
- POOLMEM *cmd; /* SQL command string */
- POOLMEM *cached_path; /* cached path name */
- int cached_path_len; /* length of cached path */
- uint32_t cached_path_id; /* cached path id */
- bool allow_transactions; /* transactions allowed */
- bool transaction; /* transaction started */
- int changes; /* changes during transaction */
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file name */
- POOLMEM *esc_path; /* Escaped path name */
- POOLMEM *esc_obj; /* Escaped restore object */
- int fnl; /* file name length */
- int pnl; /* path name length */
-};
-
-/*
- * Conversion of sqlite 2 names to sqlite3
- */
-#define sqlite_last_insert_rowid sqlite3_last_insert_rowid
-#define sqlite_open sqlite3_open
-#define sqlite_close sqlite3_close
-#define sqlite_result sqlite3_result
-#define sqlite_exec sqlite3_exec
-#define sqlite_get_table sqlite3_get_table
-#define sqlite_free_table sqlite3_free_table
-
-
-/*
- * "Generic" names for easier conversion
- *
- * S Q L I T E 3
- */
-#define sql_store_result(x) (x)->result
-#define sql_free_result(x) my_sqlite_free_table(x)
-#define sql_fetch_row(x) my_sqlite_fetch_row(x)
-#define sql_query(x, y) my_sqlite_query((x), (y))
-#define sql_insert_autokey_record(x, y, z) my_sqlite_insert_autokey_record((x), (y), (z))
-#ifdef HAVE_SQLITE3
-#define sql_close(x) sqlite3_close((x)->db)
-#else
-#define sql_close(x) sqlite_close((x)->db)
-#endif
-#define sql_strerror(x) (x)->sqlite_errmsg?(x)->sqlite_errmsg:"unknown"
-#define sql_num_rows(x) (x)->nrow
-#define sql_data_seek(x, i) (x)->row = (i)
-#define sql_affected_rows(x) sqlite3_changes((x)->db)
-#define sql_field_seek(x, y) my_sqlite_field_seek((x), (y))
-#define sql_fetch_field(x) my_sqlite_fetch_field(x)
-#define sql_num_fields(x) ((x)->ncolumn)
-#define sql_batch_start(x,y) my_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
-#define sql_batch_insert(x,y,z) my_batch_insert(x,y,z)
-#define SQL_ROW char**
-#define SQL_MATCH "MATCH"
-#define sql_batch_lock_path_query my_sqlite_batch_lock_query
-#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
-#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
-#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
-#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
-
-/* In cats/sqlite.c */
-void my_sqlite_free_table(B_DB *mdb);
-SQL_ROW my_sqlite_fetch_row(B_DB *mdb);
-int my_sqlite_query(B_DB *mdb, const char *cmd);
-void my_sqlite_field_seek(B_DB *mdb, int field);
-SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb);
-uint64_t my_sqlite_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name);
-extern const char* my_sqlite_batch_lock_query;
-extern const char* my_sqlite_batch_unlock_query;
-extern const char* my_sqlite_batch_fill_filename_query;
-extern const char* my_sqlite_batch_fill_path_query;
-
-
-#else
-
-#ifdef HAVE_MYSQL
-
-#include <mysql.h>
-
-/*
- * This is the "real" definition that should only be
- * used inside sql.c and associated database interface
- * subroutines.
- *
- * M Y S Q L
- */
-struct B_DB {
- dlink link; /* queue control */
- brwlock_t lock; /* transaction lock */
- MYSQL mysql;
- MYSQL *db;
- MYSQL_RES *result;
- int status;
- my_ulonglong num_rows;
- int ref_count;
- char *db_name;
- char *db_user;
- char *db_password;
- char *db_address; /* host address */
- char *db_socket; /* socket for local access */
- int db_port; /* port of host address */
- bool connected;
- POOLMEM *errmsg; /* nicely edited error message */
- POOLMEM *cmd; /* SQL command string */
- POOLMEM *cached_path;
- int cached_path_len; /* length of cached path */
- uint32_t cached_path_id;
- bool allow_transactions; /* transactions allowed */
- int changes; /* changes made to db */
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file name */
- POOLMEM *esc_path; /* Escaped path name */
- POOLMEM *esc_obj; /* Escaped restore object */
- int fnl; /* file name length */
- int pnl; /* path name length */
-};
-
-#define DB_STATUS int
-
-/* "Generic" names for easier conversion */
-#define sql_store_result(x) mysql_store_result((x)->db)
-#define sql_use_result(x) mysql_use_result((x)->db)
-#define sql_free_result(x) my_mysql_free_result(x)
-#define sql_fetch_row(x) mysql_fetch_row((x)->result)
-#define sql_query(x, y) mysql_query((x)->db, (y))
-#define sql_strerror(x) mysql_error((x)->db)
-#define sql_num_rows(x) mysql_num_rows((x)->result)
-#define sql_data_seek(x, i) mysql_data_seek((x)->result, (i))
-#define sql_affected_rows(x) mysql_affected_rows((x)->db)
-#define sql_insert_autokey_record(x, y, z) my_mysql_insert_autokey_record((x), (y), (z))
-#define sql_field_seek(x, y) mysql_field_seek((x)->result, (y))
-#define sql_fetch_field(x) mysql_fetch_field((x)->result)
-#define sql_num_fields(x) (int)mysql_num_fields((x)->result)
-#define SQL_ROW MYSQL_ROW
-#define SQL_FIELD MYSQL_FIELD
-#define SQL_MATCH "MATCH"
-
-#define sql_batch_start(x,y) my_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
-#define sql_batch_insert(x,y,z) my_batch_insert(x,y,z)
-#define sql_batch_lock_path_query my_mysql_batch_lock_path_query
-#define sql_batch_lock_filename_query my_mysql_batch_lock_filename_query
-#define sql_batch_unlock_tables_query my_mysql_batch_unlock_tables_query
-#define sql_batch_fill_filename_query my_mysql_batch_fill_filename_query
-#define sql_batch_fill_path_query my_mysql_batch_fill_path_query
-
-
-extern const char* my_mysql_batch_lock_path_query;
-extern const char* my_mysql_batch_lock_filename_query;
-extern const char* my_mysql_batch_unlock_tables_query;
-extern const char* my_mysql_batch_fill_filename_query;
-extern const char* my_mysql_batch_fill_path_query;
-extern void my_mysql_free_result(B_DB *mdb);
-extern uint64_t my_mysql_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name);
-
-#else
-
-#ifdef HAVE_POSTGRESQL
-
-#include <libpq-fe.h>
-
-/* TEMP: the following is taken from select OID, typname from pg_type; */
-#define IS_NUM(x) ((x) == 20 || (x) == 21 || (x) == 23 || (x) == 700 || (x) == 701)
-#define IS_NOT_NULL(x) ((x) == 1)
-
-typedef char **POSTGRESQL_ROW;
-typedef struct pg_field {
- char *name;
- int max_length;
- unsigned int type;
- unsigned int flags; // 1 == not null
-} POSTGRESQL_FIELD;
-
-
-/*
- * This is the "real" definition that should only be
- * used inside sql.c and associated database interface
- * subroutines.
- *
- * P O S T G R E S Q L
- */
-struct B_DB {
- dlink link; /* queue control */
- brwlock_t lock; /* transaction lock */
- PGconn *db;
- PGresult *result;
- int status;
- POSTGRESQL_ROW row;
- POSTGRESQL_FIELD *fields;
- int num_rows;
- int row_size; /* size of malloced rows */
- int num_fields;
- int fields_size; /* size of malloced fields */
- int row_number; /* row number from my_postgresql_data_seek */
- int field_number; /* field number from my_postgresql_field_seek */
- int ref_count;
- char *db_name;
- char *db_user;
- char *db_password;
- char *db_address; /* host address */
- char *db_socket; /* socket for local access */
- int db_port; /* port of host address */
- bool connected;
- POOLMEM *errmsg; /* nicely edited error message */
- POOLMEM *cmd; /* SQL command string */
- POOLMEM *cached_path;
- int cached_path_len; /* length of cached path */
- uint32_t cached_path_id;
- bool allow_transactions; /* transactions allowed */
- bool transaction; /* transaction started */
- int changes; /* changes made to db */
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file name */
- POOLMEM *esc_path; /* Escaped path name */
- unsigned char *esc_obj; /* Escaped restore object */
- int fnl; /* file name length */
- int pnl; /* path name length */
-};
-
-void my_postgresql_free_result(B_DB *mdb);
-POSTGRESQL_ROW my_postgresql_fetch_row (B_DB *mdb);
-int my_postgresql_query (B_DB *mdb, const char *query);
-void my_postgresql_data_seek (B_DB *mdb, int row);
-uint64_t my_postgresql_insert_autokey_record (B_DB *mdb, const char *query, const char *table_name);
-void my_postgresql_field_seek (B_DB *mdb, int row);
-POSTGRESQL_FIELD * my_postgresql_fetch_field(B_DB *mdb);
-
-int my_postgresql_batch_start(JCR *jcr, B_DB *mdb);
-int my_postgresql_batch_end(JCR *jcr, B_DB *mdb, const char *error);
-typedef struct ATTR_DBR ATTR_DBR;
-int my_postgresql_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
-char *my_postgresql_copy_escape(char *dest, char *src, size_t len);
-
-extern const char* my_pg_batch_lock_path_query;
-extern const char* my_pg_batch_lock_filename_query;
-extern const char* my_pg_batch_unlock_tables_query;
-extern const char* my_pg_batch_fill_filename_query;
-extern const char* my_pg_batch_fill_path_query;
-
-/* "Generic" names for easier conversion */
-#define sql_store_result(x) ((x)->result)
-#define sql_free_result(x) my_postgresql_free_result(x)
-#define sql_fetch_row(x) my_postgresql_fetch_row(x)
-#define sql_query(x, y) my_postgresql_query((x), (y))
-#define sql_close(x) PQfinish((x)->db)
-#define sql_strerror(x) PQerrorMessage((x)->db)
-#define sql_num_rows(x) ((unsigned) PQntuples((x)->result))
-#define sql_data_seek(x, i) my_postgresql_data_seek((x), (i))
-#define sql_affected_rows(x) ((unsigned) atoi(PQcmdTuples((x)->result)))
-#define sql_insert_autokey_record(x, y, z) my_postgresql_insert_autokey_record((x), (y), (z))
-#define sql_field_seek(x, y) my_postgresql_field_seek((x), (y))
-#define sql_fetch_field(x) my_postgresql_fetch_field(x)
-#define sql_num_fields(x) ((x)->num_fields)
-
-#define sql_batch_start(x,y) my_postgresql_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_postgresql_batch_end(x,y,z)
-#define sql_batch_insert(x,y,z) my_postgresql_batch_insert(x,y,z)
-#define sql_batch_lock_path_query my_pg_batch_lock_path_query
-#define sql_batch_lock_filename_query my_pg_batch_lock_filename_query
-#define sql_batch_unlock_tables_query my_pg_batch_unlock_tables_query
-#define sql_batch_fill_filename_query my_pg_batch_fill_filename_query
-#define sql_batch_fill_path_query my_pg_batch_fill_path_query
-
-#define SQL_ROW POSTGRESQL_ROW
-#define SQL_FIELD POSTGRESQL_FIELD
-#define SQL_MATCH "~"
-
-#else
-
-#ifdef HAVE_INGRES
-
-#include "myingres.h"
-
-/* TEMP: the following is taken from $(II_SYSTEM)/ingres/files/eqsqlda.h IISQ_ types */
-#define IS_NUM(x) ((x) == 10 || (x) == 30 || (x) == 31)
-#define IS_NOT_NULL(x) ((x) == 1)
-
-typedef char **INGRES_ROW;
-
-/*
- * This is the "real" definition that should only be
- * used inside sql.c and associated database interface
- * subroutines.
- *
- * I N G R E S
- */
-struct B_DB {
- dlink link; /* queue control */
- brwlock_t lock; /* transaction lock */
- INGconn *db;
- INGresult *result;
- int status;
- INGRES_ROW row;
- INGRES_FIELD *fields;
- int num_rows;
- int row_size; /* size of malloced rows */
- int num_fields;
- int fields_size; /* size of malloced fields */
- int row_number; /* row number from my_ingres_data_seek */
- int field_number; /* field number from my_ingres_field_seek */
- int ref_count;
- char *db_name;
- char *db_user;
- char *db_password;
- char *db_address; /* host address */
- char *db_socket; /* socket for local access */
- int db_port; /* port of host address */
- int session_id; /* unique session id */
- bool connected;
- POOLMEM *errmsg; /* nicely edited error message */
- POOLMEM *cmd; /* SQL command string */
- POOLMEM *cached_path;
- int cached_path_len; /* length of cached path */
- uint32_t cached_path_id;
- bool allow_transactions; /* transactions allowed */
- bool transaction; /* transaction started */
- bool explicit_commit; /* do an explicit commit after each query */
- int changes; /* changes made to db */
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file name */
- POOLMEM *esc_path; /* Escaped path name */
- alist *query_filters; /* Filters to convert sql queries into supported Ingres SQL */
- int fnl; /* file name length */
- int pnl; /* path name length */
-};
-
-void my_ingres_free_result(B_DB *mdb);
-INGRES_ROW my_ingres_fetch_row (B_DB *mdb);
-int my_ingres_query (B_DB *mdb, const char *query);
-void my_ingres_data_seek (B_DB *mdb, int row);
-void my_ingres_field_seek (B_DB *mdb, int row);
-INGRES_FIELD * my_ingres_fetch_field(B_DB *mdb);
-void my_ingres_close (B_DB *mdb);
-uint64_t my_ingres_insert_autokey_record (B_DB *mdb, const char *query, const char *table_name);
-
-bool my_ingres_batch_start(JCR *jcr, B_DB *mdb);
-bool my_ingres_batch_end(JCR *jcr, B_DB *mdb, const char *error);
-typedef struct ATTR_DBR ATTR_DBR;
-bool my_ingres_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
-char *my_ingres_copy_escape(char *dest, char *src, size_t len);
-
-extern const char* my_ingres_batch_lock_path_query;
-extern const char* my_ingres_batch_lock_filename_query;
-extern const char* my_ingres_batch_unlock_tables_query;
-extern const char* my_ingres_batch_fill_filename_query;
-extern const char* my_ingres_batch_fill_path_query;
-
-/* "Generic" names for easier conversion */
-#define sql_store_result(x) ((x)->result)
-#define sql_free_result(x) my_ingres_free_result(x)
-#define sql_fetch_row(x) my_ingres_fetch_row(x)
-#define sql_query(x, y) my_ingres_query((x), (y))
-#define sql_close(x) my_ingres_close(x)
-#define sql_strerror(x) INGerrorMessage((x)->db)
-#define sql_num_rows(x) ((unsigned) INGntuples((x)->result))
-#define sql_data_seek(x, i) my_ingres_data_seek((x), (i))
-#define sql_affected_rows(x) ((x)->num_rows)
-#define sql_insert_autokey_record(x, y, z) my_ingres_insert_autokey_record((x), (y), (z))
-#define sql_field_seek(x, y) my_ingres_field_seek((x), (y))
-#define sql_fetch_field(x) my_ingres_fetch_field(x)
-#define sql_num_fields(x) ((x)->num_fields)
-
-#define sql_batch_start(x,y) my_ingres_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_ingres_batch_end(x,y,z)
-#define sql_batch_insert(x,y,z) my_ingres_batch_insert(x,y,z)
-#define sql_batch_lock_path_query my_ingres_batch_lock_path_query
-#define sql_batch_lock_filename_query my_ingres_batch_lock_filename_query
-#define sql_batch_unlock_tables_query my_ingres_batch_unlock_tables_query
-#define sql_batch_fill_filename_query my_ingres_batch_fill_filename_query
-#define sql_batch_fill_path_query my_ingres_batch_fill_path_query
-
-#define SQL_ROW INGRES_ROW
-#define SQL_FIELD INGRES_FIELD
-#define SQL_MATCH "~"
-
-#else
-
-#ifdef HAVE_DBI
-
-#include <dbi/dbi.h>
-
-#ifdef HAVE_BATCH_FILE_INSERT
-#include <dbi/dbi-dev.h>
-#endif //HAVE_BATCH_FILE_INSERT
-
-#define IS_NUM(x) ((x) == 1 || (x) == 2 )
-#define IS_NOT_NULL(x) ((x) == (1 << 0))
-
-typedef char **DBI_ROW;
-typedef struct dbi_field {
- char *name;
- int max_length;
- unsigned int type;
- unsigned int flags; // 1 == not null
-} DBI_FIELD;
-
-typedef struct dbi_field_get {
- dlink link;
- char *value;
-} DBI_FIELD_GET;
-
-/*
- * This is the "real" definition that should only be
- * used inside sql.c and associated database interface
- * subroutines.
- *
- * D B I
- */
-struct B_DB {
- dlink link; /* queue control */
- brwlock_t lock; /* transaction lock */
- dbi_conn *db;
- dbi_result *result;
- dbi_inst instance;
- dbi_error_flag status;
- DBI_ROW row;
- DBI_FIELD *fields;
- DBI_FIELD_GET *field_get;
- int num_rows;
- int row_size; /* size of malloced rows */
- int num_fields;
- int fields_size; /* size of malloced fields */
- int row_number; /* row number from my_postgresql_data_seek */
- int field_number; /* field number from my_postgresql_field_seek */
- int ref_count;
- int db_type; /* DBI driver defined */
- char *db_driverdir ; /* DBI driver dir */
- char *db_driver; /* DBI type database */
- char *db_name;
- char *db_user;
- char *db_password;
- char *db_address; /* host address */
- char *db_socket; /* socket for local access */
- int db_port; /* port of host address */
- bool connected;
- POOLMEM *errmsg; /* nicely edited error message */
- POOLMEM *cmd; /* SQL command string */
- POOLMEM *cached_path;
- int cached_path_len; /* length of cached path */
- uint32_t cached_path_id;
- bool allow_transactions; /* transactions allowed */
- bool transaction; /* transaction started */
- int changes; /* changes made to db */
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file name */
- POOLMEM *esc_path; /* Escaped path name */
- POOLMEM *esc_obj; /* Escaped restore object */
- int fnl; /* file name length */
- int pnl; /* path name length */
-};
-
-void my_dbi_free_result(B_DB *mdb);
-DBI_ROW my_dbi_fetch_row (B_DB *mdb);
-int my_dbi_query (B_DB *mdb, const char *query);
-void my_dbi_data_seek (B_DB *mdb, int row);
-void my_dbi_field_seek (B_DB *mdb, int row);
-DBI_FIELD * my_dbi_fetch_field(B_DB *mdb);
-const char * my_dbi_strerror (B_DB *mdb);
-int my_dbi_getisnull (dbi_result *result, int row_number, int column_number);
-char * my_dbi_getvalue (dbi_result *result, int row_number, unsigned int column_number);
-//int my_dbi_getvalue (dbi_result *result, int row_number, unsigned int column_number, char *value);
-uint64_t my_dbi_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name);
-
-int my_dbi_batch_start(JCR *jcr, B_DB *mdb);
-int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error);
-typedef struct ATTR_DBR ATTR_DBR;
-int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
-char *my_postgresql_copy_escape(char *dest, char *src, size_t len);
-// typedefs for libdbi work with postgresql copy insert
-typedef int (*custom_function_insert_t)(void*, const char*, int);
-typedef char* (*custom_function_error_t)(void*);
-typedef int (*custom_function_end_t)(void*, const char*);
-
-extern const char* my_dbi_batch_lock_path_query[5];
-extern const char* my_dbi_batch_lock_filename_query[5];
-extern const char* my_dbi_batch_unlock_tables_query[5];
-extern const char* my_dbi_batch_fill_filename_query[5];
-extern const char* my_dbi_batch_fill_path_query[5];
-extern const char* my_dbi_match[5];
-
-/* "Generic" names for easier conversion */
-#define sql_store_result(x) (x)->result
-#define sql_free_result(x) my_dbi_free_result(x)
-#define sql_fetch_row(x) my_dbi_fetch_row(x)
-#define sql_query(x, y) my_dbi_query((x), (y))
-#define sql_close(x) dbi_conn_close((x)->db)
-#define sql_strerror(x) my_dbi_strerror(x)
-#define sql_num_rows(x) dbi_result_get_numrows((x)->result)
-#define sql_data_seek(x, i) my_dbi_data_seek((x), (i))
-#define SQL_MATCH my_dbi_match[db_type]
-/* #define sql_affected_rows(x) dbi_result_get_numrows_affected((x)->result) */
-#define sql_affected_rows(x) 1
-#define sql_insert_autokey_record(x, y, z) my_dbi_insert_autokey_record((x), (y), (z))
-#define sql_field_seek(x, y) my_dbi_field_seek((x), (y))
-#define sql_fetch_field(x) my_dbi_fetch_field(x)
-#define sql_num_fields(x) ((x)->num_fields)
-#define sql_batch_start(x,y) my_dbi_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_dbi_batch_end(x,y,z)
-#define sql_batch_insert(x,y,z) my_dbi_batch_insert(x,y,z)
-#define sql_batch_lock_path_query my_dbi_batch_lock_path_query[db_type]
-#define sql_batch_lock_filename_query my_dbi_batch_lock_filename_query[db_type]
-#define sql_batch_unlock_tables_query my_dbi_batch_unlock_tables_query[db_type]
-#define sql_batch_fill_filename_query my_dbi_batch_fill_filename_query[db_type]
-#define sql_batch_fill_path_query my_dbi_batch_fill_path_query[db_type]
-
-#define SQL_ROW DBI_ROW
-#define SQL_FIELD DBI_FIELD
-
-#endif /* HAVE_SQLITE3 */
-#endif /* HAVE_MYSQL */
-#endif /* HAVE_SQLITE */
-#endif /* HAVE_POSTGRESQL */
-#endif /* HAVE_INGRES */
-#endif /* HAVE_DBI */
-#endif
-
-/* Use for better error location printing */
-#define UPDATE_DB(jcr, db, cmd) UpdateDB(__FILE__, __LINE__, jcr, db, cmd)
-#define INSERT_DB(jcr, db, cmd) InsertDB(__FILE__, __LINE__, jcr, db, cmd)
-#define QUERY_DB(jcr, db, cmd) QueryDB(__FILE__, __LINE__, jcr, db, cmd)
-#define DELETE_DB(jcr, db, cmd) DeleteDB(__FILE__, __LINE__, jcr, db, cmd)
-
-
-#else /* not __SQL_C */
-
-/* This is a "dummy" definition for use outside of sql.c
- */
-struct B_DB {
- int dummy; /* for SunOS compiler */
-};
-
-#endif /* __SQL_C */
+#ifndef __CATS_H_
+#define __CATS_H_ 1
/* ==============================================================
*
* cats directory.
*/
-extern uint32_t bacula_db_version;
-
#define faddr_t long
/*
~dbid_list(); /* in sql.c */
};
-
-
-
/* Job information passed to create job record and update
* job record at end of job. Note, although this record
* contains all the fields found in the Job database record,
db_list_ctx &operator=(const db_list_ctx&); /* prohibit class assignment */
};
+typedef enum {
+ SQL_INTERFACE_TYPE_MYSQL = 0,
+ SQL_INTERFACE_TYPE_POSTGRESQL = 1,
+ SQL_INTERFACE_TYPE_SQLITE3 = 2,
+ SQL_INTERFACE_TYPE_INGRES = 3,
+ SQL_INTERFACE_TYPE_DBI = 4
+} SQL_INTERFACETYPE;
+
+typedef enum {
+ SQL_TYPE_MYSQL = 0,
+ SQL_TYPE_POSTGRESQL = 1,
+ SQL_TYPE_SQLITE3 = 2,
+ SQL_TYPE_INGRES = 3,
+ SQL_TYPE_UNKNOWN = 99
+} SQL_DBTYPE;
+
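+/*
+ * The interface type records which driver implementation is in use (for
+ * example DBI), while the db type records the underlying SQL dialect that
+ * implementation talks to; the DBI backend sets SQL_INTERFACE_TYPE_DBI
+ * together with the matching SQL_TYPE_* value.
+ */
+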
+typedef void (DB_LIST_HANDLER)(void *, const char *);
+typedef int (DB_RESULT_HANDLER)(void *, int, char **);
+
+#define db_lock(mdb) mdb->_db_lock(__FILE__, __LINE__)
+#define db_unlock(mdb) mdb->_db_unlock(__FILE__, __LINE__)
+
+/* Current database version number for all drivers */
+#define BDB_VERSION 13
+
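+/*
+ * Abstract base class for the catalog backends; each driver (such as the
+ * DBI backend reworked later in this patch) derives from B_DB and supplies
+ * the pure virtual methods declared at the end of the class.
+ */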
+class B_DB: public SMARTALLOC {
+protected:
+ brwlock_t m_lock; /* transaction lock */
+ dlink m_link; /* queue control */
+ SQL_INTERFACETYPE m_db_interface_type; /* type of backend used */
+ SQL_DBTYPE m_db_type; /* database type */
+ int m_ref_count; /* reference count */
+ bool m_connected; /* connection made to db */
+ bool m_have_batch_insert; /* have batch insert support ? */
+ char *m_db_driver; /* database driver */
+ char *m_db_driverdir; /* database driver dir */
+ char *m_db_name; /* database name */
+ char *m_db_user; /* database user */
+ char *m_db_address; /* host name address */
+ char *m_db_socket; /* socket for local access */
+ char *m_db_password; /* database password */
+ int m_db_port; /* port for host name address */
+ bool m_disabled_batch_insert; /* explicitly disabled batch insert mode ? */
+
+public:
+ POOLMEM *errmsg; /* nicely edited error message */
+ POOLMEM *cmd; /* SQL command string */
+ POOLMEM *cached_path; /* cached path name */
+ int cached_path_len; /* length of cached path */
+ uint32_t cached_path_id; /* cached path id */
+ int changes; /* changes during transaction */
+ POOLMEM *fname; /* Filename only */
+ POOLMEM *path; /* Path only */
+ POOLMEM *esc_name; /* Escaped file name */
+ POOLMEM *esc_path; /* Escaped path name */
+ POOLMEM *esc_obj; /* Escaped restore object */
+ int fnl; /* file name length */
+ int pnl; /* path name length */
+
+ /* methods */
+ B_DB() {};
+ virtual ~B_DB() {};
+ const char *get_db_name(void) { return m_db_name; };
+ const char *get_db_user(void) { return m_db_user; };
+ bool is_connected(void) { return m_connected; };
+ bool batch_insert_available(void) { return m_have_batch_insert; };
+ void increment_refcount(void) { m_ref_count++; };
+
+ /* low level methods */
+ bool db_match_database(const char *db_driver, const char *db_name,
+ const char *db_address, int db_port);
+ B_DB *db_clone_database_connection(JCR *jcr, bool mult_db_connections);
+ int db_get_type_index(void) { return m_db_type; };
+ const char *db_get_type(void);
+ void _db_lock(const char *file, int line);
+ void _db_unlock(const char *file, int line);
+ bool db_sql_query(const char *query, int flags=0);
+ void print_lock_info(FILE *fp);
+
+ /* Pure virtual low level methods */
+ virtual bool db_open_database(JCR *jcr) = 0;
+ virtual void db_close_database(JCR *jcr) = 0;
+ virtual void db_thread_cleanup(void) = 0;
+ virtual void db_escape_string(JCR *jcr, char *snew, char *old, int len) = 0;
+ virtual char *db_escape_object(JCR *jcr, char *old, int len) = 0;
+ virtual void db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *len) = 0;
+ virtual void db_start_transaction(JCR *jcr) = 0;
+ virtual void db_end_transaction(JCR *jcr) = 0;
+ virtual bool db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx) = 0;
+};
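+
+/*
+ * Illustrative use of the interface above (sketch only, not part of this
+ * patch): a caller obtains a backend through db_init_database() and then
+ * drives it through the virtual methods, e.g.
+ *
+ *    B_DB *db = db_init_database(jcr, "dbi:postgresql", name, user, pwd,
+ *                                addr, port, NULL, false, false);
+ *    if (db && db->db_open_database(jcr)) {
+ *       db->db_sql_query("SELECT 1", result_handler, ctx);
+ *       db->db_close_database(jcr);
+ *    }
+ */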
+
+/* sql_query Query Flags */
+#define QF_STORE_RESULT 0x01
+
+/* Use for better error location printing */
+#define UPDATE_DB(jcr, db, cmd) UpdateDB(__FILE__, __LINE__, jcr, db, cmd)
+#define INSERT_DB(jcr, db, cmd) InsertDB(__FILE__, __LINE__, jcr, db, cmd)
+#define QUERY_DB(jcr, db, cmd) QueryDB(__FILE__, __LINE__, jcr, db, cmd)
+#define DELETE_DB(jcr, db, cmd) DeleteDB(__FILE__, __LINE__, jcr, db, cmd)
#include "protos.h"
#include "jcr.h"
#include "sql_cmds.h"
/*
- * Exported globals from sql.c
- */
-extern int CATS_IMP_EXP db_type; /* SQL engine type index */
-
-/*
- * Some functions exported by sql.c for use within the
- * cats directory.
+ * Some functions exported by sql.c for use within the cats directory.
*/
void list_result(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *send, void *ctx, e_list_type type);
void list_dashes(B_DB *mdb, DB_LIST_HANDLER *send, void *ctx);
int get_sql_record_max(JCR *jcr, B_DB *mdb);
bool check_tables_version(JCR *jcr, B_DB *mdb);
bool db_check_max_connections(JCR *jcr, B_DB *mdb, uint32_t nb);
-void _db_unlock(const char *file, int line, B_DB *mdb);
-void _db_lock(const char *file, int line, B_DB *mdb);
-const char *db_get_type(void);
void print_dashes(B_DB *mdb);
void print_result(B_DB *mdb);
int DeleteDB(const char *file, int line, JCR *jcr, B_DB *db, char *delete_cmd);
int UpdateDB(const char *file, int line, JCR *jcr, B_DB *db, char *update_cmd);
void split_path_and_file(JCR *jcr, B_DB *mdb, const char *fname);
-#endif /* __SQL_H_ */
+#endif /* __CATS_H_ */
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2010-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+/*
+ * Dummy bacula backend function replaced with the correct one at install time.
+ */
+
+#include "bacula.h"
+#include "cats.h"
+
+B_DB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user,
+ const char *db_password, const char *db_address, int db_port, const char *db_socket,
+ bool mult_db_connections, bool disable_batch_insert)
+{
+ Jmsg(jcr, M_FATAL, 0, _("Please replace this dummy libbaccats library with a proper one.\n"));
+
+ return NULL;
+}
+
#!/bin/sh
#
# This routine creates the Bacula database
-# using PostgreSQL, Ingres, MySQL, or SQLite.
+# using PostgreSQL, Ingres, MySQL, or SQLite.
#
-if test xsqlite3 = x@DB_TYPE@ ; then
- echo "Creating SQLite database"
- @scriptdir@/create_@DB_TYPE@_database
-else
- if test xmysql = x@DB_TYPE@ ; then
- echo "Creating MySQL database"
- @scriptdir@/create_mysql_database $*
- elif test xingres = x@DB_TYPE@ ; then
- echo "Creating Ingres database with $*"
- @scriptdir@/create_ingres_database $*
- else
- echo "Creating PostgreSQL database"
- @scriptdir@/create_postgresql_database $*
- fi
+
+default_db_type=@DEFAULT_DB_TYPE@
+
+#
+# See if the first argument is a valid backend name.
+# If so, the user overrides the default database backend.
+#
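+# For example (illustrative): "create_bacula_database postgresql" forces the
+# PostgreSQL create script regardless of the configured default backend.
+#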
+if [ $# -gt 0 ]; then
+ case $1 in
+ sqlite3)
+ db_type=$1
+ shift
+ ;;
+ mysql)
+ db_type=$1
+ shift
+ ;;
+ postgresql)
+ db_type=$1
+ shift
+ ;;
+ ingres)
+ db_type=$1
+ shift
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+#
+# If no new db_type is given, use the default db_type.
+#
+if [ -z "${db_type}" ]; then
+ db_type="${default_db_type}"
fi
+
+echo "Creating ${db_type} database"
+@scriptdir@/create_${db_type}_database $*
# shell script to create Bacula database(s)
#
-bindir=@SQL_BINDIR@
+bindir=@INGRES_BINDIR@
PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
db_user=${db_user:-@db_user@}
# shell script to create Bacula database(s)
#
-bindir=@SQL_BINDIR@
+bindir=@MYSQL_BINDIR@
db_name=@db_name@
if $bindir/mysql $* -f <<END-OF-DATA
# shell script to create Bacula database(s)
#
-PATH="@SQL_BINDIR@:$PATH"
+PATH="@POSTGRESQL_BINDIR@:$PATH"
db_name=${db_name:-@db_name@}
#
#
# shell script to create Bacula SQLite tables
-bindir=@SQL_BINDIR@
+bindir=@SQLITE_BINDIR@
cd @working_dir@
-sqlite=@DB_TYPE@
db_name=@db_name@
-${bindir}/${sqlite} $* ${db_name}.db <<END-OF-DATA
+${bindir}/sqlite3 $* ${db_name}.db <<END-OF-DATA
END-OF-DATA
exit 0
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2003-2010 Free Software Foundation Europe e.V.
+ Copyright (C) 2003-2011 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
* based upon work done by Dan Langille, December 2003 and
* by Kern Sibbald, March 2000
*
+ * Major rewrite by Marco van Wieringen, January 2010 for catalog refactoring.
*/
/*
* This code only compiles against a recent version of libdbi. The current
* cvs co :pserver:anonymous@libdbi-drivers.cvs.sourceforge.net:/cvsroot/libdbi-drivers
*/
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
#include "bacula.h"
-#include "cats.h"
#ifdef HAVE_DBI
+#include "cats.h"
+#include "bdb_priv.h"
+#include <dbi/dbi.h>
+#include <dbi/dbi-dev.h>
+#include <bdb_dbi.h>
+
/* -----------------------------------------------------------------------
*
* DBI dependent defines and subroutines
* -----------------------------------------------------------------------
*/
-/* List of open databases */
+/*
+ * List of open databases
+ */
static dlist *db_list = NULL;
-/* Control allocated fields by my_dbi_getvalue */
+/*
+ * Control allocated fields by dbi_getvalue
+ */
static dlist *dbi_getvalue_list = NULL;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-/*
- * Retrieve database type
- */
-const char *
-db_get_type(void)
-{
- return "DBI";
-}
-
-/*
- * Initialize database data structure. In principal this should
- * never have errors, or it is really fatal.
- */
-B_DB *
-db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password,
- const char *db_address, int db_port, const char *db_socket,
- int mult_db_connections)
+typedef int (*custom_function_insert_t)(void*, const char*, int);
+typedef char* (*custom_function_error_t)(void*);
+typedef int (*custom_function_end_t)(void*, const char*);
+
+B_DB_DBI::B_DB_DBI(JCR *jcr,
+ const char *db_driver,
+ const char *db_name,
+ const char *db_user,
+ const char *db_password,
+ const char *db_address,
+ int db_port,
+ const char *db_socket,
+ bool mult_db_connections,
+ bool disable_batch_insert)
{
- B_DB *mdb = NULL;
- DBI_FIELD_GET *field;
- char db_driver[10];
+ char *p;
+ char new_db_driver[10];
char db_driverdir[256];
+ DBI_FIELD_GET *field;
- /* Constraint the db_driver */
- if(db_type == -1) {
- Jmsg(jcr, M_FATAL, 0, _("A dbi driver for DBI must be supplied.\n"));
- return NULL;
+ p = (char *)(db_driver + 4);
+ if (strcasecmp(p, "mysql") == 0) {
+ m_db_type = SQL_TYPE_MYSQL;
+ bstrncpy(new_db_driver, "mysql", sizeof(new_db_driver));
+ } else if (strcasecmp(p, "postgresql") == 0) {
+ m_db_type = SQL_TYPE_POSTGRESQL;
+ bstrncpy(new_db_driver, "pgsql", sizeof(new_db_driver));
+ } else if (strcasecmp(p, "sqlite3") == 0) {
+ m_db_type = SQL_TYPE_SQLITE3;
+ bstrncpy(new_db_driver, "sqlite3", sizeof(new_db_driver));
+ } else if (strcasecmp(p, "ingres") == 0) {
+ m_db_type = SQL_TYPE_INGRES;
+ bstrncpy(new_db_driver, "ingres", sizeof(new_db_driver));
+ } else {
+ Jmsg(jcr, M_ABORT, 0, _("Unknown database type: %s\n"), p);
+ return;
}
- /* Do the correct selection of driver.
- * Can be one of the varius supported by libdbi
+ /*
+    * Set db_driverdir to the directory where the libdbi drivers live
*/
- switch (db_type) {
- case SQL_TYPE_MYSQL:
- bstrncpy(db_driver,"mysql", sizeof(db_driver));
- break;
- case SQL_TYPE_POSTGRESQL:
- bstrncpy(db_driver,"pgsql", sizeof(db_driver));
- break;
- case SQL_TYPE_SQLITE:
- bstrncpy(db_driver,"sqlite", sizeof(db_driver));
- break;
- case SQL_TYPE_SQLITE3:
- bstrncpy(db_driver,"sqlite3", sizeof(db_driver));
- break;
- }
-
- /* Set db_driverdir whereis is the libdbi drivers */
bstrncpy(db_driverdir, DBI_DRIVER_DIR, 255);
- if (!db_user) {
- Jmsg(jcr, M_FATAL, 0, _("A user name for DBI must be supplied.\n"));
- return NULL;
- }
- P(mutex); /* lock DB queue */
- if (db_list == NULL) {
- db_list = New(dlist(mdb, &mdb->link));
- dbi_getvalue_list = New(dlist(field, &field->link));
- }
- if (!mult_db_connections) {
- /* Look to see if DB already open */
- foreach_dlist(mdb, db_list) {
- if (bstrcmp(mdb->db_name, db_name) &&
- bstrcmp(mdb->db_address, db_address) &&
- bstrcmp(mdb->db_driver, db_driver) &&
- mdb->db_port == db_port) {
- Dmsg4(100, "DB REopen %d %s %s erro: %d\n", mdb->ref_count, db_driver, db_name,
- dbi_conn_error(mdb->db, NULL));
- mdb->ref_count++;
- V(mutex);
- return mdb; /* already open */
- }
- }
- }
- Dmsg0(100, "db_open first time\n");
- mdb = (B_DB *)malloc(sizeof(B_DB));
- memset(mdb, 0, sizeof(B_DB));
- mdb->db_name = bstrdup(db_name);
- mdb->db_user = bstrdup(db_user);
+ /*
+ * Initialize the parent class members.
+ */
+ m_db_interface_type = SQL_INTERFACE_TYPE_DBI;
+ m_db_name = bstrdup(db_name);
+ m_db_user = bstrdup(db_user);
if (db_password) {
- mdb->db_password = bstrdup(db_password);
+ m_db_password = bstrdup(db_password);
}
if (db_address) {
- mdb->db_address = bstrdup(db_address);
+ m_db_address = bstrdup(db_address);
}
if (db_socket) {
- mdb->db_socket = bstrdup(db_socket);
+ m_db_socket = bstrdup(db_socket);
}
if (db_driverdir) {
- mdb->db_driverdir = bstrdup(db_driverdir);
+ m_db_driverdir = bstrdup(db_driverdir);
}
- if (db_driver) {
- mdb->db_driver = bstrdup(db_driver);
+ m_db_driver = bstrdup(new_db_driver);
+ m_db_port = db_port;
+ if (disable_batch_insert) {
+ m_disabled_batch_insert = true;
+ m_have_batch_insert = false;
+ } else {
+ m_disabled_batch_insert = false;
+#if defined(USE_BATCH_FILE_INSERT)
+#ifdef HAVE_DBI_BATCH_FILE_INSERT
+ m_have_batch_insert = true;
+#else
+ m_have_batch_insert = false;
+#endif /* HAVE_DBI_BATCH_FILE_INSERT */
+#else
+ m_have_batch_insert = false;
+#endif /* USE_BATCH_FILE_INSERT */
}
- mdb->db_type = db_type;
- mdb->db_port = db_port;
- mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
- *mdb->errmsg = 0;
- mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */
- mdb->cached_path = get_pool_memory(PM_FNAME);
- mdb->cached_path_id = 0;
- mdb->ref_count = 1;
- mdb->fname = get_pool_memory(PM_FNAME);
- mdb->path = get_pool_memory(PM_FNAME);
- mdb->esc_name = get_pool_memory(PM_FNAME);
- mdb->esc_path = get_pool_memory(PM_FNAME);
- mdb->allow_transactions = mult_db_connections;
- db_list->append(mdb); /* put db in list */
- V(mutex);
- return mdb;
+ errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
+ *errmsg = 0;
+ cmd = get_pool_memory(PM_EMSG); /* get command buffer */
+ cached_path = get_pool_memory(PM_FNAME);
+ cached_path_id = 0;
+ m_ref_count = 1;
+ fname = get_pool_memory(PM_FNAME);
+ path = get_pool_memory(PM_FNAME);
+ esc_name = get_pool_memory(PM_FNAME);
+ esc_path = get_pool_memory(PM_FNAME);
+ m_allow_transactions = mult_db_connections;
+
+ /*
+ * Initialize the private members.
+ */
+ m_db_handle = NULL;
+ m_result = NULL;
+ m_field_get = NULL;
+
+ /*
+ * Put the db in the list.
+ */
+ if (db_list == NULL) {
+ db_list = New(dlist(this, &this->m_link));
+ dbi_getvalue_list = New(dlist(field, &field->link));
+ }
+ db_list->append(this);
+}
+
+B_DB_DBI::~B_DB_DBI()
+{
}
/*
* Now actually open the database. This can generate errors,
* which are returned in the errmsg
*
- * DO NOT close the database or free(mdb) here !!!!
+ * DO NOT close the database or delete mdb here !!!!
*/
-int
-db_open_database(JCR *jcr, B_DB *mdb)
+bool B_DB_DBI::db_open_database(JCR *jcr)
{
+ bool retval = false;
int errstat;
int dbstat;
uint8_t len;
- const char *errmsg;
+ const char *dbi_errmsg;
char buf[10], *port;
int numdrivers;
- char *db_name = NULL;
- char *db_dir = NULL;
+ char *new_db_name = NULL;
+ char *new_db_dir = NULL;
P(mutex);
- if (mdb->connected) {
- V(mutex);
- return 1;
+ if (m_connected) {
+ retval = true;
+ goto bail_out;
}
- mdb->connected = false;
- if ((errstat=rwl_init(&mdb->lock)) != 0) {
+ if ((errstat=rwl_init(&m_lock)) != 0) {
berrno be;
- Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
+ Mmsg1(&errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
be.bstrerror(errstat));
- V(mutex);
- return 0;
+ goto bail_out;
}
- if (mdb->db_port) {
- bsnprintf(buf, sizeof(buf), "%d", mdb->db_port);
+ if (m_db_port) {
+ bsnprintf(buf, sizeof(buf), "%d", m_db_port);
port = buf;
} else {
port = NULL;
}
- numdrivers = dbi_initialize_r(mdb->db_driverdir, &(mdb->instance));
+ numdrivers = dbi_initialize_r(m_db_driverdir, &(m_instance));
if (numdrivers < 0) {
- Mmsg2(&mdb->errmsg, _("Unable to locate the DBD drivers to DBI interface in: \n"
+ Mmsg2(&errmsg, _("Unable to locate the DBD drivers to DBI interface in: \n"
"db_driverdir=%s. It is probaly not found any drivers\n"),
- mdb->db_driverdir,numdrivers);
- V(mutex);
- return 0;
+ m_db_driverdir,numdrivers);
+ goto bail_out;
}
- mdb->db = (void **)dbi_conn_new_r(mdb->db_driver, mdb->instance);
- /* Can be many types of databases */
- switch (mdb->db_type) {
+ m_db_handle = (void **)dbi_conn_new_r(m_db_driver, m_instance);
+ /*
+ * Can be many types of databases
+ */
+ switch (m_db_type) {
case SQL_TYPE_MYSQL:
- dbi_conn_set_option(mdb->db, "host", mdb->db_address); /* default = localhost */
- dbi_conn_set_option(mdb->db, "port", port); /* default port */
- dbi_conn_set_option(mdb->db, "username", mdb->db_user); /* login name */
- dbi_conn_set_option(mdb->db, "password", mdb->db_password); /* password */
- dbi_conn_set_option(mdb->db, "dbname", mdb->db_name); /* database name */
+ dbi_conn_set_option(m_db_handle, "host", m_db_address); /* default = localhost */
+ dbi_conn_set_option(m_db_handle, "port", port); /* default port */
+ dbi_conn_set_option(m_db_handle, "username", m_db_user); /* login name */
+ dbi_conn_set_option(m_db_handle, "password", m_db_password); /* password */
+ dbi_conn_set_option(m_db_handle, "dbname", m_db_name); /* database name */
break;
case SQL_TYPE_POSTGRESQL:
- dbi_conn_set_option(mdb->db, "host", mdb->db_address);
- dbi_conn_set_option(mdb->db, "port", port);
- dbi_conn_set_option(mdb->db, "username", mdb->db_user);
- dbi_conn_set_option(mdb->db, "password", mdb->db_password);
- dbi_conn_set_option(mdb->db, "dbname", mdb->db_name);
- break;
- case SQL_TYPE_SQLITE:
- len = strlen(working_directory) + 5;
- db_dir = (char *)malloc(len);
- strcpy(db_dir, working_directory);
- strcat(db_dir, "/");
- len = strlen(mdb->db_name) + 5;
- db_name = (char *)malloc(len);
- strcpy(db_name, mdb->db_name);
- strcat(db_name, ".db");
- dbi_conn_set_option(mdb->db, "sqlite_dbdir", db_dir);
- dbi_conn_set_option(mdb->db, "dbname", db_name);
+ dbi_conn_set_option(m_db_handle, "host", m_db_address);
+ dbi_conn_set_option(m_db_handle, "port", port);
+ dbi_conn_set_option(m_db_handle, "username", m_db_user);
+ dbi_conn_set_option(m_db_handle, "password", m_db_password);
+ dbi_conn_set_option(m_db_handle, "dbname", m_db_name);
break;
case SQL_TYPE_SQLITE3:
len = strlen(working_directory) + 5;
- db_dir = (char *)malloc(len);
- strcpy(db_dir, working_directory);
- strcat(db_dir, "/");
- len = strlen(mdb->db_name) + 5;
- db_name = (char *)malloc(len);
- strcpy(db_name, mdb->db_name);
- strcat(db_name, ".db");
- dbi_conn_set_option(mdb->db, "sqlite3_dbdir", db_dir);
- dbi_conn_set_option(mdb->db, "dbname", db_name);
- Dmsg2(500, "SQLITE: %s %s\n", db_dir, db_name);
+ new_db_dir = (char *)malloc(len);
+ strcpy(new_db_dir, working_directory);
+ strcat(new_db_dir, "/");
+ len = strlen(m_db_name) + 5;
+ new_db_name = (char *)malloc(len);
+ strcpy(new_db_name, m_db_name);
+ strcat(new_db_name, ".db");
+ dbi_conn_set_option(m_db_handle, "sqlite3_dbdir", new_db_dir);
+ dbi_conn_set_option(m_db_handle, "dbname", new_db_name);
+ Dmsg2(500, "SQLITE: %s %s\n", new_db_dir, new_db_name);
+ free(new_db_dir);
+ free(new_db_name);
break;
}
- /* If connection fails, try at 5 sec intervals for 30 seconds. */
+ /*
+ * If connection fails, try at 5 sec intervals for 30 seconds.
+ */
for (int retry=0; retry < 6; retry++) {
-
- dbstat = dbi_conn_connect(mdb->db);
- if ( dbstat == 0) {
+ dbstat = dbi_conn_connect(m_db_handle);
+ if (dbstat == 0) {
break;
}
- dbi_conn_error(mdb->db, &errmsg);
- Dmsg1(50, "dbi error: %s\n", errmsg);
+ dbi_conn_error(m_db_handle, &dbi_errmsg);
+ Dmsg1(50, "dbi error: %s\n", dbi_errmsg);
bmicrosleep(5, 0);
-
}
- if ( dbstat != 0 ) {
- Mmsg3(&mdb->errmsg, _("Unable to connect to DBI interface. Type=%s Database=%s User=%s\n"
+   if (dbstat != 0) {
+ Mmsg3(&errmsg, _("Unable to connect to DBI interface. Type=%s Database=%s User=%s\n"
"Possible causes: SQL server not running; password incorrect; max_connections exceeded.\n"),
- mdb->db_driver, mdb->db_name, mdb->db_user);
- V(mutex);
- return 0;
+ m_db_driver, m_db_name, m_db_user);
+ goto bail_out;
}
Dmsg0(50, "dbi_real_connect done\n");
Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n",
- mdb->db_user, mdb->db_name,
- mdb->db_password==NULL?"(NULL)":mdb->db_password);
+ m_db_user, m_db_name,
+ (m_db_password == NULL) ? "(NULL)" : m_db_password);
- mdb->connected = true;
+ m_connected = true;
- if (!check_tables_version(jcr, mdb)) {
- V(mutex);
- return 0;
+ if (!check_tables_version(jcr, this)) {
+ goto bail_out;
}
- switch (mdb->db_type) {
+ switch (m_db_type) {
case SQL_TYPE_MYSQL:
- /* Set connection timeout to 8 days specialy for batch mode */
- sql_query(mdb, "SET wait_timeout=691200");
- sql_query(mdb, "SET interactive_timeout=691200");
+ /*
+       * Set connection timeout to 8 days, especially for batch mode
+ */
+ sql_query("SET wait_timeout=691200");
+ sql_query("SET interactive_timeout=691200");
break;
case SQL_TYPE_POSTGRESQL:
- /* tell PostgreSQL we are using standard conforming strings
- and avoid warnings such as:
- WARNING: nonstandard use of \\ in a string literal
- */
- sql_query(mdb, "SET datestyle TO 'ISO, YMD'");
- sql_query(mdb, "set standard_conforming_strings=on");
+ /*
+ * Tell PostgreSQL we are using standard conforming strings
+ * and avoid warnings such as:
+ * WARNING: nonstandard use of \\ in a string literal
+ */
+ sql_query("SET datestyle TO 'ISO, YMD'");
+ sql_query("SET standard_conforming_strings=on");
break;
}
- if(db_dir) {
- free(db_dir);
- }
- if(db_name) {
- free(db_name);
- }
+ retval = true;
+bail_out:
V(mutex);
- return 1;
+ return retval;
}
-void
-db_close_database(JCR *jcr, B_DB *mdb)
+void B_DB_DBI::db_close_database(JCR *jcr)
{
- if (!mdb) {
- return;
- }
- db_end_transaction(jcr, mdb);
+ db_end_transaction(jcr);
P(mutex);
- sql_free_result(mdb);
- mdb->ref_count--;
- if (mdb->ref_count == 0) {
- db_list->remove(mdb);
- if (mdb->connected && mdb->db) {
- //sql_close(mdb);
- dbi_shutdown_r(mdb->instance);
- mdb->db = NULL;
- mdb->instance = NULL;
+ sql_free_result();
+ m_ref_count--;
+ if (m_ref_count == 0) {
+ db_list->remove(this);
+ if (m_connected && m_db_handle) {
+ dbi_shutdown_r(m_instance);
+ m_db_handle = NULL;
+ m_instance = NULL;
}
- rwl_destroy(&mdb->lock);
- free_pool_memory(mdb->errmsg);
- free_pool_memory(mdb->cmd);
- free_pool_memory(mdb->cached_path);
- free_pool_memory(mdb->fname);
- free_pool_memory(mdb->path);
- free_pool_memory(mdb->esc_name);
- free_pool_memory(mdb->esc_path);
- if (mdb->db_name) {
- free(mdb->db_name);
+ rwl_destroy(&m_lock);
+ free_pool_memory(errmsg);
+ free_pool_memory(cmd);
+ free_pool_memory(cached_path);
+ free_pool_memory(fname);
+ free_pool_memory(path);
+ free_pool_memory(esc_name);
+ free_pool_memory(esc_path);
+ if (m_db_driver) {
+ free(m_db_driver);
}
- if (mdb->db_user) {
- free(mdb->db_user);
+ if (m_db_name) {
+ free(m_db_name);
}
- if (mdb->db_password) {
- free(mdb->db_password);
+ if (m_db_user) {
+ free(m_db_user);
}
- if (mdb->db_address) {
- free(mdb->db_address);
+ if (m_db_password) {
+ free(m_db_password);
}
- if (mdb->db_socket) {
- free(mdb->db_socket);
+ if (m_db_address) {
+ free(m_db_address);
}
- if (mdb->db_driverdir) {
- free(mdb->db_driverdir);
+ if (m_db_socket) {
+ free(m_db_socket);
}
- if (mdb->db_driver) {
- free(mdb->db_driver);
+ if (m_db_driverdir) {
+ free(m_db_driverdir);
}
- free(mdb);
+ delete this;
if (db_list->size() == 0) {
delete db_list;
db_list = NULL;
V(mutex);
}
-void db_check_backend_thread_safe()
-{ }
-
-void db_thread_cleanup()
-{ }
-
-/*
- * Return the next unique index (auto-increment) for
- * the given table. Return NULL on error.
- *
- */
-int db_next_index(JCR *jcr, B_DB *mdb, char *table, char *index)
+void B_DB_DBI::db_thread_cleanup(void)
{
- strcpy(index, "NULL");
- return 1;
}
-
/*
* Escape strings so that DBI is happy
*
 * We need to copy the value of the pointer into snew because libdbi changes
 * the pointer
*/
-void
-db_escape_string(JCR *jcr, B_DB *mdb, char *snew, char *old, int len)
+void B_DB_DBI::db_escape_string(JCR *jcr, char *snew, char *old, int len)
{
char *inew;
char *pnew;
if (len == 0) {
snew[0] = 0;
} else {
- /* correct the size of old basead in len
- * and copy new string to inew
+ /*
+       * Correct the size of old based on len and copy the new string to inew
*/
inew = (char *)malloc(sizeof(char) * len + 1);
bstrncpy(inew,old,len + 1);
- /* escape the correct size of old */
- dbi_conn_escape_string_copy(mdb->db, inew, &pnew);
+ /*
+ * Escape the correct size of old
+ */
+ dbi_conn_escape_string_copy(m_db_handle, inew, &pnew);
free(inew);
- /* copy the escaped string to snew */
+ /*
+ * Copy the escaped string to snew
+ */
bstrncpy(snew, pnew, 2 * len + 1);
}
Dmsg2(500, "dbi_conn_escape_string_copy %p %s\n",snew,snew);
-
}
/*
- * Submit a general SQL command (cmd), and for each row returned,
- * the sqlite_handler is called with the ctx.
+ * Escape binary object so that DBI is happy
+ * Memory is stored in B_DB struct, no need to free it
*/
-bool db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
+char *B_DB_DBI::db_escape_object(JCR *jcr, char *old, int len)
{
- SQL_ROW row;
-
- Dmsg0(500, "db_sql_query started\n");
+ size_t new_len;
+ char *pnew;
- db_lock(mdb);
- if (sql_query(mdb, query) != 0) {
- Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror(mdb));
- db_unlock(mdb);
- Dmsg0(500, "db_sql_query failed\n");
- return false;
+ if (len == 0) {
+ esc_obj[0] = 0;
+ } else {
+ new_len = dbi_conn_escape_string_copy(m_db_handle, esc_obj, &pnew);
+ esc_obj = check_pool_memory_size(esc_obj, new_len+1);
+ memcpy(esc_obj, pnew, new_len);
}
- Dmsg0(500, "db_sql_query succeeded. checking handler\n");
-
- if (result_handler != NULL) {
- Dmsg0(500, "db_sql_query invoking handler\n");
- if ((mdb->result = sql_store_result(mdb)) != NULL) {
- int num_fields = sql_num_fields(mdb);
- Dmsg0(500, "db_sql_query sql_store_result suceeded\n");
- while ((row = sql_fetch_row(mdb)) != NULL) {
-
- Dmsg0(500, "db_sql_query sql_fetch_row worked\n");
- if (result_handler(ctx, num_fields, row))
- break;
- }
+ return esc_obj;
+}
- sql_free_result(mdb);
- }
+/*
+ * Unescape binary object so that DBI is happy
+ */
+void B_DB_DBI::db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *dest_len)
+{
+ if (!from) {
+ *dest[0] = 0;
+ *dest_len = 0;
+ return;
}
- db_unlock(mdb);
-
- Dmsg0(500, "db_sql_query finished\n");
-
- return true;
+ *dest = check_pool_memory_size(*dest, expected_len+1);
+ *dest_len = expected_len;
+ memcpy(*dest, from, expected_len);
+ (*dest)[expected_len]=0;
}
-
-
-DBI_ROW my_dbi_fetch_row(B_DB *mdb)
+/*
+ * Start a transaction. This groups inserts and makes things
+ * much more efficient. Usually started when inserting
+ * file attributes.
+ */
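+/*
+ * Illustrative call pattern (sketch, not part of this patch): callers
+ * bracket a run of attribute inserts with db_start_transaction(jcr) and
+ * db_end_transaction(jcr); db_end_transaction() below also flushes any
+ * cached attribute record via db_create_attributes_record().
+ */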
+void B_DB_DBI::db_start_transaction(JCR *jcr)
{
- int j;
- DBI_ROW row = NULL; // by default, return NULL
-
- Dmsg0(500, "my_dbi_fetch_row start\n");
- if ((!mdb->row || mdb->row_size < mdb->num_fields) && mdb->num_rows > 0) {
- int num_fields = mdb->num_fields;
- Dmsg1(500, "we have need space of %d bytes\n", sizeof(char *) * mdb->num_fields);
-
- if (mdb->row) {
- Dmsg0(500, "my_dbi_fetch_row freeing space\n");
- Dmsg2(500, "my_dbi_free_row row: '%p' num_fields: '%d'\n", mdb->row, mdb->num_fields);
- if (mdb->num_rows != 0) {
- for(j = 0; j < mdb->num_fields; j++) {
- Dmsg2(500, "my_dbi_free_row row '%p' '%d'\n", mdb->row[j], j);
- if(mdb->row[j]) {
- free(mdb->row[j]);
- }
- }
- }
- free(mdb->row);
+ if (!jcr->attr) {
+ jcr->attr = get_pool_memory(PM_FNAME);
+ }
+ if (!jcr->ar) {
+ jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR));
+ }
+
+ switch (m_db_type) {
+ case SQL_TYPE_SQLITE3:
+ if (!m_allow_transactions) {
+ return;
}
- //num_fields += 20; /* add a bit extra */
- mdb->row = (DBI_ROW)malloc(sizeof(char *) * num_fields);
- mdb->row_size = num_fields;
- // now reset the row_number now that we have the space allocated
- mdb->row_number = 1;
- }
+ db_lock(this);
+ /*
+ * Allow only 10,000 changes per transaction
+ */
+ if (m_transaction && changes > 10000) {
+ db_end_transaction(jcr);
+ }
+ if (!m_transaction) {
+ sql_query("BEGIN"); /* begin transaction */
+ Dmsg0(400, "Start SQLite transaction\n");
+ m_transaction = true;
+ }
+ db_unlock(this);
+ break;
+ case SQL_TYPE_POSTGRESQL:
+ /*
+ * This is turned off because transactions break
+ * if multiple simultaneous jobs are run.
+ */
+ if (!m_allow_transactions) {
+ return;
+ }
- // if still within the result set
- if (mdb->row_number <= mdb->num_rows && mdb->row_number != DBI_ERROR_BADPTR) {
- Dmsg2(500, "my_dbi_fetch_row row number '%d' is acceptable (1..%d)\n", mdb->row_number, mdb->num_rows);
- // get each value from this row
- for (j = 0; j < mdb->num_fields; j++) {
- mdb->row[j] = my_dbi_getvalue(mdb->result, mdb->row_number, j);
- // allocate space to queue row
- mdb->field_get = (DBI_FIELD_GET *)malloc(sizeof(DBI_FIELD_GET));
- // store the pointer in queue
- mdb->field_get->value = mdb->row[j];
- Dmsg4(500, "my_dbi_fetch_row row[%d] field: '%p' in queue: '%p' has value: '%s'\n",
- j, mdb->row[j], mdb->field_get->value, mdb->row[j]);
- // insert in queue to future free
- dbi_getvalue_list->append(mdb->field_get);
+ db_lock(this);
+ /*
+ * Allow only 25,000 changes per transaction
+ */
+ if (m_transaction && changes > 25000) {
+ db_end_transaction(jcr);
+ }
+ if (!m_transaction) {
+ sql_query("BEGIN"); /* begin transaction */
+ Dmsg0(400, "Start PosgreSQL transaction\n");
+ m_transaction = true;
+ }
+ db_unlock(this);
+ break;
+ case SQL_TYPE_INGRES:
+ if (!m_allow_transactions) {
+ return;
}
- // increment the row number for the next call
- mdb->row_number++;
- row = mdb->row;
- } else {
- Dmsg2(500, "my_dbi_fetch_row row number '%d' is NOT acceptable (1..%d)\n", mdb->row_number, mdb->num_rows);
+ db_lock(this);
+ /*
+ * Allow only 25,000 changes per transaction
+ */
+ if (m_transaction && changes > 25000) {
+ db_end_transaction(jcr);
+ }
+ if (!m_transaction) {
+ sql_query("BEGIN"); /* begin transaction */
+ Dmsg0(400, "Start Ingres transaction\n");
+ m_transaction = true;
+ }
+ db_unlock(this);
+ break;
+ default:
+ break;
}
+}
- Dmsg1(500, "my_dbi_fetch_row finishes returning %p\n", row);
+void B_DB_DBI::db_end_transaction(JCR *jcr)
+{
+ if (jcr && jcr->cached_attribute) {
+ Dmsg0(400, "Flush last cached attribute.\n");
+ if (!db_create_attributes_record(jcr, this, jcr->ar)) {
+ Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
+ }
+ jcr->cached_attribute = false;
+ }
- return row;
-}
+ switch (m_db_type) {
+ case SQL_TYPE_SQLITE3:
+ if (!m_allow_transactions) {
+ return;
+ }
-int my_dbi_max_length(B_DB *mdb, int field_num) {
- //
- // for a given column, find the max length
- //
- int max_length;
- int i;
- int this_length;
- char *cbuf = NULL;
+ db_lock(this);
+ if (m_transaction) {
+ sql_query("COMMIT"); /* end transaction */
+ m_transaction = false;
+ Dmsg1(400, "End SQLite transaction changes=%d\n", changes);
+ }
+ changes = 0;
+ db_unlock(this);
+ break;
+ case SQL_TYPE_POSTGRESQL:
+ if (!m_allow_transactions) {
+ return;
+ }
- max_length = 0;
- for (i = 0; i < mdb->num_rows; i++) {
- if (my_dbi_getisnull(mdb->result, i, field_num)) {
- this_length = 4; // "NULL"
- } else {
- cbuf = my_dbi_getvalue(mdb->result, i, field_num);
- this_length = cstrlen(cbuf);
- // cbuf is always free
- free(cbuf);
+ db_lock(this);
+ if (m_transaction) {
+ sql_query("COMMIT"); /* end transaction */
+ m_transaction = false;
+ Dmsg1(400, "End PostgreSQL transaction changes=%d\n", changes);
+ }
+ changes = 0;
+ db_unlock(this);
+ break;
+ case SQL_TYPE_INGRES:
+ if (!m_allow_transactions) {
+ return;
}
- if (max_length < this_length) {
- max_length = this_length;
+ db_lock(this);
+ if (m_transaction) {
+ sql_query("COMMIT"); /* end transaction */
+ m_transaction = false;
+ Dmsg1(400, "End Ingres transaction changes=%d\n", changes);
}
+ changes = 0;
+ db_unlock(this);
+ break;
+ default:
+ break;
}
-
- return max_length;
}
-DBI_FIELD * my_dbi_fetch_field(B_DB *mdb)
+/*
+ * Submit a general SQL command (cmd), and for each row returned,
+ * the result_handler is called with the ctx.
+ */
+bool B_DB_DBI::db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
{
- int i;
- int dbi_index;
-
- Dmsg0(500, "my_dbi_fetch_field starts\n");
-
- if (!mdb->fields || mdb->fields_size < mdb->num_fields) {
- if (mdb->fields) {
- free(mdb->fields);
- }
- Dmsg1(500, "allocating space for %d fields\n", mdb->num_fields);
- mdb->fields = (DBI_FIELD *)malloc(sizeof(DBI_FIELD) * mdb->num_fields);
- mdb->fields_size = mdb->num_fields;
+ bool retval = true;
+ SQL_ROW row;
- for (i = 0; i < mdb->num_fields; i++) {
- // num_fileds is starting at 1, increment i by 1
- dbi_index = i + 1;
- Dmsg1(500, "filling field %d\n", i);
- mdb->fields[i].name = (char *)dbi_result_get_field_name(mdb->result, dbi_index);
- mdb->fields[i].max_length = my_dbi_max_length(mdb, i);
- mdb->fields[i].type = dbi_result_get_field_type_idx(mdb->result, dbi_index);
- mdb->fields[i].flags = dbi_result_get_field_attribs_idx(mdb->result, dbi_index);
+ Dmsg1(500, "db_sql_query starts with %s\n", query);
- Dmsg4(500, "my_dbi_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n",
- mdb->fields[i].name, mdb->fields[i].max_length, mdb->fields[i].type,
- mdb->fields[i].flags);
- } // end for
- } // end if
+ db_lock(this);
+ if (!sql_query(query, QF_STORE_RESULT)) {
+ Mmsg(errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror());
+ Dmsg0(500, "db_sql_query failed\n");
+ retval = false;
+ goto bail_out;
+ }
- // increment field number for the next time around
+ Dmsg0(500, "db_sql_query succeeded. checking handler\n");
- Dmsg0(500, "my_dbi_fetch_field finishes\n");
- return &mdb->fields[mdb->field_number++];
-}
+ if (result_handler != NULL) {
+ Dmsg0(500, "db_sql_query invoking handler\n");
+ while ((row = sql_fetch_row()) != NULL) {
+ Dmsg0(500, "db_sql_query sql_fetch_row worked\n");
+ if (result_handler(ctx, m_num_fields, row))
+ break;
+ }
+ sql_free_result();
+ }
-void my_dbi_data_seek(B_DB *mdb, int row)
-{
- // set the row number to be returned on the next call
- // to my_dbi_fetch_row
- mdb->row_number = row;
-}
+ Dmsg0(500, "db_sql_query finished\n");
-void my_dbi_field_seek(B_DB *mdb, int field)
-{
- mdb->field_number = field;
+bail_out:
+ db_unlock(this);
+ return retval;
}
/*
* Note, if this routine returns 1 (failure), Bacula expects
* that no result has been stored.
*
- * Returns: 0 on success
- * 1 on failure
- *
+ * Returns: true on success
+ * false on failure
*/
-int my_dbi_query(B_DB *mdb, const char *query)
+bool B_DB_DBI::sql_query(const char *query, int flags)
{
- const char *errmsg;
- Dmsg1(500, "my_dbi_query started %s\n", query);
- // We are starting a new query. reset everything.
- mdb->num_rows = -1;
- mdb->row_number = -1;
- mdb->field_number = -1;
-
- if (mdb->result) {
- dbi_result_free(mdb->result); /* hmm, someone forgot to free?? */
- mdb->result = NULL;
+ bool retval = false;
+ const char *dbi_errmsg;
+
+ Dmsg1(500, "sql_query starts with %s\n", query);
+
+ /*
+ * We are starting a new query. reset everything.
+ */
+ m_num_rows = -1;
+ m_row_number = -1;
+ m_field_number = -1;
+
+ if (m_result) {
+ dbi_result_free(m_result); /* hmm, someone forgot to free?? */
+ m_result = NULL;
}
- mdb->result = (void **)dbi_conn_query(mdb->db, query);
+ m_result = (void **)dbi_conn_query(m_db_handle, query);
- if (!mdb->result) {
- Dmsg2(50, "Query failed: %s %p\n", query, mdb->result);
+ if (!m_result) {
+ Dmsg2(50, "Query failed: %s %p\n", query, m_result);
goto bail_out;
}
- mdb->status = (dbi_error_flag) dbi_conn_error(mdb->db, &errmsg);
-
- if (mdb->status == DBI_ERROR_NONE) {
+ m_status = (dbi_error_flag) dbi_conn_error(m_db_handle, &dbi_errmsg);
+ if (m_status == DBI_ERROR_NONE) {
Dmsg1(500, "we have a result\n", query);
- // how many fields in the set?
- // num_fields starting at 1
- mdb->num_fields = dbi_result_get_numfields(mdb->result);
- Dmsg1(500, "we have %d fields\n", mdb->num_fields);
- // if no result num_rows is 0
- mdb->num_rows = dbi_result_get_numrows(mdb->result);
- Dmsg1(500, "we have %d rows\n", mdb->num_rows);
+ /*
+ * How many fields in the set?
+ * num_fields starting at 1
+ */
+ m_num_fields = dbi_result_get_numfields(m_result);
+ Dmsg1(500, "we have %d fields\n", m_num_fields);
+ /*
+ * If no result num_rows is 0
+ */
+ m_num_rows = dbi_result_get_numrows(m_result);
+ Dmsg1(500, "we have %d rows\n", m_num_rows);
- mdb->status = (dbi_error_flag) 0; /* succeed */
+ m_status = (dbi_error_flag) 0; /* succeed */
} else {
Dmsg1(50, "Result status failed: %s\n", query);
goto bail_out;
}
- Dmsg0(500, "my_dbi_query finishing\n");
- return mdb->status;
+ Dmsg0(500, "sql_query finishing\n");
+ retval = true;
+ goto ok_out;
bail_out:
- mdb->status = (dbi_error_flag) dbi_conn_error(mdb->db,&errmsg);
- //dbi_conn_error(mdb->db, &errmsg);
- Dmsg4(500, "my_dbi_query we failed dbi error: "
- "'%s' '%p' '%d' flag '%d''\n", errmsg, mdb->result, mdb->result, mdb->status);
- dbi_result_free(mdb->result);
- mdb->result = NULL;
- mdb->status = (dbi_error_flag) 1; /* failed */
- return mdb->status;
+ m_status = (dbi_error_flag) dbi_conn_error(m_db_handle, &dbi_errmsg);
+ //dbi_conn_error(m_db_handle, &dbi_errmsg);
+ Dmsg4(500, "sql_query we failed dbi error: "
+ "'%s' '%p' '%d' flag '%d''\n", dbi_errmsg, m_result, m_result, m_status);
+ dbi_result_free(m_result);
+ m_result = NULL;
+ m_status = (dbi_error_flag) 1; /* failed */
+
+ok_out:
+ return retval;
}
-void my_dbi_free_result(B_DB *mdb)
+void B_DB_DBI::sql_free_result(void)
{
-
DBI_FIELD_GET *f;
- db_lock(mdb);
- if (mdb->result) {
- Dmsg1(500, "my_dbi_free_result result '%p'\n", mdb->result);
- dbi_result_free(mdb->result);
- }
-
- mdb->result = NULL;
- if (mdb->row) {
- free(mdb->row);
+ db_lock(this);
+ if (m_result) {
+ dbi_result_free(m_result);
+ m_result = NULL;
}
-
- /* now is time to free all value return by my_dbi_get_value
+ if (m_rows) {
+ free(m_rows);
+ m_rows = NULL;
+ }
+ /*
+    * Now it is time to free all values returned by dbi_getvalue;
    * this is necessary because libdbi does not free the memory returned by its results
- * and Bacula has some routine wich call more than once time my_dbi_fetch_row
+    * and Bacula has some routines which call sql_fetch_row more than once.
*
    * Using a queue to store all allocated pointers is a good way to free
    * everything when necessary
*/
foreach_dlist(f, dbi_getvalue_list) {
- Dmsg2(500, "my_dbi_free_result field value: '%p' in queue: '%p'\n", f->value, f);
free(f->value);
free(f);
}
-
- mdb->row = NULL;
-
- if (mdb->fields) {
- free(mdb->fields);
- mdb->fields = NULL;
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
}
- db_unlock(mdb);
- Dmsg0(500, "my_dbi_free_result finish\n");
-
-}
-
-const char *my_dbi_strerror(B_DB *mdb)
-{
- const char *errmsg;
-
- dbi_conn_error(mdb->db, &errmsg);
-
- return errmsg;
+ m_num_rows = m_num_fields = 0;
+ db_unlock(this);
}
-#ifdef HAVE_BATCH_FILE_INSERT
-
-/*
- * This can be a bit strang but is the one way to do
+/* dbi_getvalue
+ * like PQgetvalue;
+ * char *PQgetvalue(const PGresult *res,
+ * int row_number,
+ * int column_number);
*
- * Returns 1 if OK
- * 0 if failed
+ * use dbi_result_seek_row to search in result set
+ * this implementation returns every value as a string
*/
-int my_dbi_batch_start(JCR *jcr, B_DB *mdb)
+static char *dbi_getvalue(dbi_result *result, int row_number, unsigned int column_number)
{
- const char *query = "COPY batch FROM STDIN";
+ char *buf = NULL;
+ const char *dbi_errmsg;
+ const char *field_name;
+ unsigned short dbitype;
+ size_t field_length;
+ int64_t num;
- Dmsg0(500, "my_dbi_batch_start started\n");
+   /* Correct the index for the dbi interface:
+    * dbi indexes begin at 1, and I prefer not to
+    * change the other functions, which pass 0-based indexes
+ */
+ Dmsg3(600, "dbi_getvalue pre-starting result '%p' row number '%d' column number '%d'\n",
+ result, row_number, column_number);
- switch (mdb->db_type) {
- case SQL_TYPE_MYSQL:
- db_lock(mdb);
- if (my_dbi_query(mdb,
- "CREATE TEMPORARY TABLE batch ("
- "FileIndex integer,"
- "JobId integer,"
- "Path blob,"
- "Name blob,"
- "LStat tinyblob,"
- "MD5 tinyblob)") == 1)
- {
- Dmsg0(500, "my_dbi_batch_start failed\n");
- return 1;
- }
- db_unlock(mdb);
- Dmsg0(500, "my_dbi_batch_start finishing\n");
- return 1;
- break;
- case SQL_TYPE_POSTGRESQL:
+ column_number++;
- if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
- "fileindex int,"
- "jobid int,"
- "path varchar,"
- "name varchar,"
- "lstat varchar,"
- "md5 varchar)") == 1)
- {
- Dmsg0(500, "my_dbi_batch_start failed\n");
- return 1;
- }
+ if(row_number == 0) {
+ row_number++;
+ }
- // We are starting a new query. reset everything.
- mdb->num_rows = -1;
- mdb->row_number = -1;
- mdb->field_number = -1;
+ Dmsg3(600, "dbi_getvalue starting result '%p' row number '%d' column number '%d'\n",
+ result, row_number, column_number);
- my_dbi_free_result(mdb);
+ if(dbi_result_seek_row(result, row_number)) {
- for (int i=0; i < 10; i++) {
- my_dbi_query(mdb, query);
- if (mdb->result) {
- break;
- }
- bmicrosleep(5, 0);
- }
- if (!mdb->result) {
- Dmsg1(50, "Query failed: %s\n", query);
- goto bail_out;
- }
+ field_name = dbi_result_get_field_name(result, column_number);
+ field_length = dbi_result_get_field_length(result, field_name);
+ dbitype = dbi_result_get_field_type_idx(result,column_number);
- mdb->status = (dbi_error_flag)dbi_conn_error(mdb->db, NULL);
- //mdb->status = DBI_ERROR_NONE;
+ Dmsg3(500, "dbi_getvalue start: type: '%d' "
+ "field_length bytes: '%d' fieldname: '%s'\n",
+ dbitype, field_length, field_name);
- if (mdb->status == DBI_ERROR_NONE) {
- // how many fields in the set?
- mdb->num_fields = dbi_result_get_numfields(mdb->result);
- mdb->num_rows = dbi_result_get_numrows(mdb->result);
- mdb->status = (dbi_error_flag) 1;
+ if(field_length) {
+ //buf = (char *)malloc(sizeof(char *) * field_length + 1);
+ buf = (char *)malloc(field_length + 1);
} else {
- Dmsg1(50, "Result status failed: %s\n", query);
- goto bail_out;
+ /*
+       * No length reported: probably a numeric field, so allocate a generous buffer
+ */
+ buf = (char *)malloc(sizeof(char *) * 50);
}
- Dmsg0(500, "my_postgresql_batch_start finishing\n");
+ switch (dbitype) {
+ case DBI_TYPE_INTEGER:
+ num = dbi_result_get_longlong(result, field_name);
+ edit_int64(num, buf);
+ field_length = strlen(buf);
+ break;
+ case DBI_TYPE_STRING:
+ if(field_length) {
+ field_length = bsnprintf(buf, field_length + 1, "%s",
+ dbi_result_get_string(result, field_name));
+ } else {
+ buf[0] = 0;
+ }
+ break;
+ case DBI_TYPE_BINARY:
+ /*
+       * dbi_result_get_binary returns a NULL pointer if the value is empty;
+       * below we change this to what Bacula expects
+ */
+ if(field_length) {
+ field_length = bsnprintf(buf, field_length + 1, "%s",
+ dbi_result_get_binary(result, field_name));
+ } else {
+ buf[0] = 0;
+ }
+ break;
+ case DBI_TYPE_DATETIME:
+ time_t last;
+ struct tm tm;
+
+ last = dbi_result_get_datetime(result, field_name);
- return mdb->status;
- break;
- case SQL_TYPE_SQLITE:
- db_lock(mdb);
- if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
- "FileIndex integer,"
- "JobId integer,"
- "Path blob,"
- "Name blob,"
- "LStat tinyblob,"
- "MD5 tinyblob)") == 1)
- {
- Dmsg0(500, "my_dbi_batch_start failed\n");
- goto bail_out;
- }
- db_unlock(mdb);
- Dmsg0(500, "my_dbi_batch_start finishing\n");
- return 1;
- break;
- case SQL_TYPE_SQLITE3:
- db_lock(mdb);
- if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
- "FileIndex integer,"
- "JobId integer,"
- "Path blob,"
- "Name blob,"
- "LStat tinyblob,"
- "MD5 tinyblob)") == 1)
- {
- Dmsg0(500, "my_dbi_batch_start failed\n");
- goto bail_out;
+ if(last == -1) {
+ field_length = bsnprintf(buf, 20, "0000-00-00 00:00:00");
+ } else {
+ (void)localtime_r(&last, &tm);
+ field_length = bsnprintf(buf, 20, "%04d-%02d-%02d %02d:%02d:%02d",
+ (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ }
+ break;
}
- db_unlock(mdb);
- Dmsg0(500, "my_dbi_batch_start finishing\n");
- return 1;
- break;
+
+ } else {
+ dbi_conn_error(dbi_result_get_conn(result), &dbi_errmsg);
+ Dmsg1(500, "dbi_getvalue error: %s\n", dbi_errmsg);
}
-bail_out:
- Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), my_dbi_strerror(mdb));
- mdb->status = (dbi_error_flag) 0;
- my_dbi_free_result(mdb);
- mdb->result = NULL;
- return mdb->status;
+ Dmsg3(500, "dbi_getvalue finish buffer: '%p' num bytes: '%d' data: '%s'\n",
+ buf, field_length, buf);
+
+ /*
+ * Don't worry about this buf
+ */
+ return buf;
}
-/* set error to something to abort operation */
-int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error)
+SQL_ROW B_DB_DBI::sql_fetch_row(void)
{
- int res = 0;
- int count = 30;
- int (*custom_function)(void*, const char*) = NULL;
- dbi_conn_t *myconn = (dbi_conn_t *)(mdb->db);
-
- Dmsg0(500, "my_dbi_batch_end started\n");
+ int j;
+ SQL_ROW row = NULL; /* by default, return NULL */
+
+ Dmsg0(500, "sql_fetch_row start\n");
+ if ((!m_rows || m_rows_size < m_num_fields) && m_num_rows > 0) {
+ if (m_rows) {
+ Dmsg0(500, "sql_fetch_row freeing space\n");
+ Dmsg2(500, "sql_fetch_row row: '%p' num_fields: '%d'\n", m_rows, m_num_fields);
+ if (m_num_rows != 0) {
+ for (j = 0; j < m_num_fields; j++) {
+ Dmsg2(500, "sql_fetch_row row '%p' '%d'\n", m_rows[j], j);
+ if (m_rows[j]) {
+ free(m_rows[j]);
+ }
+ }
+ }
+ free(m_rows);
+ }
+ Dmsg1(500, "we need space for %d bytes\n", sizeof(char *) * m_num_fields);
+ m_rows = (SQL_ROW)malloc(sizeof(char *) * m_num_fields);
+ m_rows_size = m_num_fields;
- if (!mdb) { /* no files ? */
- return 0;
+ /*
+ * Now reset the row_number now that we have the space allocated
+ */
+ m_row_number = 1;
}
- switch (mdb->db_type) {
- case SQL_TYPE_MYSQL:
- if(mdb) {
- mdb->status = (dbi_error_flag) 0;
+ /*
+ * If still within the result set
+ */
+ if (m_row_number <= m_num_rows && m_row_number != DBI_ERROR_BADPTR) {
+ Dmsg2(500, "sql_fetch_row row number '%d' is acceptable (1..%d)\n", m_row_number, m_num_rows);
+ /*
+ * Get each value from this row
+ */
+ for (j = 0; j < m_num_fields; j++) {
+ m_rows[j] = dbi_getvalue(m_result, m_row_number, j);
+ /*
+ * Allocate space to queue row
+ */
+ m_field_get = (DBI_FIELD_GET *)malloc(sizeof(DBI_FIELD_GET));
+ /*
+ * Store the pointer in queue
+ */
+ m_field_get->value = m_rows[j];
+ Dmsg4(500, "sql_fetch_row row[%d] field: '%p' in queue: '%p' has value: '%s'\n",
+ j, m_rows[j], m_field_get->value, m_rows[j]);
+ /*
+ * Insert in queue to future free
+ */
+ dbi_getvalue_list->append(m_field_get);
}
- break;
- case SQL_TYPE_POSTGRESQL:
- custom_function = (custom_function_end_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db), "PQputCopyEnd");
+ /*
+ * Increment the row number for the next call
+ */
+ m_row_number++;
+ row = m_rows;
+ } else {
+ Dmsg2(500, "sql_fetch_row row number '%d' is NOT acceptable (1..%d)\n", m_row_number, m_num_rows);
+ }
- do {
- res = (*custom_function)(myconn->connection, error);
- } while (res == 0 && --count > 0);
+ Dmsg1(500, "sql_fetch_row finishes returning %p\n", row);
- if (res == 1) {
- Dmsg0(500, "ok\n");
- mdb->status = (dbi_error_flag) 1;
- }
+ return row;
+}
- if (res <= 0) {
- Dmsg0(500, "we failed\n");
- mdb->status = (dbi_error_flag) 0;
- //Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
- }
- break;
- case SQL_TYPE_SQLITE:
- if(mdb) {
- mdb->status = (dbi_error_flag) 0;
- }
- break;
- case SQL_TYPE_SQLITE3:
- if(mdb) {
- mdb->status = (dbi_error_flag) 0;
- }
- break;
- }
+const char *B_DB_DBI::sql_strerror(void)
+{
+ const char *dbi_errmsg;
- Dmsg0(500, "my_dbi_batch_end finishing\n");
+ dbi_conn_error(m_db_handle, &dbi_errmsg);
- return true;
+ return dbi_errmsg;
}
-/*
- * This function is big and use a big switch.
- * In near future is better split in small functions
- * and refactory.
- *
- */
-int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+void B_DB_DBI::sql_data_seek(int row)
{
- int res;
- int count=30;
- dbi_conn_t *myconn = (dbi_conn_t *)(mdb->db);
- int (*custom_function)(void*, const char*, int) = NULL;
- char* (*custom_function_error)(void*) = NULL;
- size_t len;
- char *digest;
- char ed1[50];
+ /*
+ * Set the row number to be returned on the next call to sql_fetch_row
+ */
+ m_row_number = row;
+}
- Dmsg0(500, "my_dbi_batch_insert started \n");
+int B_DB_DBI::sql_affected_rows(void)
+{
+#if 0
+ return dbi_result_get_numrows_affected(result);
+#else
+ return 1;
+#endif
+}
- mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
- mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
+uint64_t B_DB_DBI::sql_insert_autokey_record(const char *query, const char *table_name)
+{
+ char sequence[30];
+ uint64_t id = 0;
- if (ar->Digest == NULL || ar->Digest[0] == 0) {
- *digest = '\0';
- } else {
- digest = ar->Digest;
+ /*
+ * First execute the insert query and then retrieve the currval.
+ */
+ if (!sql_query(query)) {
+ return 0;
}
- switch (mdb->db_type) {
- case SQL_TYPE_MYSQL:
- db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
- db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
- len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
- ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
- mdb->esc_name, ar->attr, digest);
+ m_num_rows = sql_affected_rows();
+ if (m_num_rows != 1) {
+ return 0;
+ }
- if (my_dbi_query(mdb,mdb->cmd) == 1)
- {
- Dmsg0(500, "my_dbi_batch_insert failed\n");
- goto bail_out;
+ changes++;
+
+ /*
+ * Obtain the current value of the sequence that
+ * provides the serial value for primary key of the table.
+ *
+ * currval is local to our session. It is not affected by
+ * other transactions.
+ *
+ * Determine the name of the sequence.
+ * PostgreSQL automatically creates a sequence using
+ * <table>_<column>_seq.
+ * At the time of writing, all tables used this format for
+    * their primary key: <table>id
+ * Except for basefiles which has a primary key on baseid.
+ * Therefore, we need to special case that one table.
+ *
+ * everything else can use the PostgreSQL formula.
+ */
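+   /*
+    * For example, following the formula above: a table named "job" maps to
+    * the sequence "job_jobid_seq", while "basefiles" maps to
+    * "basefiles_baseid_seq" (illustrative names).
+    */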
+ if (m_db_type == SQL_TYPE_POSTGRESQL) {
+ if (strcasecmp(table_name, "basefiles") == 0) {
+ bstrncpy(sequence, "basefiles_baseid", sizeof(sequence));
+ } else {
+ bstrncpy(sequence, table_name, sizeof(sequence));
+ bstrncat(sequence, "_", sizeof(sequence));
+ bstrncat(sequence, table_name, sizeof(sequence));
+ bstrncat(sequence, "id", sizeof(sequence));
}
- Dmsg0(500, "my_dbi_batch_insert finishing\n");
+ bstrncat(sequence, "_seq", sizeof(sequence));
+ id = dbi_conn_sequence_last(m_db_handle, NT_(sequence));
+ } else {
+ id = dbi_conn_sequence_last(m_db_handle, NT_(table_name));
+ }
- return 1;
- break;
- case SQL_TYPE_POSTGRESQL:
- my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
- my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl);
- len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n",
- ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path,
- mdb->esc_name, ar->attr, digest);
+ return id;
+}
- /* libdbi don't support CopyData and we need call a postgresql
- * specific function to do this work
- */
- Dmsg2(500, "my_dbi_batch_insert :\n %s \ncmd_size: %d",mdb->cmd, len);
- if ((custom_function = (custom_function_insert_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db),
- "PQputCopyData")) != NULL) {
- do {
- res = (*custom_function)(myconn->connection, mdb->cmd, len);
- } while (res == 0 && --count > 0);
+/* dbi_getisnull
+ * like PQgetisnull
+ * int PQgetisnull(const PGresult *res,
+ * int row_number,
+ * int column_number);
+ *
+ * use dbi_result_seek_row to search in result set
+ */
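+/*
+ * For example, a row_number of 0 and a column_number of 2 are mapped
+ * below to dbi row 1 and dbi field index 3, as libdbi rows and field
+ * indexes start at 1.
+ */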
+static int dbi_getisnull(dbi_result *result, int row_number, int column_number) {
+ int i;
- if (res == 1) {
- Dmsg0(500, "ok\n");
- mdb->changes++;
- mdb->status = (dbi_error_flag) 1;
- }
+ if (row_number == 0) {
+ row_number++;
+ }
- if (res <= 0) {
- Dmsg0(500, "my_dbi_batch_insert failed\n");
- goto bail_out;
- }
+ column_number++;
- Dmsg0(500, "my_dbi_batch_insert finishing\n");
- return mdb->status;
- } else {
- // ensure to detect a PQerror
- custom_function_error = (custom_function_error_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db), "PQerrorMessage");
- Dmsg1(500, "my_dbi_batch_insert failed\n PQerrorMessage: %s", (*custom_function_error)(myconn->connection));
- goto bail_out;
- }
- break;
- case SQL_TYPE_SQLITE:
- db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
- db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
- len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
- ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
- mdb->esc_name, ar->attr, digest);
- if (my_dbi_query(mdb,mdb->cmd) == 1)
- {
- Dmsg0(500, "my_dbi_batch_insert failed\n");
- goto bail_out;
- }
+ if (dbi_result_seek_row(result, row_number)) {
+ i = dbi_result_field_is_null_idx(result,column_number);
+ return i;
+ } else {
+ return 0;
+ }
+}
- Dmsg0(500, "my_dbi_batch_insert finishing\n");
+SQL_FIELD *B_DB_DBI::sql_fetch_field(void)
+{
+ int i, j;
+ int dbi_index;
+ int max_length;
+ int this_length;
+ char *cbuf = NULL;
- return 1;
- break;
- case SQL_TYPE_SQLITE3:
- db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
- db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
- len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
- ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
- mdb->esc_name, ar->attr, digest);
- if (my_dbi_query(mdb,mdb->cmd) == 1)
- {
- Dmsg0(500, "my_dbi_batch_insert failed\n");
- goto bail_out;
+ Dmsg0(500, "sql_fetch_field starts\n");
+
+ if (!m_fields || m_fields_size < m_num_fields) {
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
}
+ Dmsg1(500, "allocating space for %d fields\n", m_num_fields);
+ m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * m_num_fields);
+ m_fields_size = m_num_fields;
+
+ for (i = 0; i < m_num_fields; i++) {
+ /*
+          * libdbi field indexes start at 1, so increment i by 1
+ */
+ dbi_index = i + 1;
+ Dmsg1(500, "filling field %d\n", i);
+ m_fields[i].name = (char *)dbi_result_get_field_name(m_result, dbi_index);
+ m_fields[i].type = dbi_result_get_field_type_idx(m_result, dbi_index);
+ m_fields[i].flags = dbi_result_get_field_attribs_idx(m_result, dbi_index);
+
+ /*
+ * For a given column, find the max length.
+ */
+ max_length = 0;
+ for (j = 0; j < m_num_rows; j++) {
+ if (dbi_getisnull(m_result, j, dbi_index)) {
+ this_length = 4; /* "NULL" */
+ } else {
+ cbuf = dbi_getvalue(m_result, j, dbi_index);
+ this_length = cstrlen(cbuf);
+ /*
+                * cbuf is allocated by dbi_getvalue and must always be freed
+ */
+ free(cbuf);
+ }
+
+ if (max_length < this_length) {
+ max_length = this_length;
+ }
+ }
+ m_fields[i].max_length = max_length;
- Dmsg0(500, "my_dbi_batch_insert finishing\n");
+ Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n",
+ m_fields[i].name, m_fields[i].max_length, m_fields[i].type, m_fields[i].flags);
+ }
+ }
- return 1;
- break;
+ /*
+ * Increment field number for the next time around
+ */
+ return &m_fields[m_field_number++];
+}
+
+bool B_DB_DBI::sql_field_is_not_null(int field_type)
+{
+ switch (field_type) {
+ case (1 << 0):
+ return true;
+ default:
+ return false;
}
+}
-bail_out:
- Mmsg1(&mdb->errmsg, _("error inserting batch mode: %s"), my_dbi_strerror(mdb));
- mdb->status = (dbi_error_flag) 0;
- my_dbi_free_result(mdb);
- return mdb->status;
+bool B_DB_DBI::sql_field_is_numeric(int field_type)
+{
+ switch (field_type) {
+ case 1:
+ case 2:
+ return true;
+ default:
+ return false;
+ }
}
/*
* string must be long enough (max 2*old+1) to hold
* the escaped output.
*/
-char *my_postgresql_copy_escape(char *dest, char *src, size_t len)
+static char *postgresql_copy_escape(char *dest, char *src, size_t len)
{
- /* we have to escape \t, \n, \r, \ */
+ /*
+ * We have to escape \t, \n, \r, \
+ */
char c = '\0' ;
while (len > 0 && *src) {
return dest;
}
-#endif /* HAVE_BATCH_FILE_INSERT */
-
-/* my_dbi_getisnull
- * like PQgetisnull
- * int PQgetisnull(const PGresult *res,
- * int row_number,
- * int column_number);
+/*
+ * This can be a bit strange but it is the only way to do it.
*
- * use dbi_result_seek_row to search in result set
+ * Returns true if OK
+ * false if failed
*/
-int my_dbi_getisnull(dbi_result *result, int row_number, int column_number) {
- int i;
-
- if(row_number == 0) {
- row_number++;
- }
-
- column_number++;
-
- if(dbi_result_seek_row(result, row_number)) {
+bool B_DB_DBI::sql_batch_start(JCR *jcr)
+{
+ bool retval = true;
+ const char *query = "COPY batch FROM STDIN";
- i = dbi_result_field_is_null_idx(result,column_number);
+ Dmsg0(500, "sql_batch_start started\n");
- return i;
- } else {
+ db_lock(this);
+ switch (m_db_type) {
+ case SQL_TYPE_MYSQL:
+ if (!sql_query("CREATE TEMPORARY TABLE batch ("
+ "FileIndex integer,"
+ "JobId integer,"
+ "Path blob,"
+ "Name blob,"
+ "LStat tinyblob,"
+ "MD5 tinyblob,"
+ "MarkId integer)")) {
+ Dmsg0(500, "sql_batch_start failed\n");
+ goto bail_out;
+ }
+ Dmsg0(500, "sql_batch_start finishing\n");
+ goto ok_out;
+ case SQL_TYPE_POSTGRESQL:
+ if (!sql_query("CREATE TEMPORARY TABLE batch ("
+ "fileindex int,"
+ "jobid int,"
+ "path varchar,"
+ "name varchar,"
+ "lstat varchar,"
+ "md5 varchar,"
+ "markid int)")) {
+ Dmsg0(500, "sql_batch_start failed\n");
+ goto bail_out;
+ }
- return 0;
- }
+ /*
+       * We are starting a new query. Reset everything.
+ */
+ m_num_rows = -1;
+ m_row_number = -1;
+ m_field_number = -1;
-}
-/* my_dbi_getvalue
- * like PQgetvalue;
- * char *PQgetvalue(const PGresult *res,
- * int row_number,
- * int column_number);
- *
- * use dbi_result_seek_row to search in result set
- * use example to return only strings
- */
-char *my_dbi_getvalue(dbi_result *result, int row_number, unsigned int column_number) {
+ sql_free_result();
- char *buf = NULL;
- const char *errmsg;
- const char *field_name;
- unsigned short dbitype;
- size_t field_length;
- int64_t num;
+ for (int i=0; i < 10; i++) {
+ sql_query(query);
+ if (m_result) {
+ break;
+ }
+ bmicrosleep(5, 0);
+ }
+ if (!m_result) {
+ Dmsg1(50, "Query failed: %s\n", query);
+ goto bail_out;
+ }
- /* correct the index for dbi interface
- * dbi index begins 1
- * I prefer do not change others functions
- */
- Dmsg3(600, "my_dbi_getvalue pre-starting result '%p' row number '%d' column number '%d'\n",
- result, row_number, column_number);
+ m_status = (dbi_error_flag)dbi_conn_error(m_db_handle, NULL);
+ //m_status = DBI_ERROR_NONE;
- column_number++;
+ if (m_status == DBI_ERROR_NONE) {
+ /*
+ * How many fields in the set?
+ */
+ m_num_fields = dbi_result_get_numfields(m_result);
+ m_num_rows = dbi_result_get_numrows(m_result);
+ m_status = (dbi_error_flag) 1;
+ } else {
+ Dmsg1(50, "Result status failed: %s\n", query);
+ goto bail_out;
+ }
- if(row_number == 0) {
- row_number++;
+ Dmsg0(500, "sql_batch_start finishing\n");
+ goto ok_out;
+ case SQL_TYPE_SQLITE3:
+ if (!sql_query("CREATE TEMPORARY TABLE batch ("
+ "FileIndex integer,"
+ "JobId integer,"
+ "Path blob,"
+ "Name blob,"
+ "LStat tinyblob,"
+ "MD5 tinyblob,"
+ "MarkId integer)")) {
+ Dmsg0(500, "sql_batch_start failed\n");
+ goto bail_out;
+ }
+ Dmsg0(500, "sql_batch_start finishing\n");
+ goto ok_out;
}
- Dmsg3(600, "my_dbi_getvalue starting result '%p' row number '%d' column number '%d'\n",
- result, row_number, column_number);
-
- if(dbi_result_seek_row(result, row_number)) {
+bail_out:
+ Mmsg1(&errmsg, _("error starting batch mode: %s"), sql_strerror());
+ m_status = (dbi_error_flag) 0;
+ sql_free_result();
+ m_result = NULL;
+ retval = false;
+
+ok_out:
+ db_unlock(this);
+ return retval;
+}
- field_name = dbi_result_get_field_name(result, column_number);
- field_length = dbi_result_get_field_length(result, field_name);
- dbitype = dbi_result_get_field_type_idx(result,column_number);
+/*
+ * Set error to something to abort operation
+ */
+bool B_DB_DBI::sql_batch_end(JCR *jcr, const char *error)
+{
+ int res = 0;
+ int count = 30;
+ int (*custom_function)(void*, const char*) = NULL;
+ dbi_conn_t *myconn = (dbi_conn_t *)(m_db_handle);
- Dmsg3(500, "my_dbi_getvalue start: type: '%d' "
- "field_length bytes: '%d' fieldname: '%s'\n",
- dbitype, field_length, field_name);
+ Dmsg0(500, "sql_batch_start started\n");
- if(field_length) {
- //buf = (char *)malloc(sizeof(char *) * field_length + 1);
- buf = (char *)malloc(field_length + 1);
- } else {
- /* if numbers */
- buf = (char *)malloc(sizeof(char *) * 50);
- }
-
- switch (dbitype) {
- case DBI_TYPE_INTEGER:
- num = dbi_result_get_longlong(result, field_name);
- edit_int64(num, buf);
- field_length = strlen(buf);
- break;
- case DBI_TYPE_STRING:
- if(field_length) {
- field_length = bsnprintf(buf, field_length + 1, "%s",
- dbi_result_get_string(result, field_name));
- } else {
- buf[0] = 0;
- }
- break;
- case DBI_TYPE_BINARY:
- /* dbi_result_get_binary return a NULL pointer if value is empty
- * following, change this to what Bacula espected
- */
- if(field_length) {
- field_length = bsnprintf(buf, field_length + 1, "%s",
- dbi_result_get_binary(result, field_name));
- } else {
- buf[0] = 0;
- }
- break;
- case DBI_TYPE_DATETIME:
- time_t last;
- struct tm tm;
+ switch (m_db_type) {
+ case SQL_TYPE_MYSQL:
+ m_status = (dbi_error_flag) 0;
+ break;
+ case SQL_TYPE_POSTGRESQL:
+ custom_function = (custom_function_end_t)dbi_driver_specific_function(dbi_conn_get_driver(myconn), "PQputCopyEnd");
- last = dbi_result_get_datetime(result, field_name);
+ do {
+ res = (*custom_function)(myconn->connection, error);
+ } while (res == 0 && --count > 0);
- if(last == -1) {
- field_length = bsnprintf(buf, 20, "0000-00-00 00:00:00");
- } else {
- (void)localtime_r(&last, &tm);
- field_length = bsnprintf(buf, 20, "%04d-%02d-%02d %02d:%02d:%02d",
- (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
- }
- break;
+ if (res == 1) {
+ Dmsg0(500, "ok\n");
+ m_status = (dbi_error_flag) 1;
}
- } else {
- dbi_conn_error(dbi_result_get_conn(result), &errmsg);
- Dmsg1(500, "my_dbi_getvalue error: %s\n", errmsg);
+ if (res <= 0) {
+ Dmsg0(500, "we failed\n");
+ m_status = (dbi_error_flag) 0;
+ //Mmsg1(&errmsg, _("error ending batch mode: %s"), PQerrorMessage(myconn));
+ }
+ break;
+ case SQL_TYPE_SQLITE3:
+ m_status = (dbi_error_flag) 0;
+ break;
}
- Dmsg3(500, "my_dbi_getvalue finish buffer: '%p' num bytes: '%d' data: '%s'\n",
- buf, field_length, buf);
+ Dmsg0(500, "sql_batch_start finishing\n");
- // don't worry about this buf
- return buf;
+ return true;
}
-static uint64_t my_dbi_sequence_last(B_DB *mdb, const char *table_name)
+/*
+ * This function is big and uses a big switch.
+ * In the near future it should be split into smaller
+ * functions and refactored.
+ */
+bool B_DB_DBI::sql_batch_insert(JCR *jcr, ATTR_DBR *ar)
{
- /*
- Obtain the current value of the sequence that
- provides the serial value for primary key of the table.
+ int res;
+ int count=30;
+ dbi_conn_t *myconn = (dbi_conn_t *)(m_db_handle);
+ int (*custom_function)(void*, const char*, int) = NULL;
+ char* (*custom_function_error)(void*) = NULL;
+ size_t len;
+ char *digest;
+ char ed1[50];
- currval is local to our session. It is not affected by
- other transactions.
+ Dmsg0(500, "sql_batch_start started \n");
- Determine the name of the sequence.
- PostgreSQL automatically creates a sequence using
- <table>_<column>_seq.
- At the time of writing, all tables used this format for
- for their primary key: <table>id
- Except for basefiles which has a primary key on baseid.
- Therefore, we need to special case that one table.
+ esc_name = check_pool_memory_size(esc_name, fnl*2+1);
+ esc_path = check_pool_memory_size(esc_path, pnl*2+1);
- everything else can use the PostgreSQL formula.
- */
+   if (ar->Digest == NULL || ar->Digest[0] == 0) {
+      digest = (char *)"";   /* no digest; do not write through an uninitialized pointer */
+   } else {
+      digest = ar->Digest;
+   }
- char sequence[30];
- uint64_t id = 0;
+ switch (m_db_type) {
+ case SQL_TYPE_MYSQL:
+ db_escape_string(jcr, esc_name, fname, fnl);
+ db_escape_string(jcr, esc_path, path, pnl);
+ len = Mmsg(cmd, "INSERT INTO batch VALUES "
+ "(%u,%s,'%s','%s','%s','%s',%u)",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), esc_path,
+ esc_name, ar->attr, digest, ar->DeltaSeq);
+
+ if (!sql_query(cmd))
+ {
+ Dmsg0(500, "sql_batch_start failed\n");
+ goto bail_out;
+ }
- if (mdb->db_type == SQL_TYPE_POSTGRESQL) {
+ Dmsg0(500, "sql_batch_start finishing\n");
- if (strcasecmp(table_name, "basefiles") == 0) {
- bstrncpy(sequence, "basefiles_baseid", sizeof(sequence));
+ return true;
+ break;
+ case SQL_TYPE_POSTGRESQL:
+ postgresql_copy_escape(esc_name, fname, fnl);
+ postgresql_copy_escape(esc_path, path, pnl);
+ len = Mmsg(cmd, "%u\t%s\t%s\t%s\t%s\t%s\t%u\n",
+ ar->FileIndex, edit_int64(ar->JobId, ed1), esc_path,
+ esc_name, ar->attr, digest, ar->DeltaSeq);
+
+      /*
+       * libdbi doesn't support CopyData so we need to call a PostgreSQL
+       * specific function to do this work.
+       */
+ Dmsg2(500, "sql_batch_insert :\n %s \ncmd_size: %d",cmd, len);
+ custom_function = (custom_function_insert_t)dbi_driver_specific_function(dbi_conn_get_driver(myconn),"PQputCopyData");
+ if (custom_function != NULL) {
+ do {
+ res = (*custom_function)(myconn->connection, cmd, len);
+ } while (res == 0 && --count > 0);
+
+ if (res == 1) {
+ Dmsg0(500, "ok\n");
+ changes++;
+ m_status = (dbi_error_flag) 1;
+ }
+
+ if (res <= 0) {
+ Dmsg0(500, "sql_batch_insert failed\n");
+ goto bail_out;
+ }
+
+ Dmsg0(500, "sql_batch_insert finishing\n");
+ return true;
} else {
- bstrncpy(sequence, table_name, sizeof(sequence));
- bstrncat(sequence, "_", sizeof(sequence));
- bstrncat(sequence, table_name, sizeof(sequence));
- bstrncat(sequence, "id", sizeof(sequence));
+ /*
+          * Make sure we detect any PQ error.
+ */
+ custom_function_error = (custom_function_error_t)dbi_driver_specific_function(dbi_conn_get_driver(myconn), "PQerrorMessage");
+ Dmsg1(500, "sql_batch_insert failed\n PQerrorMessage: %s", (*custom_function_error)(myconn->connection));
+ goto bail_out;
+ }
+ break;
+ case SQL_TYPE_SQLITE3:
+ db_escape_string(jcr, esc_name, fname, fnl);
+ db_escape_string(jcr, esc_path, path, pnl);
+ len = Mmsg(cmd, "INSERT INTO batch VALUES "
+ "(%u,%s,'%s','%s','%s','%s',%u)",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), esc_path,
+ esc_name, ar->attr, digest, ar->DeltaSeq);
+
+ if (!sql_query(cmd))
+ {
+ Dmsg0(500, "sql_batch_insert failed\n");
+ goto bail_out;
}
- bstrncat(sequence, "_seq", sizeof(sequence));
- id = dbi_conn_sequence_last(mdb->db, NT_(sequence));
- } else {
- id = dbi_conn_sequence_last(mdb->db, NT_(table_name));
+ Dmsg0(500, "sql_batch_insert finishing\n");
+
+ return true;
+ break;
}
- return id;
+bail_out:
+ Mmsg1(&errmsg, _("error inserting batch mode: %s"), sql_strerror());
+ m_status = (dbi_error_flag) 0;
+ sql_free_result();
+ return false;
}
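+
+/*
+ * Illustrative sketch only, not part of this driver: the call order the
+ * batch interface above expects. The helper name, the way the ATTR_DBR
+ * records are obtained, and direct access to the methods are assumptions
+ * made purely for the example; the real caller is also expected to have
+ * filled the fname/path buffers used by sql_batch_insert for each record.
+ */
+#if 0
+static bool example_batch_send(JCR *jcr, B_DB_DBI *mdb, ATTR_DBR *ar_list, int num_records)
+{
+   if (!mdb->sql_batch_start(jcr)) {             /* create temp table / start COPY */
+      return false;
+   }
+   for (int i = 0; i < num_records; i++) {
+      if (!mdb->sql_batch_insert(jcr, &ar_list[i])) {
+         mdb->sql_batch_end(jcr, "aborted");     /* non-NULL error aborts a PostgreSQL COPY */
+         return false;
+      }
+   }
+   return mdb->sql_batch_end(jcr, NULL);         /* NULL ends a PostgreSQL COPY normally */
+}
+#endif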
-uint64_t my_dbi_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name)
+/*
+ * Initialize database data structure. In principle this should
+ * never have errors, or it is really fatal.
+ */
+B_DB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user,
+ const char *db_password, const char *db_address, int db_port,
+ const char *db_socket, bool mult_db_connections, bool disable_batch_insert)
{
- /*
- * First execute the insert query and then retrieve the currval.
- */
- if (my_dbi_query(mdb, query)) {
- return 0;
+ B_DB_DBI *mdb = NULL;
+
+ if (!db_driver) {
+ Jmsg(jcr, M_ABORT, 0, _("Driver type not specified in Catalog resource.\n"));
}
- mdb->num_rows = sql_affected_rows(mdb);
- if (mdb->num_rows != 1) {
- return 0;
+ if (strlen(db_driver) < 5 || db_driver[3] != ':' || strncasecmp(db_driver, "dbi", 3) != 0) {
+ Jmsg(jcr, M_ABORT, 0, _("Invalid driver type, must be \"dbi:<type>\"\n"));
}
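+   /*
+    * A valid driver specification therefore looks like, for example,
+    * "dbi:mysql" or "dbi:postgresql".
+    */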
- mdb->changes++;
+ if (!db_user) {
+ Jmsg(jcr, M_FATAL, 0, _("A user name for DBI must be supplied.\n"));
+ return NULL;
+ }
- return my_dbi_sequence_last(mdb, table_name);
-}
+ P(mutex); /* lock DB queue */
+ if (db_list && !mult_db_connections) {
+ /*
+ * Look to see if DB already open
+ */
+ foreach_dlist(mdb, db_list) {
+ if (mdb->db_match_database(db_driver, db_name, db_address, db_port)) {
+ Dmsg1(100, "DB REopen %s\n", db_name);
+ mdb->increment_refcount();
+ goto bail_out;
+ }
+ }
+ }
+ Dmsg0(100, "db_init_database first time\n");
+ mdb = New(B_DB_DBI(jcr, db_driver, db_name, db_user, db_password, db_address,
+ db_port, db_socket, mult_db_connections, disable_batch_insert));
-#ifdef HAVE_BATCH_FILE_INSERT
-const char *my_dbi_batch_lock_path_query[5] = {
- /* Mysql */
- "LOCK TABLES Path write, batch write, Path as p write",
- /* Postgresql */
- "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE",
- /* SQLite */
- "BEGIN",
- /* SQLite3 */
- "BEGIN",
- /* Ingres */
- "BEGIN"
-};
-
-const char *my_dbi_batch_lock_filename_query[5] = {
- /* Mysql */
- "LOCK TABLES Filename write, batch write, Filename as f write",
- /* Postgresql */
- "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE",
- /* SQLite */
- "BEGIN",
- /* SQLite3 */
- "BEGIN",
- /* Ingres */
- "BEGIN"
-};
-
-const char *my_dbi_batch_unlock_tables_query[5] = {
- /* Mysql */
- "UNLOCK TABLES",
- /* Postgresql */
- "COMMIT",
- /* SQLite */
- "COMMIT",
- /* SQLite3 */
- "COMMIT",
- /* Ingres */
- "COMMIT"
-};
-
-const char *my_dbi_batch_fill_path_query[5] = {
- /* Mysql */
- "INSERT INTO Path (Path) "
- "SELECT a.Path FROM "
- "(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS "
- "(SELECT Path FROM Path AS p WHERE p.Path = a.Path)",
- /* Postgresql */
- "INSERT INTO Path (Path) "
- "SELECT a.Path FROM "
- "(SELECT DISTINCT Path FROM batch) AS a "
- "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ",
- /* SQLite */
- "INSERT INTO Path (Path)"
- " SELECT DISTINCT Path FROM batch"
- " EXCEPT SELECT Path FROM Path",
- /* SQLite3 */
- "INSERT INTO Path (Path)"
- " SELECT DISTINCT Path FROM batch"
- " EXCEPT SELECT Path FROM Path",
- /* Ingres */
- "INSERT INTO Path (Path) "
- "SELECT a.Path FROM "
- "(SELECT DISTINCT Path FROM batch) AS a "
- "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) "
-};
-
-const char *my_dbi_batch_fill_filename_query[5] = {
- /* Mysql */
- "INSERT INTO Filename (Name) "
- "SELECT a.Name FROM "
- "(SELECT DISTINCT Name FROM batch) AS a WHERE NOT EXISTS "
- "(SELECT Name FROM Filename AS f WHERE f.Name = a.Name)",
- /* Postgresql */
- "INSERT INTO Filename (Name) "
- "SELECT a.Name FROM "
- "(SELECT DISTINCT Name FROM batch) as a "
- "WHERE NOT EXISTS "
- "(SELECT Name FROM Filename WHERE Name = a.Name)",
- /* SQLite */
- "INSERT INTO Filename (Name)"
- " SELECT DISTINCT Name FROM batch "
- " EXCEPT SELECT Name FROM Filename",
- /* SQLite3 */
- "INSERT INTO Filename (Name)"
- " SELECT DISTINCT Name FROM batch "
- " EXCEPT SELECT Name FROM Filename",
- /* Ingres */
- "INSERT INTO Filename (Name) "
- "SELECT a.Name FROM "
- "(SELECT DISTINCT Name FROM batch) as a "
- "WHERE NOT EXISTS "
- "(SELECT Name FROM Filename WHERE Name = a.Name)"
-};
-
-#endif /* HAVE_BATCH_FILE_INSERT */
-
-const char *my_dbi_match[5] = {
- /* Mysql */
- "MATCH",
- /* Postgresql */
- "~",
- /* SQLite */
- "MATCH",
- /* SQLite3 */
- "MATCH",
- /* Ingres */
- "~"
-};
+bail_out:
+ V(mutex);
+ return mdb;
+}
#endif /* HAVE_DBI */
#!/bin/sh
#
# Drop Bacula database -- works for whatever is configured,
-# MySQL, SQLite, PostgreSQL, Ingres
+# MySQL, SQLite, PostgreSQL, Ingres
#
-if test xsqlite3 = x@DB_TYPE@ ; then
- @scriptdir@/drop_@DB_TYPE@_database $*
-else
- if test xmysql = x@DB_TYPE@ ; then
- echo "Making MySQL database"
- @scriptdir@/drop_mysql_database $*
- elif test xingres = x@DB_TYPE@ ; then
- echo "Dropping Ingres database"
- @scriptdir@/drop_ingres_database $*
- else
- @scriptdir@/drop_postgresql_database $*
- fi
+
+default_db_type=@DEFAULT_DB_TYPE@
+
+#
+# See if the first argument is a valid backend name.
+# If so the user overrides the default database backend.
+#
+if [ $# -gt 0 ]; then
+ case $1 in
+ sqlite3)
+ db_type=$1
+ shift
+ ;;
+ mysql)
+ db_type=$1
+ shift
+ ;;
+ postgresql)
+ db_type=$1
+ shift
+ ;;
+ ingres)
+ db_type=$1
+ shift
+ ;;
+ *)
+ ;;
+ esac
+fi
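+
+#
+# For example, passing "postgresql" as the first argument drops the
+# PostgreSQL database via drop_postgresql_database, regardless of the
+# configured default backend.
+#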
+
+#
+# If no new db_type is given use the default db_type.
+#
+if [ -z "${db_type}" ]; then
+ db_type="${default_db_type}"
fi
+
+echo "Dropping ${db_type} database"
+@scriptdir@/drop_${db_type}_database $*
#!/bin/sh
#
# Drop Bacula tables -- works for whatever is configured,
-# MySQL, SQLite, Ingres, or PostgreSQL
+# MySQL, SQLite, Ingres, or PostgreSQL
#
-if test xsqlite3 = x@DB_TYPE@ ; then
- @scriptdir@/drop_@DB_TYPE@_tables $*
- echo "Dropped SQLite tables"
-else
- if test xmysql = x@DB_TYPE@ ; then
- echo "Making MySQL tables"
- @scriptdir@/drop_mysql_tables $*
- echo "Dropped MySQL tables"
- elif test xingres = x@DB_TYPE@ ; then
- echo "Dropping Ingres tables"
- @scriptdir@/drop_ingres_tables $*
- echo "Dropped Ingres tables"
- else
- # hardcoded database name - should be a parameter
- @scriptdir@/drop_postgresql_tables $*
- echo "Dropped PostgreSQL tables"
- fi
+
+default_db_type=@DEFAULT_DB_TYPE@
+
+#
+# See if the first argument is a valid backend name.
+# If so the user overrides the default database backend.
+#
+if [ $# -gt 0 ]; then
+ case $1 in
+ sqlite3)
+ db_type=$1
+ shift
+ ;;
+ mysql)
+ db_type=$1
+ shift
+ ;;
+ postgresql)
+ db_type=$1
+ shift
+ ;;
+ ingres)
+ db_type=$1
+ shift
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+#
+# If no new db_type is given use the default db_type.
+#
+if [ -z "${db_type}" ]; then
+ db_type="${default_db_type}"
fi
+
+@scriptdir@/drop_${db_type}_tables $*
+echo "Dropped ${db_type} tables"
# shell script to drop Bacula database(s)
#
-bindir=@SQL_BINDIR@
+bindir=@INGRES_BINDIR@
PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
db_user=${db_user:-@db_user@}
#
# shell script to delete Bacula tables for PostgreSQL
-bindir=@SQL_BINDIR@
+bindir=@INGRES_BINDIR@
PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
db_user=${db_user:-@db_user@}
# shell script to drop Bacula database(s)
#
-bindir=@SQL_BINDIR@
+bindir=@MYSQL_BINDIR@
db_name=@db_name@
-if test xmysql = x@DB_TYPE@ ; then
- $bindir/mysql $* -f <<END-OF-DATA
- DROP DATABASE ${db_name};
+$bindir/mysql $* -f <<END-OF-DATA
+ DROP DATABASE ${db_name};
END-OF-DATA
- if test $? -eq 0 ; then
- echo "Drop of ${db_name} database succeeded."
- else
- echo "Drop of ${db_name} database failed."
- fi
+if test $? -eq 0 ; then
+ echo "Drop of ${db_name} database succeeded."
else
- echo "Bacula is not configured for a MySQL database."
+ echo "Drop of ${db_name} database failed."
fi
exit 0
#
# shell script to delete Bacula tables for MySQL
-bindir=@SQL_BINDIR@
+bindir=@MYSQL_BINDIR@
db_name=@db_name@
if $bindir/mysql $* <<END-OF-DATA
# shell script to drop Bacula database(s)
#
-bindir=@SQL_BINDIR@
+bindir=@POSTGRESQL_BINDIR@
db_name=@db_name@
if $bindir/dropdb ${db_name}
#
# shell script to delete Bacula tables for PostgreSQL
-bindir=@SQL_BINDIR@
+bindir=@POSTGRESQL_BINDIR@
db_name=@db_name@
$bindir/psql -f - -d ${db_name} $* <<END-OF-DATA
# shell script to drop Bacula SQLite tables
db_name=@db_name@
-
-if test xsqlite = x@DB_TYPE@ -o xsqlite3 = x@DB_TYPE@ ; then
- cd @working_dir@
- rm -rf ${db_name}.db
- echo "SQLite database dropped."
-else
- echo "Bacula is not configured for an SQLite database."
-fi
+cd @working_dir@
+rm -rf ${db_name}.db
+echo "SQLite database dropped."
#!/bin/sh
#
# This routine makes the appropriately configured
-# Bacula tables for PostgreSQL or MySQL.
+# Bacula tables for PostgreSQL or MySQL.
# SQLite does not have permissions.
#
-if test xmysql = x@DB_TYPE@ ; then
- echo "Granting MySQL privileges"
- @scriptdir@/grant_mysql_privileges $*
-else
- if test xpostgresql = x@DB_TYPE@ ; then
- echo "Granting PostgreSQL privileges"
- @scriptdir@/grant_postgresql_privileges $*
- elif test xingres = x@DB_TYPE@ ; then
- echo "Granting Ingres privileges"
- @scriptdir@/grant_ingres_privileges $*
- else
- if test xsqlite3 = x@DB_TYPE@ ; then
- echo "Granting SQLite privileges"
- @scriptdir@/grant_@DB_TYPE@_privileges $*
- fi
- fi
+
+default_db_type=@DEFAULT_DB_TYPE@
+
+#
+# See if the first argument is a valid backend name.
+# If so the user overrides the default database backend.
+#
+if [ $# -gt 0 ]; then
+ case $1 in
+ sqlite3)
+ db_type=$1
+ shift
+ ;;
+ mysql)
+ db_type=$1
+ shift
+ ;;
+ postgresql)
+ db_type=$1
+ shift
+ ;;
+ ingres)
+ db_type=$1
+ shift
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+#
+# If no new db_type is given use the default db_type.
+#
+if [ -z "${db_type}" ]; then
+ db_type="${default_db_type}"
fi
+
+echo "Granting ${db_type} privileges"
+@scriptdir@/grant_${db_type}_privileges $*
#
# shell script TO GRANT privileges to the bacula database
#
-bindir=@SQL_BINDIR@
+bindir=@INGRES_BINDIR@
PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
db_user=${db_user:-@db_user@}
# shell script to grant privileges to the bacula database
#
db_user=${db_user:-@db_user@}
-bindir=@SQL_BINDIR@
+bindir=@MYSQL_BINDIR@
db_name=${db_name:-@db_name@}
db_password=@db_password@
if [ "$db_password" != "" ]; then
# shell script to grant privileges to the bacula database
#
db_user=${db_user:-@db_user@}
-bindir=@SQL_BINDIR@
+bindir=@POSTGRESQL_BINDIR@
db_name=${db_name:-@db_name@}
db_password=@db_password@
if [ "$db_password" != "" ]; then
#
# shell script to grant privileges to the bacula database
#
-bindir=@SQL_BINDIR@
+bindir=@SQLITE_BINDIR@
-# nothing to do here
\ No newline at end of file
+# nothing to do here
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2003-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+/*
+ * Bacula Catalog Database routines specific to Ingres
+ * These are Ingres specific routines
+ *
+ * Stefan Reddig, June 2009 with help of Marco van Wieringen April 2010
+ * based upon work done
+ * by Dan Langille, December 2003 and
+ * by Kern Sibbald, March 2000
+ *
+ * Major rewrite by Marco van Wieringen, January 2010 for catalog refactoring.
+ */
+
+#include "bacula.h"
+
+#ifdef HAVE_INGRES
+
+#include "cats.h"
+#include "bdb_priv.h"
+#include "myingres.h"
+#include "bdb_ingres.h"
+#include "lib/breg.h"
+
+/* -----------------------------------------------------------------------
+ *
+ * Ingres dependent defines and subroutines
+ *
+ * -----------------------------------------------------------------------
+ */
+
+/*
+ * List of open databases.
+ */
+static dlist *db_list = NULL;
+
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+
+struct B_DB_RWRULE {
+ int pattern_length;
+ char *search_pattern;
+ BREGEXP *rewrite_regexp;
+ bool trigger;
+};
+
+/*
+ * Create a new query filter.
+ */
+static bool db_allocate_query_filter(JCR *jcr, alist *query_filters, int pattern_length,
+ const char *search_pattern, const char *filter)
+{
+ B_DB_RWRULE *rewrite_rule;
+
+ rewrite_rule = (B_DB_RWRULE *)malloc(sizeof(B_DB_RWRULE));
+
+ rewrite_rule->pattern_length = pattern_length;
+ rewrite_rule->search_pattern = bstrdup(search_pattern);
+ rewrite_rule->rewrite_regexp = new_bregexp(filter);
+ rewrite_rule->trigger = false;
+
+ if (!rewrite_rule->rewrite_regexp) {
+ Jmsg(jcr, M_FATAL, 0, _("Failed to allocate space for query filter.\n"));
+ free(rewrite_rule->search_pattern);
+ free(rewrite_rule);
+ return false;
+ } else {
+ query_filters->append(rewrite_rule);
+ return true;
+ }
+}
+
+/*
+ * Create a stack of all filters that should be applied to a SQL query
+ * before submitting it to the database backend.
+ */
+static inline alist *db_initialize_query_filters(JCR *jcr)
+{
+ alist *query_filters;
+
+ query_filters = New(alist(10, not_owned_by_alist));
+
+ if (!query_filters) {
+ Jmsg(jcr, M_FATAL, 0, _("Failed to allocate space for query filters.\n"));
+ return NULL;
+ }
+
+ db_allocate_query_filter(jcr, query_filters, 6, "OFFSET",
+ "/LIMIT ([0-9]+) OFFSET ([0-9]+)/OFFSET $2 FETCH NEXT $1 ROWS ONLY/ig");
+ db_allocate_query_filter(jcr, query_filters, 5, "LIMIT",
+ "/LIMIT ([0-9]+)/FETCH FIRST $1 ROWS ONLY/ig");
+ db_allocate_query_filter(jcr, query_filters, 9, "TEMPORARY",
+ "/CREATE TEMPORARY TABLE (.+)/DECLARE GLOBAL TEMPORARY TABLE $1 ON COMMIT PRESERVE ROWS WITH NORECOVERY/i");
+
+ return query_filters;
+}
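+
+/*
+ * For example, the filters above rewrite "LIMIT 10 OFFSET 20" into
+ * "OFFSET 20 FETCH NEXT 10 ROWS ONLY", and "CREATE TEMPORARY TABLE batch (...)"
+ * into "DECLARE GLOBAL TEMPORARY TABLE batch (...) ON COMMIT PRESERVE ROWS
+ * WITH NORECOVERY", before a query is handed to Ingres.
+ */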
+
+/*
+ * Free all query filters.
+ */
+static inline void db_destroy_query_filters(alist *query_filters)
+{
+ B_DB_RWRULE *rewrite_rule;
+
+ foreach_alist(rewrite_rule, query_filters) {
+ free_bregexp(rewrite_rule->rewrite_regexp);
+ free(rewrite_rule->search_pattern);
+ free(rewrite_rule);
+ }
+
+ delete query_filters;
+}
+
+B_DB_INGRES::B_DB_INGRES(JCR *jcr,
+ const char *db_driver,
+ const char *db_name,
+ const char *db_user,
+ const char *db_password,
+ const char *db_address,
+ int db_port,
+ const char *db_socket,
+ bool mult_db_connections,
+ bool disable_batch_insert)
+{
+ B_DB_INGRES *mdb;
+ int next_session_id = 0;
+
+ /*
+ * See what the next available session_id is.
+ * We first see what the highest session_id is used now.
+ */
+ if (db_list) {
+ foreach_dlist(mdb, db_list) {
+ if (mdb->m_session_id > next_session_id) {
+ next_session_id = mdb->m_session_id;
+ }
+ }
+ }
+
+ /*
+ * Initialize the parent class members.
+ */
+ m_db_interface_type = SQL_INTERFACE_TYPE_INGRES;
+ m_db_type = SQL_TYPE_INGRES;
+ m_db_driver = bstrdup("ingres");
+ m_db_name = bstrdup(db_name);
+ m_db_user = bstrdup(db_user);
+ if (db_password) {
+ m_db_password = bstrdup(db_password);
+ }
+ if (db_address) {
+ m_db_address = bstrdup(db_address);
+ }
+ if (db_socket) {
+ m_db_socket = bstrdup(db_socket);
+ }
+ m_db_port = db_port;
+ if (disable_batch_insert) {
+ m_disabled_batch_insert = true;
+ m_have_batch_insert = false;
+ } else {
+ m_disabled_batch_insert = false;
+#if defined(USE_BATCH_FILE_INSERT)
+ m_have_batch_insert = true;
+#else
+ m_have_batch_insert = false;
+#endif
+ }
+
+ errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
+ *errmsg = 0;
+ cmd = get_pool_memory(PM_EMSG); /* get command buffer */
+ cached_path = get_pool_memory(PM_FNAME);
+ cached_path_id = 0;
+ m_ref_count = 1;
+ fname = get_pool_memory(PM_FNAME);
+ path = get_pool_memory(PM_FNAME);
+ esc_name = get_pool_memory(PM_FNAME);
+ esc_path = get_pool_memory(PM_FNAME);
+ esc_obj = get_pool_memory(PM_FNAME);
+ m_allow_transactions = mult_db_connections;
+
+ /*
+ * Initialize the private members.
+ */
+ m_db_handle = NULL;
+ m_result = NULL;
+ m_explicit_commit = true;
+ m_session_id = ++next_session_id;
+ m_query_filters = db_initialize_query_filters(jcr);
+
+ /*
+ * Put the db in the list.
+ */
+ if (db_list == NULL) {
+ db_list = New(dlist(this, &this->m_link));
+ }
+ db_list->append(this);
+}
+
+B_DB_INGRES::~B_DB_INGRES()
+{
+}
+
+/*
+ * Now actually open the database. This can generate errors,
+ * which are returned in the errmsg
+ *
+ * DO NOT close the database or delete mdb here !!!!
+ */
+bool B_DB_INGRES::db_open_database(JCR *jcr)
+{
+ bool retval = false;
+ int errstat;
+
+ P(mutex);
+ if (m_connected) {
+ retval = true;
+ goto bail_out;
+ }
+
+ if ((errstat=rwl_init(&m_lock)) != 0) {
+ berrno be;
+ Mmsg1(&errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
+ be.bstrerror(errstat));
+ goto bail_out;
+ }
+
+ m_db_handle = INGconnectDB(m_db_name, m_db_user, m_db_password, m_session_id);
+
+ Dmsg0(50, "Ingres real CONNECT done\n");
+ Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n", m_db_user, m_db_name,
+ m_db_password == NULL ? "(NULL)" : m_db_password);
+
+ if (!m_db_handle) {
+ Mmsg2(&errmsg, _("Unable to connect to Ingres server.\n"
+ "Database=%s User=%s\n"
+ "It is probably not running or your password is incorrect.\n"),
+ m_db_name, m_db_user);
+ goto bail_out;
+ }
+
+ m_connected = true;
+
+ INGsetDefaultLockingMode(m_db_handle);
+
+ if (!check_tables_version(jcr, this)) {
+ goto bail_out;
+ }
+
+ retval = true;
+
+bail_out:
+ V(mutex);
+ return retval;
+}
+
+void B_DB_INGRES::db_close_database(JCR *jcr)
+{
+ db_end_transaction(jcr);
+ P(mutex);
+ sql_free_result();
+ m_ref_count--;
+ if (m_ref_count == 0) {
+ db_list->remove(this);
+ if (m_connected && m_db_handle) {
+ INGdisconnectDB(m_db_handle);
+ }
+ if (m_query_filters) {
+ db_destroy_query_filters(m_query_filters);
+ }
+ rwl_destroy(&m_lock);
+ free_pool_memory(errmsg);
+ free_pool_memory(cmd);
+ free_pool_memory(cached_path);
+ free_pool_memory(fname);
+ free_pool_memory(path);
+ free_pool_memory(esc_name);
+ free_pool_memory(esc_path);
+ free_pool_memory(esc_obj);
+ free(m_db_driver);
+ free(m_db_name);
+ free(m_db_user);
+ if (m_db_password) {
+ free(m_db_password);
+ }
+ if (m_db_address) {
+ free(m_db_address);
+ }
+ if (m_db_socket) {
+ free(m_db_socket);
+ }
+ delete this;
+ if (db_list->size() == 0) {
+ delete db_list;
+ db_list = NULL;
+ }
+ }
+ V(mutex);
+}
+
+void B_DB_INGRES::db_thread_cleanup(void)
+{
+}
+
+/*
+ * Escape strings so that Ingres is happy
+ *
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
+ */
+void B_DB_INGRES::db_escape_string(JCR *jcr, char *snew, char *old, int len)
+{
+ char *n, *o;
+
+ n = snew;
+ o = old;
+ while (len--) {
+ switch (*o) {
+ case '\'':
+ *n++ = '\'';
+ *n++ = '\'';
+ o++;
+ break;
+ case 0:
+ *n++ = '\\';
+ *n++ = 0;
+ o++;
+ break;
+ default:
+ *n++ = *o++;
+ break;
+ }
+ }
+ *n = 0;
+}
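+
+/*
+ * For example, O'Brien is escaped to O''Brien so the value can be embedded
+ * in a single quoted Ingres string literal.
+ */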
+
+/*
+ * Escape binary so that Ingres is happy
+ *
+ * NOTE! Needs to be implemented (escape \0)
+ *
+ */
+char *B_DB_INGRES::db_escape_object(JCR *jcr, char *old, int len)
+{
+ char *n, *o;
+
+ n = esc_obj = check_pool_memory_size(esc_obj, len*2+1);
+ o = old;
+ while (len--) {
+ switch (*o) {
+ case '\'':
+ *n++ = '\'';
+ *n++ = '\'';
+ o++;
+ break;
+ case 0:
+ *n++ = '\\';
+ *n++ = 0;
+ o++;
+ break;
+ default:
+ *n++ = *o++;
+ break;
+ }
+ }
+ *n = 0;
+ return esc_obj;
+}
+
+/*
+ * Unescape binary object so that Ingres is happy
+ *
+ * TODO: need to be implemented (escape \0)
+ */
+void B_DB_INGRES::db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *dest_len)
+{
+ if (!from) {
+ *dest[0] = 0;
+ *dest_len = 0;
+ return;
+ }
+ *dest = check_pool_memory_size(*dest, expected_len+1);
+ *dest_len = expected_len;
+ memcpy(*dest, from, expected_len);
+ (*dest)[expected_len]=0;
+}
+
+/*
+ * Start a transaction. This groups inserts and makes things
+ * much more efficient. Usually started when inserting
+ * file attributes.
+ */
+void B_DB_INGRES::db_start_transaction(JCR *jcr)
+{
+ if (!jcr->attr) {
+ jcr->attr = get_pool_memory(PM_FNAME);
+ }
+ if (!jcr->ar) {
+ jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR));
+ }
+
+ if (!m_allow_transactions) {
+ return;
+ }
+
+ db_lock(this);
+ /* Allow only 25,000 changes per transaction */
+ if (m_transaction && changes > 25000) {
+ db_end_transaction(jcr);
+ }
+ if (!m_transaction) {
+ sql_query("BEGIN"); /* begin transaction */
+ Dmsg0(400, "Start Ingres transaction\n");
+ m_transaction = true;
+ }
+ db_unlock(this);
+}
+
+void B_DB_INGRES::db_end_transaction(JCR *jcr)
+{
+ if (jcr && jcr->cached_attribute) {
+ Dmsg0(400, "Flush last cached attribute.\n");
+ if (!db_create_attributes_record(jcr, this, jcr->ar)) {
+ Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
+ }
+ jcr->cached_attribute = false;
+ }
+
+ if (!m_allow_transactions) {
+ return;
+ }
+
+ db_lock(this);
+ if (m_transaction) {
+ sql_query("COMMIT"); /* end transaction */
+ m_transaction = false;
+ Dmsg1(400, "End Ingres transaction changes=%d\n", changes);
+ }
+ changes = 0;
+ db_unlock(this);
+}
+
+/*
+ * Submit a general SQL command (cmd), and for each row returned,
+ * the result_handler is called with the ctx.
+ */
+bool B_DB_INGRES::db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
+{
+ SQL_ROW row;
+ bool retval = true;
+
+ Dmsg1(500, "db_sql_query starts with %s\n", query);
+
+ db_lock(this);
+ if (!sql_query(query, QF_STORE_RESULT)) {
+ Mmsg(errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror());
+ Dmsg0(500, "db_sql_query failed\n");
+ retval = false;
+ goto bail_out;
+ }
+
+ if (result_handler != NULL) {
+ Dmsg0(500, "db_sql_query invoking handler\n");
+ while ((row = sql_fetch_row()) != NULL) {
+ Dmsg0(500, "db_sql_query sql_fetch_row worked\n");
+ if (result_handler(ctx, m_num_fields, row))
+ break;
+ }
+ sql_free_result();
+ }
+
+ Dmsg0(500, "db_sql_query finished\n");
+
+bail_out:
+ db_unlock(this);
+ return retval;
+}
+
+/*
+ * Note, if this routine returns false (failure), Bacula expects
+ * that no result has been stored.
+ *
+ * Returns: true on success
+ * false on failure
+ *
+ */
+bool B_DB_INGRES::sql_query(const char *query, int flags)
+{
+ int cols;
+ char *cp, *bp;
+ char *dup_query, *new_query;
+ bool retval = true;
+ bool start_of_transaction = false;
+ bool end_of_transaction = false;
+ B_DB_RWRULE *rewrite_rule;
+
+ Dmsg1(500, "query starts with '%s'\n", query);
+   /*
+    * We always make a private copy of the query as we are doing serious
+    * rewrites in this engine. When running the private copy through the
+    * different query filters we lose the original private copy, so we
+    * first keep an extra reference to it so we can free it on exit from
+    * this function.
+    */
+ dup_query = new_query = bstrdup(query);
+
+ /*
+ * Iterate over the query string and perform any needed operations.
+ * We use a sliding window over the query string where bp points to
+ * the previous position in the query and cp to the current position
+ * in the query.
+ */
+ bp = new_query;
+ while (bp != NULL) {
+ if ((cp = strchr(bp, ' ')) != NULL) {
+         cp++;
+ }
+
+ if (!strncasecmp(bp, "BEGIN", 5)) {
+ /*
+ * This is the start of a transaction.
+ * Inline copy the rest of the query over the BEGIN keyword.
+ */
+ if (cp) {
+ strcpy(bp, cp);
+ } else {
+ *bp = '\0';
+ }
+ start_of_transaction = true;
+ } else if (!strncasecmp(bp, "COMMIT", 6) && (cp == NULL || strncasecmp(cp, "PRESERVE", 8))) {
+ /*
+ * This is the end of a transaction. We cannot check for just the COMMIT
+          * keyword as a DECLARE of a temporary table also has the word COMMIT in it
+          * but it is followed by the word PRESERVE.
+ * Inline copy the rest of the query over the COMMIT keyword.
+ */
+ if (cp) {
+ strcpy(bp, cp);
+ } else {
+ *bp = '\0';
+ }
+ end_of_transaction = true;
+ }
+
+ /*
+ * See what query filter might match.
+ */
+ foreach_alist(rewrite_rule, m_query_filters) {
+ if (!strncasecmp(bp, rewrite_rule->search_pattern, rewrite_rule->pattern_length)) {
+ rewrite_rule->trigger = true;
+ }
+ }
+
+ /*
+ * Slide window.
+ */
+ bp = cp;
+ }
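+
+   /*
+    * For example, a query such as "BEGIN; LOCK TABLE Path IN SHARE ROW
+    * EXCLUSIVE MODE" has its BEGIN keyword stripped by the loop above;
+    * the start_of_transaction flag then merely switches off explicit
+    * commits below, and only "LOCK TABLE ..." is actually sent to Ingres.
+    */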
+
+ /*
+ * Run the query through all query filters that apply e.g. have the trigger set in the
+ * previous loop.
+ */
+ foreach_alist(rewrite_rule, m_query_filters) {
+ if (rewrite_rule->trigger) {
+ new_query = rewrite_rule->rewrite_regexp->replace(new_query);
+ rewrite_rule->trigger = false;
+ }
+ }
+
+ if (start_of_transaction) {
+ Dmsg0(500,"sql_query: Start of transaction\n");
+ m_explicit_commit = false;
+ }
+
+ /*
+ * See if there is any query left after filtering for certain keywords.
+ */
+ bp = new_query;
+ while (bp != NULL && strlen(bp) > 0) {
+ /*
+       * We are starting a new query. Reset everything.
+ */
+ m_num_rows = -1;
+ m_row_number = -1;
+ m_field_number = -1;
+
+ if (m_result) {
+ INGclear(m_result); /* hmm, someone forgot to free?? */
+ m_result = NULL;
+ }
+
+ /*
+ * See if this is a multi-statement query. We split a multi-statement query
+       * on the semicolon and feed the individual queries to the Ingres functions.
+ * We use a sliding window over the query string where bp points to
+ * the previous position in the query and cp to the current position
+ * in the query.
+ */
+ if ((cp = strchr(bp, ';')) != NULL) {
+ *cp++ = '\0';
+ }
+
+ Dmsg1(500, "sql_query after rewrite continues with '%s'\n", bp);
+
+ /*
+ * See if we got a store_result hint which could mean we are running a select.
+       * If QF_STORE_RESULT is not set in flags we are sure it is not a query
+       * that we need to store anything for.
+ */
+ if (flags & QF_STORE_RESULT) {
+ cols = INGgetCols(m_db_handle, bp, m_explicit_commit);
+ } else {
+ cols = 0;
+ }
+
+ if (cols <= 0) {
+ if (cols < 0 ) {
+ Dmsg0(500,"sql_query: neg.columns: no DML stmt!\n");
+ retval = false;
+ goto bail_out;
+ }
+ Dmsg0(500,"sql_query (non SELECT) starting...\n");
+ /*
+ * non SELECT
+ */
+ m_num_rows = INGexec(m_db_handle, bp, m_explicit_commit);
+ if (m_num_rows == -1) {
+ Dmsg0(500,"sql_query (non SELECT) went wrong\n");
+ retval = false;
+ goto bail_out;
+ } else {
+ Dmsg0(500,"sql_query (non SELECT) seems ok\n");
+ }
+ } else {
+ /*
+ * SELECT
+ */
+ Dmsg0(500,"sql_query (SELECT) starting...\n");
+ m_result = INGquery(m_db_handle, bp, m_explicit_commit);
+ if (m_result != NULL) {
+ Dmsg0(500, "we have a result\n");
+
+ /*
+ * How many fields in the set?
+ */
+ m_num_fields = (int)INGnfields(m_result);
+ Dmsg1(500, "we have %d fields\n", m_num_fields);
+
+ m_num_rows = INGntuples(m_result);
+ Dmsg1(500, "we have %d rows\n", m_num_rows);
+ } else {
+ Dmsg0(500, "No resultset...\n");
+ retval = false;
+ goto bail_out;
+ }
+ }
+
+ bp = cp;
+ }
+
+bail_out:
+ if (end_of_transaction) {
+ Dmsg0(500,"sql_query: End of transaction, commiting work\n");
+ m_explicit_commit = true;
+ INGcommit(m_db_handle);
+ }
+
+ free(dup_query);
+ Dmsg0(500, "sql_query finishing\n");
+
+ return retval;
+}
+
+void B_DB_INGRES::sql_free_result(void)
+{
+ db_lock(this);
+ if (m_result) {
+ INGclear(m_result);
+ m_result = NULL;
+ }
+ if (m_rows) {
+ free(m_rows);
+ m_rows = NULL;
+ }
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
+ }
+ m_num_rows = m_num_fields = 0;
+ db_unlock(this);
+}
+
+SQL_ROW B_DB_INGRES::sql_fetch_row(void)
+{
+ int j;
+ SQL_ROW row = NULL; /* by default, return NULL */
+
+ if (!m_result) {
+ return row;
+ }
+ if (m_result->num_rows <= 0) {
+ return row;
+ }
+
+ Dmsg0(500, "sql_fetch_row start\n");
+
+ if (!m_rows || m_rows_size < m_num_fields) {
+ if (m_rows) {
+ Dmsg0(500, "sql_fetch_row freeing space\n");
+ free(m_rows);
+ }
+ Dmsg1(500, "we need space for %d bytes\n", sizeof(char *) * m_num_fields);
+ m_rows = (SQL_ROW)malloc(sizeof(char *) * m_num_fields);
+ m_rows_size = m_num_fields;
+
+ /*
+ * Now reset the row_number now that we have the space allocated
+ */
+ m_row_number = 0;
+ }
+
+ /*
+ * If still within the result set
+ */
+ if (m_row_number < m_num_rows) {
+ Dmsg2(500, "sql_fetch_row row number '%d' is acceptable (0..%d)\n", m_row_number, m_num_rows);
+ /*
+ * Get each value from this row
+ */
+ for (j = 0; j < m_num_fields; j++) {
+ m_rows[j] = INGgetvalue(m_result, m_row_number, j);
+ Dmsg2(500, "sql_fetch_row field '%d' has value '%s'\n", j, m_rows[j]);
+ }
+ /*
+ * Increment the row number for the next call
+ */
+ m_row_number++;
+
+ row = m_rows;
+ } else {
+ Dmsg2(500, "sql_fetch_row row number '%d' is NOT acceptable (0..%d)\n", m_row_number, m_num_rows);
+ }
+
+ Dmsg1(500, "sql_fetch_row finishes returning %p\n", row);
+
+ return row;
+}
+
+const char *B_DB_INGRES::sql_strerror(void)
+{
+ return INGerrorMessage(m_db_handle);
+}
+
+void B_DB_INGRES::sql_data_seek(int row)
+{
+ /*
+ * Set the row number to be returned on the next call to sql_fetch_row
+ */
+ m_row_number = row;
+}
+
+int B_DB_INGRES::sql_affected_rows(void)
+{
+ return m_num_rows;
+}
+
+/*
+ * First execute the insert query and then retrieve the currval.
+ * By temporarily clearing m_explicit_commit we pretend to be inside a
+ * transaction, so the INSERT and the currval lookup form one atomic
+ * operation for Ingres. We save the current explicit_commit status,
+ * clear it for the duration of this function, restore it at the end
+ * and commit the work if explicit commits are enabled.
+ */
+uint64_t B_DB_INGRES::sql_insert_autokey_record(const char *query, const char *table_name)
+{
+ char sequence[64];
+ char getkeyval_query[256];
+ char *currval;
+ uint64_t id = 0;
+ bool current_explicit_commit;
+
+ /*
+ * Save the current transaction status and pretend we are in a transaction.
+ */
+ current_explicit_commit = m_explicit_commit;
+ m_explicit_commit = false;
+
+ /*
+ * Execute the INSERT query.
+ */
+ m_num_rows = INGexec(m_db_handle, query, m_explicit_commit);
+ if (m_num_rows == -1) {
+ goto bail_out;
+ }
+
+ changes++;
+
+ /*
+ * Obtain the current value of the sequence that
+ * provides the serial value for primary key of the table.
+ *
+ * currval is local to our session. It is not affected by
+ * other transactions.
+ *
+ * Determine the name of the sequence.
+ * As we name all sequences as <table>_seq this is easy.
+ */
+ bstrncpy(sequence, table_name, sizeof(sequence));
+ bstrncat(sequence, "_seq", sizeof(sequence));
+
+ bsnprintf(getkeyval_query, sizeof(getkeyval_query), "SELECT %s.currval FROM %s", sequence, table_name);
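+   /*
+    * For example, a table_name of "job" produces:
+    *    SELECT job_seq.currval FROM job
+    */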
+
+ if (m_result) {
+ INGclear(m_result);
+ m_result = NULL;
+ }
+ m_result = INGquery(m_db_handle, getkeyval_query, m_explicit_commit);
+
+ if (!m_result) {
+ Dmsg1(50, "Query failed: %s\n", getkeyval_query);
+ goto bail_out;
+ }
+
+ Dmsg0(500, "exec done");
+
+ currval = INGgetvalue(m_result, 0, 0);
+ if (currval) {
+ id = str_to_uint64(currval);
+ }
+
+ INGclear(m_result);
+ m_result = NULL;
+
+bail_out:
+ /*
+ * Restore the actual explicit_commit status.
+ */
+ m_explicit_commit = current_explicit_commit;
+
+ /*
+    * Commit if explicit_commit is set, i.e. we are not inside a transaction.
+ */
+ if (m_explicit_commit) {
+ INGcommit(m_db_handle);
+ }
+
+ return id;
+}
+
+SQL_FIELD *B_DB_INGRES::sql_fetch_field(void)
+{
+ int i, j;
+ int max_length;
+ int this_length;
+
+ if (!m_fields || m_fields_size < m_num_fields) {
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
+ }
+ Dmsg1(500, "allocating space for %d fields\n", m_num_fields);
+ m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * m_num_fields);
+ m_fields_size = m_num_fields;
+
+ for (i = 0; i < m_num_fields; i++) {
+ Dmsg1(500, "filling field %d\n", i);
+ m_fields[i].name = INGfname(m_result, i);
+ m_fields[i].type = INGftype(m_result, i);
+ m_fields[i].flags = 0;
+
+ /*
+ * For a given column, find the max length.
+ */
+ max_length = 0;
+ for (j = 0; j < m_num_rows; j++) {
+ if (INGgetisnull(m_result, j, i)) {
+ this_length = 4; /* "NULL" */
+ } else {
+ this_length = cstrlen(INGgetvalue(m_result, j, i));
+ }
+
+ if (max_length < this_length) {
+ max_length = this_length;
+ }
+ }
+ m_fields[i].max_length = max_length;
+
+ Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n",
+ m_fields[i].name, m_fields[i].max_length, m_fields[i].type, m_fields[i].flags);
+ }
+ }
+
+ /*
+ * Increment field number for the next time around
+ */
+ return &m_fields[m_field_number++];
+}
+
+bool B_DB_INGRES::sql_field_is_not_null(int field_type)
+{
+ switch (field_type) {
+ case 1:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool B_DB_INGRES::sql_field_is_numeric(int field_type)
+{
+ /*
+ * See ${II_SYSTEM}/ingres/files/eqsqlda.h for numeric types.
+ */
+ switch (field_type) {
+ case IISQ_DEC_TYPE:
+ case IISQ_INT_TYPE:
+ case IISQ_FLT_TYPE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Escape strings so that Ingres is happy on COPY
+ *
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
+ */
+static char *ingres_copy_escape(char *dest, char *src, size_t len)
+{
+ /* we have to escape \t, \n, \r, \ */
+ char c = '\0' ;
+
+ while (len > 0 && *src) {
+ switch (*src) {
+ case '\n':
+ c = 'n';
+ break;
+ case '\\':
+ c = '\\';
+ break;
+ case '\t':
+ c = 't';
+ break;
+ case '\r':
+ c = 'r';
+ break;
+ default:
+ c = '\0' ;
+ }
+
+ if (c) {
+ *dest = '\\';
+ dest++;
+ *dest = c;
+ } else {
+ *dest = *src;
+ }
+
+ len--;
+ src++;
+ dest++;
+ }
+
+ *dest = '\0';
+ return dest;
+}
+
+/*
+ * Returns true if OK
+ * false if failed
+ */
+bool B_DB_INGRES::sql_batch_start(JCR *jcr)
+{
+ bool ok;
+
+ db_lock(this);
+ ok = sql_query("DECLARE GLOBAL TEMPORARY TABLE batch ("
+ "FileIndex INTEGER,"
+ "JobId INTEGER,"
+ "Path VARBYTE(32000),"
+ "Name VARBYTE(32000),"
+ "LStat VARBYTE(255),"
+ "MD5 VARBYTE(255),"
+ "MarkId INTEGER)"
+ " ON COMMIT PRESERVE ROWS WITH NORECOVERY");
+ db_unlock(this);
+ return ok;
+}
+
+/*
+ * Returns true if OK
+ * false if failed
+ */
+bool B_DB_INGRES::sql_batch_end(JCR *jcr, const char *error)
+{
+ m_status = 0;
+ return true;
+}
+
+/*
+ * Returns true if OK
+ * false if failed
+ */
+bool B_DB_INGRES::sql_batch_insert(JCR *jcr, ATTR_DBR *ar)
+{
+ size_t len;
+ const char *digest;
+ char ed1[50];
+
+ esc_name = check_pool_memory_size(esc_name, fnl*2+1);
+ db_escape_string(jcr, esc_name, fname, fnl);
+
+ esc_path = check_pool_memory_size(esc_path, pnl*2+1);
+ db_escape_string(jcr, esc_path, path, pnl);
+
+ if (ar->Digest == NULL || ar->Digest[0] == 0) {
+ digest = "0";
+ } else {
+ digest = ar->Digest;
+ }
+
+ len = Mmsg(cmd, "INSERT INTO batch VALUES "
+ "(%u,%s,'%s','%s','%s','%s',%u)",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), esc_path,
+ esc_name, ar->attr, digest, ar->DeltaSeq);
+
+ return sql_query(cmd);
+}
+
+/*
+ * Initialize database data structure. In principle this should
+ * never have errors, or it is really fatal.
+ */
+B_DB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user,
+ const char *db_password, const char *db_address, int db_port,
+ const char *db_socket, bool mult_db_connections, bool disable_batch_insert)
+{
+ B_DB_INGRES *mdb = NULL;
+
+ if (!db_user) {
+ Jmsg(jcr, M_FATAL, 0, _("A user name for Ingres must be supplied.\n"));
+ return NULL;
+ }
+
+ P(mutex); /* lock DB queue */
+ if (db_list && !mult_db_connections) {
+ /*
+ * Look to see if DB already open
+ */
+ foreach_dlist(mdb, db_list) {
+ if (mdb->db_match_database(db_driver, db_name, db_address, db_port)) {
+ Dmsg1(100, "DB REopen %s\n", db_name);
+ mdb->increment_refcount();
+ goto bail_out;
+ }
+ }
+ }
+
+ Dmsg0(100, "db_init_database first time\n");
+ mdb = New(B_DB_INGRES(jcr, db_driver, db_name, db_user, db_password, db_address,
+ db_port, db_socket, mult_db_connections, disable_batch_insert));
+
+bail_out:
+ V(mutex);
+ return mdb;
+}
+#endif /* HAVE_INGRES */
--- /dev/null
+#!/bin/sh
+#
+# shell script to create Bacula Ingres tables
+#
+bindir=@INGRES_BINDIR@
+db_name=@db_name@
+
+sql $* ${db_name}
#!/bin/sh
#
# This routine makes the appropriately configured
-# Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite.
+# Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite.
#
-if test xsqlite3 = x@DB_TYPE@ ; then
- echo "Making SQLite tables"
- @scriptdir@/make_@DB_TYPE@_tables $*
-else
- if test xmysql = x@DB_TYPE@ ; then
- echo "Making MySQL tables"
- @scriptdir@/make_mysql_tables $*
- elif test xingres = x@DB_TYPE@ ; then
- echo "Making Ingres tables"
- @scriptdir@/make_ingres_tables $*
- else
- echo "Making PostgreSQL tables"
- @scriptdir@/make_postgresql_tables $*
- fi
+
+default_db_type=@DEFAULT_DB_TYPE@
+
+#
+# See if the first argument is a valid backend name.
+# If so the user overrides the default database backend.
+#
+if [ $# -gt 0 ]; then
+ case $1 in
+ sqlite3)
+ db_type=$1
+ shift
+ ;;
+ mysql)
+ db_type=$1
+ shift
+ ;;
+ postgresql)
+ db_type=$1
+ shift
+ ;;
+ ingres)
+ db_type=$1
+ shift
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+#
+# If no new db_type is given use the default db_type.
+#
+if [ -z "${db_type}" ]; then
+ db_type="${default_db_type}"
fi
+
+echo "Making ${db_type} tables"
+@scriptdir@/make_${db_type}_tables $*
# password.
# $4 is the host on which the database is located
# (default "")
+# $5 is the type of database
#
#
-BINDIR=@SQL_BINDIR@
+
+default_db_type=@DEFAULT_DB_TYPE@
+
+#
+# See if the fifth argument is a valid backend name.
+# If so the user overrides the default database backend.
+#
+if [ $# -ge 5 ]; then
+ case $5 in
+ sqlite3)
+ db_type=$5
+ ;;
+ mysql)
+ db_type=$5
+ ;;
+ postgresql)
+ db_type=$5
+ ;;
+ ingres)
+ db_type=$5
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+#
+# If no new db_type is given use the default db_type.
+#
+if [ -z "${db_type}" ]; then
+ db_type="${default_db_type}"
+fi
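+
+#
+# For example, with the arguments:  bacula bacula secret dbhost postgresql
+# (hypothetical values) the script runs pg_dump for the "bacula" database on
+# dbhost as user bacula, overriding the configured default backend.
+#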
cd @working_dir@
rm -f $1.sql
-if test xsqlite = x@DB_TYPE@ ; then
- echo ".dump" | ${BINDIR}/sqlite $1.db >$1.sql
-else
- if test xmysql = x@DB_TYPE@ ; then
+
+case ${db_type} in
+ sqlite3)
+ BINDIR=@SQLITE_BINDIR@
+ echo ".dump" | ${BINDIR}/sqlite3 $1.db >$1.sql
+ ;;
+ mysql)
+ BINDIR=@MYSQL_BINDIR@
if test $# -gt 2; then
MYSQLPASSWORD=" --password=$3"
else
MYSQLHOST=""
fi
${BINDIR}/mysqldump -u ${2}${MYSQLPASSWORD}${MYSQLHOST} -f --opt $1 >$1.sql
- else
- if test xpostgresql = x@DB_TYPE@ ; then
- if test $# -gt 2; then
- PGPASSWORD=$3
- export PGPASSWORD
- fi
- if test $# -gt 3; then
- PGHOST=" --host=$4"
- else
- PGHOST=""
- fi
- # you could also add --compress for compression. See man pg_dump
- exec ${BINDIR}/pg_dump -c $PGHOST -U $2 $1 >$1.sql
+ ;;
+ postgresql)
+ BINDIR=@POSTGRESQL_BINDIR@
+ if test $# -gt 2; then
+ PGPASSWORD=$3
+ export PGPASSWORD
+ fi
+ if test $# -gt 3; then
+ PGHOST=" --host=$4"
else
- echo ".dump" | ${BINDIR}/sqlite3 $1.db >$1.sql
+ PGHOST=""
fi
- fi
-fi
+ # you could also add --compress for compression. See man pg_dump
+ exec ${BINDIR}/pg_dump -c $PGHOST -U $2 $1 >$1.sql
+ ;;
+esac
#
# To read back a MySQL database use:
# cd @working_dir@
#
# shell script to create Bacula Ingres tables
#
-bindir=@SQL_BINDIR@
+bindir=@INGRES_BINDIR@
PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
db_user=${db_user:-@db_user@}
# Important note:
# You won't get any support for performance issue if you changed the default
# schema.
-
-bindir=@SQL_BINDIR@
+#
+bindir=@MYSQL_BINDIR@
PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
# You won't get any support for performance issue if you changed the default
# schema.
#
-bindir=@SQL_BINDIR@
+bindir=@POSTGRESQL_BINDIR@
PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
#
# shell script to create Bacula SQLite tables
-bindir=@SQL_BINDIR@
+bindir=@SQLITE_BINDIR@
PATH="$bindir:$PATH"
cd @working_dir@
-sqlite=@DB_TYPE@
db_name=@db_name@
-${sqlite} $* ${db_name}.db <<END-OF-DATA
+sqlite3 $* ${db_name}.db <<END-OF-DATA
CREATE TABLE Filename (
FilenameId INTEGER,
Name TEXT DEFAULT '',
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2011 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, March 2000
*
+ * Major rewrite by Marco van Wieringen, January 2010 for catalog refactoring.
*/
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
#include "bacula.h"
-#include "cats.h"
#ifdef HAVE_MYSQL
+#include "cats.h"
+#include "bdb_priv.h"
+#include <mysql.h>
+#include <bdb_mysql.h>
+
/* -----------------------------------------------------------------------
*
* MySQL dependent defines and subroutines
* -----------------------------------------------------------------------
*/
-/* List of open databases */
+/*
+ * List of open databases
+ */
static dlist *db_list = NULL;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-/*
- * Retrieve database type
- */
-const char *
-db_get_type(void)
+B_DB_MYSQL::B_DB_MYSQL(JCR *jcr,
+ const char *db_driver,
+ const char *db_name,
+ const char *db_user,
+ const char *db_password,
+ const char *db_address,
+ int db_port,
+ const char *db_socket,
+ bool mult_db_connections,
+ bool disable_batch_insert)
{
- return "MySQL";
-}
-
-/*
- * Initialize database data structure. In principal this should
- * never have errors, or it is really fatal.
- */
-B_DB *
-db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password,
- const char *db_address, int db_port, const char *db_socket,
- int mult_db_connections)
-{
- B_DB *mdb = NULL;
-
- if (!db_user) {
- Jmsg(jcr, M_FATAL, 0, _("A user name for MySQL must be supplied.\n"));
- return NULL;
- }
- P(mutex); /* lock DB queue */
- if (db_list == NULL) {
- db_list = New(dlist(mdb, &mdb->link));
- }
- /* Look to see if DB already open */
- if (!mult_db_connections) {
- foreach_dlist(mdb, db_list) {
- if (bstrcmp(mdb->db_name, db_name) &&
- bstrcmp(mdb->db_address, db_address) &&
- mdb->db_port == db_port) {
- Dmsg2(100, "DB REopen %d %s\n", mdb->ref_count, db_name);
- mdb->ref_count++;
- V(mutex);
- Dmsg3(100, "initdb ref=%d connected=%d db=%p\n", mdb->ref_count,
- mdb->connected, mdb->db);
- return mdb; /* already open */
- }
- }
- }
- Dmsg0(100, "db_open first time\n");
- mdb = (B_DB *)malloc(sizeof(B_DB));
- memset(mdb, 0, sizeof(B_DB));
- mdb->db_name = bstrdup(db_name);
- mdb->db_user = bstrdup(db_user);
+ /*
+ * Initialize the parent class members.
+ */
+ m_db_interface_type = SQL_INTERFACE_TYPE_MYSQL;
+ m_db_type = SQL_TYPE_MYSQL;
+ m_db_driver = bstrdup("MySQL");
+ m_db_name = bstrdup(db_name);
+ m_db_user = bstrdup(db_user);
if (db_password) {
- mdb->db_password = bstrdup(db_password);
+ m_db_password = bstrdup(db_password);
}
if (db_address) {
- mdb->db_address = bstrdup(db_address);
+ m_db_address = bstrdup(db_address);
}
if (db_socket) {
- mdb->db_socket = bstrdup(db_socket);
+ m_db_socket = bstrdup(db_socket);
}
- mdb->db_port = db_port;
- mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
- *mdb->errmsg = 0;
- mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */
- mdb->cached_path = get_pool_memory(PM_FNAME);
- mdb->cached_path_id = 0;
- mdb->ref_count = 1;
- mdb->fname = get_pool_memory(PM_FNAME);
- mdb->path = get_pool_memory(PM_FNAME);
- mdb->esc_name = get_pool_memory(PM_FNAME);
- mdb->esc_path = get_pool_memory(PM_FNAME);
- mdb->esc_obj = get_pool_memory(PM_FNAME);
- mdb->allow_transactions = mult_db_connections;
- db_list->append(mdb); /* put db in list */
- Dmsg3(100, "initdb ref=%d connected=%d db=%p\n", mdb->ref_count,
- mdb->connected, mdb->db);
- V(mutex);
- return mdb;
+ m_db_port = db_port;
+
+ if (disable_batch_insert) {
+ m_disabled_batch_insert = true;
+ m_have_batch_insert = false;
+ } else {
+ m_disabled_batch_insert = false;
+#if defined(USE_BATCH_FILE_INSERT)
+# if defined(HAVE_MYSQL_THREAD_SAFE)
+ m_have_batch_insert = mysql_thread_safe();
+# else
+ m_have_batch_insert = false;
+# endif /* HAVE_MYSQL_THREAD_SAFE */
+#else
+ m_have_batch_insert = false;
+#endif /* USE_BATCH_FILE_INSERT */
+ }
+ errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
+ *errmsg = 0;
+ cmd = get_pool_memory(PM_EMSG); /* get command buffer */
+ cached_path = get_pool_memory(PM_FNAME);
+ cached_path_id = 0;
+ m_ref_count = 1;
+ fname = get_pool_memory(PM_FNAME);
+ path = get_pool_memory(PM_FNAME);
+ esc_name = get_pool_memory(PM_FNAME);
+ esc_path = get_pool_memory(PM_FNAME);
+ esc_obj = get_pool_memory(PM_FNAME);
+ m_allow_transactions = mult_db_connections;
+
+ /*
+ * Initialize the private members.
+ */
+ m_db_handle = NULL;
+ m_result = NULL;
+
+ /*
+ * Put the db in the list.
+ */
+ if (db_list == NULL) {
+ db_list = New(dlist(this, &this->m_link));
+ }
+ db_list->append(this);
+}
+
+B_DB_MYSQL::~B_DB_MYSQL()
+{
}
/*
* Now actually open the database. This can generate errors,
* which are returned in the errmsg
*
- * DO NOT close the database or free(mdb) here !!!!
+ * DO NOT close the database or delete mdb here !!!!
*/
-int
-db_open_database(JCR *jcr, B_DB *mdb)
+bool B_DB_MYSQL::db_open_database(JCR *jcr)
{
+ bool retval = false;
int errstat;
P(mutex);
- if (mdb->connected) {
- V(mutex);
- return 1;
+ if (m_connected) {
+ retval = true;
+ goto bail_out;
}
- if ((errstat=rwl_init(&mdb->lock)) != 0) {
+ if ((errstat=rwl_init(&m_lock)) != 0) {
berrno be;
- Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
+ Mmsg1(&errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
be.bstrerror(errstat));
- V(mutex);
- return 0;
+ goto bail_out;
}
- /* connect to the database */
+ /*
+ * Connect to the database
+ */
#ifdef xHAVE_EMBEDDED_MYSQL
// mysql_server_init(0, NULL, NULL);
#endif
- mysql_init(&mdb->mysql);
+ mysql_init(&m_instance);
Dmsg0(50, "mysql_init done\n");
- /* If connection fails, try at 5 sec intervals for 30 seconds. */
+ /*
+ * If connection fails, try at 5 sec intervals for 30 seconds.
+ */
for (int retry=0; retry < 6; retry++) {
- mdb->db = mysql_real_connect(
- &(mdb->mysql), /* db */
- mdb->db_address, /* default = localhost */
- mdb->db_user, /* login name */
- mdb->db_password, /* password */
- mdb->db_name, /* database name */
- mdb->db_port, /* default port */
- mdb->db_socket, /* default = socket */
- CLIENT_FOUND_ROWS); /* flags */
-
- /* If no connect, try once more in case it is a timing problem */
- if (mdb->db != NULL) {
+ m_db_handle = mysql_real_connect(
+ &(m_instance), /* db */
+ m_db_address, /* default = localhost */
+ m_db_user, /* login name */
+ m_db_password, /* password */
+ m_db_name, /* database name */
+ m_db_port, /* default port */
+ m_db_socket, /* default = socket */
+ CLIENT_FOUND_ROWS); /* flags */
+
+ /*
+ * If no connect, try once more in case it is a timing problem
+ */
+ if (m_db_handle != NULL) {
break;
}
bmicrosleep(5,0);
}
- mdb->mysql.reconnect = 1; /* so connection does not timeout */
+ m_instance.reconnect = 1; /* so connection does not timeout */
Dmsg0(50, "mysql_real_connect done\n");
- Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n", mdb->db_user, mdb->db_name,
- mdb->db_password==NULL?"(NULL)":mdb->db_password);
+ Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n", m_db_user, m_db_name,
+ (m_db_password == NULL) ? "(NULL)" : m_db_password);
- if (mdb->db == NULL) {
- Mmsg2(&mdb->errmsg, _("Unable to connect to MySQL server.\n"
+ if (m_db_handle == NULL) {
+ Mmsg2(&errmsg, _("Unable to connect to MySQL server.\n"
"Database=%s User=%s\n"
"MySQL connect failed either server not running or your authorization is incorrect.\n"),
- mdb->db_name, mdb->db_user);
+ m_db_name, m_db_user);
#if MYSQL_VERSION_ID >= 40101
Dmsg3(50, "Error %u (%s): %s\n",
- mysql_errno(&(mdb->mysql)), mysql_sqlstate(&(mdb->mysql)),
- mysql_error(&(mdb->mysql)));
+ mysql_errno(&(m_instance)), mysql_sqlstate(&(m_instance)),
+ mysql_error(&(m_instance)));
#else
Dmsg2(50, "Error %u: %s\n",
- mysql_errno(&(mdb->mysql)), mysql_error(&(mdb->mysql)));
+ mysql_errno(&(m_instance)), mysql_error(&(m_instance)));
#endif
- V(mutex);
- return 0;
+ goto bail_out;
}
- mdb->connected = true;
- if (!check_tables_version(jcr, mdb)) {
- V(mutex);
- return 0;
+ m_connected = true;
+ if (!check_tables_version(jcr, this)) {
+ goto bail_out;
}
- Dmsg3(100, "opendb ref=%d connected=%d db=%p\n", mdb->ref_count,
- mdb->connected, mdb->db);
+ Dmsg3(100, "opendb ref=%d connected=%d db=%p\n", m_ref_count, m_connected, m_db_handle);
- /* Set connection timeout to 8 days specialy for batch mode */
- sql_query(mdb, "SET wait_timeout=691200");
- sql_query(mdb, "SET interactive_timeout=691200");
+ /*
+ * Set connection timeout to 8 days specially for batch mode
+ */
+ sql_query("SET wait_timeout=691200");
+ sql_query("SET interactive_timeout=691200");
+
+ retval = true;
+bail_out:
V(mutex);
- return 1;
+ return retval;
}
-void
-db_close_database(JCR *jcr, B_DB *mdb)
+void B_DB_MYSQL::db_close_database(JCR *jcr)
{
- if (!mdb) {
- return;
- }
- db_end_transaction(jcr, mdb);
+ db_end_transaction(jcr);
P(mutex);
- sql_free_result(mdb);
- mdb->ref_count--;
- Dmsg3(100, "closedb ref=%d connected=%d db=%p\n", mdb->ref_count,
- mdb->connected, mdb->db);
- if (mdb->ref_count == 0) {
- db_list->remove(mdb);
- if (mdb->connected) {
- Dmsg1(100, "close db=%p\n", mdb->db);
- mysql_close(&mdb->mysql);
+ sql_free_result();
+ m_ref_count--;
+ Dmsg3(100, "closedb ref=%d connected=%d db=%p\n", m_ref_count, m_connected, m_db_handle);
+ if (m_ref_count == 0) {
+ db_list->remove(this);
+ if (m_connected) {
+ Dmsg1(100, "close db=%p\n", m_db_handle);
+ mysql_close(&m_instance);
#ifdef xHAVE_EMBEDDED_MYSQL
// mysql_server_end();
#endif
}
- rwl_destroy(&mdb->lock);
- free_pool_memory(mdb->errmsg);
- free_pool_memory(mdb->cmd);
- free_pool_memory(mdb->cached_path);
- free_pool_memory(mdb->fname);
- free_pool_memory(mdb->path);
- free_pool_memory(mdb->esc_name);
- free_pool_memory(mdb->esc_path);
- free_pool_memory(mdb->esc_obj);
- if (mdb->db_name) {
- free(mdb->db_name);
+ rwl_destroy(&m_lock);
+ free_pool_memory(errmsg);
+ free_pool_memory(cmd);
+ free_pool_memory(cached_path);
+ free_pool_memory(fname);
+ free_pool_memory(path);
+ free_pool_memory(esc_name);
+ free_pool_memory(esc_path);
+ free_pool_memory(esc_obj);
+ if (m_db_driver) {
+ free(m_db_driver);
}
- if (mdb->db_user) {
- free(mdb->db_user);
+ if (m_db_name) {
+ free(m_db_name);
}
- if (mdb->db_password) {
- free(mdb->db_password);
+ if (m_db_user) {
+ free(m_db_user);
}
- if (mdb->db_address) {
- free(mdb->db_address);
+ if (m_db_password) {
+ free(m_db_password);
}
- if (mdb->db_socket) {
- free(mdb->db_socket);
+ if (m_db_address) {
+ free(m_db_address);
}
- free(mdb);
+ if (m_db_socket) {
+ free(m_db_socket);
+ }
+ delete this;
if (db_list->size() == 0) {
delete db_list;
db_list = NULL;
V(mutex);
}
-void db_check_backend_thread_safe()
-{
-#ifdef HAVE_BATCH_FILE_INSERT
- if (!mysql_thread_safe()) {
- Emsg0(M_ABORT, 0, _("MySQL client library must be thread-safe "
- "when using BatchMode.\n"));
- }
-#endif
-}
-
/*
* This call is needed because the message channel thread
* opens a database on behalf of a jcr that was created in
* closes the database. Thus the msgchan must call here
* to cleanup any thread specific data that it created.
*/
-void db_thread_cleanup()
+void B_DB_MYSQL::db_thread_cleanup(void)
{
#ifndef HAVE_WIN32
my_thread_end();
}
/*
- * Return the next unique index (auto-increment) for
- * the given table. Return NULL on error.
+ * Escape strings so that MySQL is happy
*
- * For MySQL, NULL causes the auto-increment value
- * to be updated.
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
*/
-int db_next_index(JCR *jcr, B_DB *mdb, char *table, char *index)
+void B_DB_MYSQL::db_escape_string(JCR *jcr, char *snew, char *old, int len)
{
- strcpy(index, "NULL");
- return 1;
+ mysql_real_escape_string(m_db_handle, snew, old, len);
}
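Since the caller must size the destination buffer as described in the comment above, here is a minimal caller sketch. It is not part of the patch; example_escape is a hypothetical helper and assumes the usual bacula.h environment (strlen, malloc, free).

static void example_escape(B_DB_MYSQL *db, JCR *jcr, char *name)
{
   int len = strlen(name);
   char *snew = (char *)malloc(len * 2 + 1);   /* worst-case growth noted above */
   db->db_escape_string(jcr, snew, name, len);
   /* ... embed snew in a query, e.g. via Mmsg() ... */
   free(snew);
}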
/*
* Escape binary object so that MySQL is happy
* Memory is stored in B_DB struct, no need to free it
*/
-char *
-db_escape_object(JCR *jcr, B_DB *mdb, char *old, int len)
+char *B_DB_MYSQL::db_escape_object(JCR *jcr, char *old, int len)
{
- mdb->esc_obj = check_pool_memory_size(mdb->esc_obj, len*2+1);
- mysql_real_escape_string(mdb->db, mdb->esc_obj, old, len);
- return mdb->esc_obj;
+ esc_obj = check_pool_memory_size(esc_obj, len*2+1);
+ mysql_real_escape_string(m_db_handle, esc_obj, old, len);
+ return esc_obj;
}
/*
* Unescape binary object so that MySQL is happy
*/
-void
-db_unescape_object(JCR *jcr, B_DB *db,
- char *from, int32_t expected_len,
- POOLMEM **dest, int32_t *dest_len)
+void B_DB_MYSQL::db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *dest_len)
{
if (!from) {
*dest[0] = 0;
(*dest)[expected_len]=0;
}
-/*
- * Escape strings so that MySQL is happy
- *
- * NOTE! len is the length of the old string. Your new
- * string must be long enough (max 2*old+1) to hold
- * the escaped output.
- */
-void
-db_escape_string(JCR *jcr, B_DB *mdb, char *snew, char *old, int len)
+void B_DB_MYSQL::db_start_transaction(JCR *jcr)
+{
+ if (!jcr->attr) {
+ jcr->attr = get_pool_memory(PM_FNAME);
+ }
+ if (!jcr->ar) {
+ jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR));
+ }
+}
+
+void B_DB_MYSQL::db_end_transaction(JCR *jcr)
{
- mysql_real_escape_string(mdb->db, snew, old, len);
+ if (jcr && jcr->cached_attribute) {
+ Dmsg0(400, "Flush last cached attribute.\n");
+ if (!db_create_attributes_record(jcr, this, jcr->ar)) {
+ Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
+ }
+ jcr->cached_attribute = false;
+ }
}
/*
* Submit a general SQL command (cmd), and for each row returned,
- * the sqlite_handler is called with the ctx.
+ * the result_handler is called with the ctx.
*/
-bool db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
+bool B_DB_MYSQL::db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
{
+ int ret;
SQL_ROW row;
bool send = true;
+ bool retval = false;
+
+ Dmsg1(500, "db_sql_query starts with %s\n", query);
- db_lock(mdb);
- if (sql_query(mdb, query) != 0) {
- Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror(mdb));
- db_unlock(mdb);
- return false;
+ db_lock(this);
+ ret = mysql_query(m_db_handle, query);
+ if (ret != 0) {
+ Mmsg(errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror());
+ Dmsg0(500, "db_sql_query failed\n");
+ goto bail_out;
}
+
+ Dmsg0(500, "db_sql_query succeeded. checking handler\n");
+
if (result_handler != NULL) {
- if ((mdb->result = sql_use_result(mdb)) != NULL) {
- int num_fields = sql_num_fields(mdb);
+ if ((m_result = mysql_use_result(m_db_handle)) != NULL) {
+ m_num_fields = mysql_num_fields(m_result);
- /* We *must* fetch all rows */
- while ((row = sql_fetch_row(mdb)) != NULL) {
+ /*
+ * We *must* fetch all rows
+ */
+ while ((row = mysql_fetch_row(m_result)) != NULL) {
if (send) {
/* the result handler returns 1 when it has
* seen all the data it wants. However, we
* loop to the end of the data.
*/
- if (result_handler(ctx, num_fields, row)) {
+ if (result_handler(ctx, m_num_fields, row)) {
send = false;
}
}
}
+ sql_free_result();
+ }
+ }
+
+ Dmsg0(500, "db_sql_query finished\n");
+ retval = true;
- sql_free_result(mdb);
+bail_out:
+ db_unlock(this);
+ return retval;
+}
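To make the handler contract concrete, here is a minimal result-handler sketch. The handler name and query are illustrative only; the signature simply follows the result_handler(ctx, m_num_fields, row) call in the loop above.

static int count_rows_handler(void *ctx, int num_fields, char **row)
{
   int *counter = (int *)ctx;      /* ctx is whatever the caller passed to db_sql_query() */
   (*counter)++;
   return 0;                       /* nonzero means "seen enough"; remaining rows are still drained */
}

/* usage sketch: int n = 0; mdb->db_sql_query("SELECT JobId FROM Job", count_rows_handler, &n); */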
+
+bool B_DB_MYSQL::sql_query(const char *query, int flags)
+{
+ int ret;
+ bool retval = true;
+
+ Dmsg1(500, "sql_query starts with '%s'\n", query);
+ /*
+ * We are starting a new query, so reset everything.
+ */
+ m_num_rows = -1;
+ m_row_number = -1;
+ m_field_number = -1;
+
+ if (m_result) {
+ mysql_free_result(m_result);
+ m_result = NULL;
+ }
+
+ ret = mysql_query(m_db_handle, query);
+ if (ret == 0) {
+ Dmsg0(500, "we have a result\n");
+ if (flags & QF_STORE_RESULT) {
+ m_result = mysql_store_result(m_db_handle);
+ if (m_result != NULL) {
+ m_num_fields = mysql_num_fields(m_result);
+ Dmsg1(500, "we have %d fields\n", m_num_fields);
+ m_num_rows = mysql_num_rows(m_result);
+ Dmsg1(500, "we have %d rows\n", m_num_rows);
+ } else {
+ m_num_fields = 0;
+ m_num_rows = mysql_affected_rows(m_db_handle);
+ Dmsg1(500, "we have %d rows\n", m_num_rows);
+ }
+ } else {
+ m_num_fields = 0;
+ m_num_rows = mysql_affected_rows(m_db_handle);
+ Dmsg1(500, "we have %d rows\n", m_num_rows);
}
+ } else {
+ Dmsg0(500, "we failed\n");
+ m_status = 1; /* failed */
+ retval = false;
}
- db_unlock(mdb);
- return true;
+ return retval;
+}
+void B_DB_MYSQL::sql_free_result(void)
+{
+ db_lock(this);
+ if (m_result) {
+ mysql_free_result(m_result);
+ m_result = NULL;
+ }
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
+ }
+ m_num_rows = m_num_fields = 0;
+ db_unlock(this);
}
-void my_mysql_free_result(B_DB *mdb)
+SQL_ROW B_DB_MYSQL::sql_fetch_row(void)
{
- db_lock(mdb);
- if (mdb->result) {
- mysql_free_result(mdb->result);
- mdb->result = NULL;
+ if (!m_result) {
+ return NULL;
+ } else {
+ return mysql_fetch_row(m_result);
}
- db_unlock(mdb);
}
-uint64_t my_mysql_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name)
+const char *B_DB_MYSQL::sql_strerror(void)
+{
+ return mysql_error(m_db_handle);
+}
+
+void B_DB_MYSQL::sql_data_seek(int row)
+{
+ return mysql_data_seek(m_result, row);
+}
+
+int B_DB_MYSQL::sql_affected_rows(void)
+{
+ return mysql_affected_rows(m_db_handle);
+}
+
+uint64_t B_DB_MYSQL::sql_insert_autokey_record(const char *query, const char *table_name)
{
/*
* First execute the insert query and then retrieve the currval.
*/
- if (mysql_query(mdb->db, query)) {
+ if (mysql_query(m_db_handle, query) != 0) {
return 0;
}
- mdb->num_rows = sql_affected_rows(mdb);
- if (mdb->num_rows != 1) {
+ m_num_rows = mysql_affected_rows(m_db_handle);
+ if (m_num_rows != 1) {
return 0;
}
- mdb->changes++;
+ changes++;
- return mysql_insert_id(mdb->db);
+ return mysql_insert_id(m_db_handle);
}
+SQL_FIELD *B_DB_MYSQL::sql_fetch_field(void)
+{
+ int i;
+ MYSQL_FIELD *field;
-#ifdef HAVE_BATCH_FILE_INSERT
-const char *my_mysql_batch_lock_path_query =
- "LOCK TABLES Path write, batch write, Path as p write";
+ if (!m_fields || m_fields_size < m_num_fields) {
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
+ }
+ Dmsg1(500, "allocating space for %d fields\n", m_num_fields);
+ m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * m_num_fields);
+ m_fields_size = m_num_fields;
+
+ for (i = 0; i < m_num_fields; i++) {
+ Dmsg1(500, "filling field %d\n", i);
+ if ((field = mysql_fetch_field(m_result)) != NULL) {
+ m_fields[i].name = field->name;
+ m_fields[i].max_length = field->max_length;
+ m_fields[i].type = field->type;
+ m_fields[i].flags = field->flags;
+
+ Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n",
+ m_fields[i].name, m_fields[i].max_length, m_fields[i].type, m_fields[i].flags);
+ }
+ }
+ }
+ /*
+ * Increment field number for the next time around
+ */
+ return &m_fields[m_field_number++];
+}
-const char *my_mysql_batch_lock_filename_query =
- "LOCK TABLES Filename write, batch write, Filename as f write";
+bool B_DB_MYSQL::sql_field_is_not_null(int field_type)
+{
+ return IS_NOT_NULL(field_type);
+}
-const char *my_mysql_batch_unlock_tables_query = "UNLOCK TABLES";
+bool B_DB_MYSQL::sql_field_is_numeric(int field_type)
+{
+ return IS_NUM(field_type);
+}
-const char *my_mysql_batch_fill_path_query =
- "INSERT INTO Path (Path) "
- "SELECT a.Path FROM "
- "(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS "
- "(SELECT Path FROM Path AS p WHERE p.Path = a.Path)";
+/*
+ * Returns true if OK
+ * false if failed
+ */
+bool B_DB_MYSQL::sql_batch_start(JCR *jcr)
+{
+ bool retval;
+
+ db_lock(this);
+ retval = sql_query("CREATE TEMPORARY TABLE batch ("
+ "FileIndex integer,"
+ "JobId integer,"
+ "Path blob,"
+ "Name blob,"
+ "LStat tinyblob,"
+ "MD5 tinyblob,"
+ "MarkId integer)");
+ db_unlock(this);
+
+ return retval;
+}
+
+/*
+ * Set error to something to abort operation.
+ *
+ * Returns true if OK
+ *         false if failed
+ */
+bool B_DB_MYSQL::sql_batch_end(JCR *jcr, const char *error)
+{
+ m_status = 0;
-const char *my_mysql_batch_fill_filename_query =
- "INSERT INTO Filename (Name) "
- "SELECT a.Name FROM "
- "(SELECT DISTINCT Name FROM batch) AS a WHERE NOT EXISTS "
- "(SELECT Name FROM Filename AS f WHERE f.Name = a.Name)";
-#endif /* HAVE_BATCH_FILE_INSERT */
+ return true;
+}
+
+/*
+ * Returns true if OK
+ * false if failed
+ */
+bool B_DB_MYSQL::sql_batch_insert(JCR *jcr, ATTR_DBR *ar)
+{
+ size_t len;
+ const char *digest;
+ char ed1[50];
+
+ esc_name = check_pool_memory_size(esc_name, fnl*2+1);
+ db_escape_string(jcr, esc_name, fname, fnl);
+
+ esc_path = check_pool_memory_size(esc_path, pnl*2+1);
+ db_escape_string(jcr, esc_path, path, pnl);
+
+ if (ar->Digest == NULL || ar->Digest[0] == 0) {
+ digest = "0";
+ } else {
+ digest = ar->Digest;
+ }
+
+ len = Mmsg(cmd, "INSERT INTO batch VALUES "
+ "(%u,%s,'%s','%s','%s','%s',%u)",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), esc_path,
+ esc_name, ar->attr, digest, ar->DeltaSeq);
+
+ return sql_query(cmd);
+}
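Taken together with sql_batch_start() and sql_batch_end() above, the expected calling sequence looks roughly like this. The driver function below is hypothetical, a sketch rather than code from the patch.

static void example_mysql_batch(B_DB_MYSQL *db, JCR *jcr, ATTR_DBR *ar)
{
   if (db->sql_batch_start(jcr)) {        /* creates the TEMPORARY batch table */
      db->sql_batch_insert(jcr, ar);      /* one INSERT INTO batch ... per attribute record */
      db->sql_batch_end(jcr, NULL);       /* for MySQL this only resets m_status */
   }
}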
+
+/*
+ * Initialize database data structure. In principal this should
+ * never have errors, or it is really fatal.
+ */
+B_DB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user,
+ const char *db_password, const char *db_address, int db_port, const char *db_socket,
+ bool mult_db_connections, bool disable_batch_insert)
+{
+ B_DB_MYSQL *mdb = NULL;
+
+ if (!db_user) {
+ Jmsg(jcr, M_FATAL, 0, _("A user name for MySQL must be supplied.\n"));
+ return NULL;
+ }
+ P(mutex); /* lock DB queue */
+
+ /*
+ * Look to see if DB already open
+ */
+ if (db_list && !mult_db_connections) {
+ foreach_dlist(mdb, db_list) {
+ if (mdb->db_match_database(db_driver, db_name, db_address, db_port)) {
+ Dmsg1(100, "DB REopen %s\n", db_name);
+ mdb->increment_refcount();
+ goto bail_out;
+ }
+ }
+ }
+ Dmsg0(100, "db_init_database first time\n");
+ mdb = New(B_DB_MYSQL(jcr, db_driver, db_name, db_user, db_password, db_address,
+ db_port, db_socket, mult_db_connections, disable_batch_insert));
+
+bail_out:
+ V(mutex);
+ return mdb;
+}
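A hedged sketch of how a caller obtains and releases a catalog handle through this factory. All argument values are placeholders, and it assumes db_open_database() and db_close_database() are public virtuals of B_DB, as the rest of the patch suggests.

static void example_open_catalog(JCR *jcr)
{
   B_DB *db = db_init_database(jcr, "mysql", "bacula", "bacula",
                               NULL,   /* password */
                               NULL,   /* address, default = localhost */
                               0,      /* port */
                               NULL,   /* socket */
                               false,  /* mult_db_connections */
                               false); /* disable_batch_insert */
   if (db) {
      if (db->db_open_database(jcr)) {
         /* ... run catalog queries ... */
      }
      db->db_close_database(jcr);       /* drops the reference taken by db_init_database() */
   }
}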
#endif /* HAVE_MYSQL */
#
# shell script to create Bacula MySQL tables
#
-bindir=@SQL_BINDIR@
+bindir=@MYSQL_BINDIR@
db_name=@db_name@
$bindir/mysql $* ${db_name}
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2003-2010 Free Software Foundation Europe e.V.
+ Copyright (C) 2003-2011 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
* Dan Langille, December 2003
* based upon work done by Kern Sibbald, March 2000
*
+ * Major rewrite by Marco van Wieringen, January 2010 for catalog refactoring.
*/
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
#include "bacula.h"
-#include "cats.h"
#ifdef HAVE_POSTGRESQL
+#include "cats.h"
+#include "bdb_priv.h"
+#include "libpq-fe.h"
#include "postgres_ext.h" /* needed for NAMEDATALEN */
#include "pg_config_manual.h" /* get NAMEDATALEN on version 8.3 or later */
+#include "bdb_postgresql.h"
/* -----------------------------------------------------------------------
*
* -----------------------------------------------------------------------
*/
-/* List of open databases */
-static dlist *db_list = NULL;
-
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-
/*
- * Retrieve database type
+ * List of open databases
*/
-const char *
-db_get_type(void)
-{
- return "PostgreSQL";
+static dlist *db_list = NULL;
-}
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-/*
- * Initialize database data structure. In principal this should
- * never have errors, or it is really fatal.
- */
-B_DB *
-db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password,
- const char *db_address, int db_port, const char *db_socket,
- int mult_db_connections)
+B_DB_POSTGRESQL::B_DB_POSTGRESQL(JCR *jcr,
+ const char *db_driver,
+ const char *db_name,
+ const char *db_user,
+ const char *db_password,
+ const char *db_address,
+ int db_port,
+ const char *db_socket,
+ bool mult_db_connections,
+ bool disable_batch_insert)
{
- B_DB *mdb = NULL;
-
- if (!db_user) {
- Jmsg(jcr, M_FATAL, 0, _("A user name for PostgreSQL must be supplied.\n"));
- return NULL;
- }
- P(mutex); /* lock DB queue */
- if (db_list == NULL) {
- db_list = New(dlist(mdb, &mdb->link));
- }
- if (!mult_db_connections) {
- /* Look to see if DB already open */
- foreach_dlist(mdb, db_list) {
- if (bstrcmp(mdb->db_name, db_name) &&
- bstrcmp(mdb->db_address, db_address) &&
- mdb->db_port == db_port) {
- Dmsg2(100, "DB REopen %d %s\n", mdb->ref_count, db_name);
- mdb->ref_count++;
- V(mutex);
- return mdb; /* already open */
- }
- }
- }
- Dmsg0(100, "db_open first time\n");
- mdb = (B_DB *)malloc(sizeof(B_DB));
- memset(mdb, 0, sizeof(B_DB));
- mdb->db_name = bstrdup(db_name);
- mdb->db_user = bstrdup(db_user);
+ /*
+ * Initialize the parent class members.
+ */
+ m_db_interface_type = SQL_INTERFACE_TYPE_POSTGRESQL;
+ m_db_type = SQL_TYPE_POSTGRESQL;
+ m_db_driver = bstrdup("PostgreSQL");
+ m_db_name = bstrdup(db_name);
+ m_db_user = bstrdup(db_user);
if (db_password) {
- mdb->db_password = bstrdup(db_password);
+ m_db_password = bstrdup(db_password);
}
if (db_address) {
- mdb->db_address = bstrdup(db_address);
+ m_db_address = bstrdup(db_address);
}
if (db_socket) {
- mdb->db_socket = bstrdup(db_socket);
- }
- mdb->db_port = db_port;
- mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
- *mdb->errmsg = 0;
- mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */
- mdb->cached_path = get_pool_memory(PM_FNAME);
- mdb->cached_path_id = 0;
- mdb->ref_count = 1;
- mdb->fname = get_pool_memory(PM_FNAME);
- mdb->path = get_pool_memory(PM_FNAME);
- mdb->esc_name = get_pool_memory(PM_FNAME);
- mdb->esc_path = get_pool_memory(PM_FNAME);
- mdb->allow_transactions = mult_db_connections;
- db_list->append(mdb); /* put db in list */
- V(mutex);
- return mdb;
+ m_db_socket = bstrdup(db_socket);
+ }
+ m_db_port = db_port;
+ if (disable_batch_insert) {
+ m_disabled_batch_insert = true;
+ m_have_batch_insert = false;
+ } else {
+ m_disabled_batch_insert = false;
+#if defined(USE_BATCH_FILE_INSERT)
+#if defined(HAVE_POSTGRESQL_BATCH_FILE_INSERT) || defined(HAVE_PQISTHREADSAFE)
+#ifdef HAVE_PQISTHREADSAFE
+ m_have_batch_insert = PQisthreadsafe();
+#else
+ m_have_batch_insert = true;
+#endif /* HAVE_PQISTHREADSAFE */
+#else
+ m_have_batch_insert = true;
+#endif /* HAVE_POSTGRESQL_BATCH_FILE_INSERT || HAVE_PQISTHREADSAFE */
+#else
+ m_have_batch_insert = false;
+#endif /* USE_BATCH_FILE_INSERT */
+ }
+ errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
+ *errmsg = 0;
+ cmd = get_pool_memory(PM_EMSG); /* get command buffer */
+ cached_path = get_pool_memory(PM_FNAME);
+ cached_path_id = 0;
+ m_ref_count = 1;
+ fname = get_pool_memory(PM_FNAME);
+ path = get_pool_memory(PM_FNAME);
+ esc_name = get_pool_memory(PM_FNAME);
+ esc_path = get_pool_memory(PM_FNAME);
+ m_allow_transactions = mult_db_connections;
+
+ /*
+ * Initialize the private members.
+ */
+ m_db_handle = NULL;
+ m_result = NULL;
+
+ /*
+ * Put the db in the list.
+ */
+ if (db_list == NULL) {
+ db_list = New(dlist(this, &this->m_link));
+ }
+ db_list->append(this);
+}
+
+B_DB_POSTGRESQL::~B_DB_POSTGRESQL()
+{
}
-/* Check that the database correspond to the encoding we want */
-static bool check_database_encoding(JCR *jcr, B_DB *mdb)
+/*
+ * Check that the database corresponds to the encoding we want
+ */
+static bool pgsql_check_database_encoding(JCR *jcr, B_DB_POSTGRESQL *mdb)
{
SQL_ROW row;
- int ret=false;
+ int ret = false;
- if (!db_sql_query(mdb, "SELECT getdatabaseencoding()", NULL, NULL)) {
+ if (!mdb->sql_query("SELECT getdatabaseencoding()", QF_STORE_RESULT)) {
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
return false;
}
- if ((row = sql_fetch_row(mdb)) == NULL) {
- Mmsg1(mdb->errmsg, _("error fetching row: %s\n"), sql_strerror(mdb));
+ if ((row = mdb->sql_fetch_row()) == NULL) {
+ Mmsg1(mdb->errmsg, _("error fetching row: %s\n"), mdb->sql_strerror());
Jmsg(jcr, M_ERROR, 0, "Can't check database encoding %s", mdb->errmsg);
} else {
ret = bstrcmp(row[0], "SQL_ASCII");
if (ret) {
- /* if we are in SQL_ASCII, we can force the client_encoding to SQL_ASCII too */
- db_sql_query(mdb, "SET client_encoding TO 'SQL_ASCII'", NULL, NULL);
+ /*
+ * If we are in SQL_ASCII, we can force the client_encoding to SQL_ASCII too
+ */
+ mdb->sql_query("SET client_encoding TO 'SQL_ASCII'");
- } else { /* something is wrong with database encoding */
+ } else {
+ /*
+ * Something is wrong with database encoding
+ */
Mmsg(mdb->errmsg,
_("Encoding error for database \"%s\". Wanted SQL_ASCII, got %s\n"),
- mdb->db_name, row[0]);
+ mdb->get_db_name(), row[0]);
Jmsg(jcr, M_WARNING, 0, "%s", mdb->errmsg);
Dmsg1(50, "%s", mdb->errmsg);
}
* Now actually open the database. This can generate errors,
* which are returned in the errmsg
*
- * DO NOT close the database or free(mdb) here !!!!
+ * DO NOT close the database or delete mdb here !!!!
*/
-int
-db_open_database(JCR *jcr, B_DB *mdb)
+bool B_DB_POSTGRESQL::db_open_database(JCR *jcr)
{
+ bool retval = false;
int errstat;
char buf[10], *port;
P(mutex);
- if (mdb->connected) {
- V(mutex);
- return 1;
+ if (m_connected) {
+ retval = true;
+ goto bail_out;
}
- mdb->connected = false;
- if ((errstat=rwl_init(&mdb->lock)) != 0) {
+ if ((errstat=rwl_init(&m_lock)) != 0) {
berrno be;
- Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
+ Mmsg1(&errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
be.bstrerror(errstat));
- V(mutex);
- return 0;
+ goto bail_out;
}
- if (mdb->db_port) {
- bsnprintf(buf, sizeof(buf), "%d", mdb->db_port);
+ if (m_db_port) {
+ bsnprintf(buf, sizeof(buf), "%d", m_db_port);
port = buf;
} else {
port = NULL;
/* If connection fails, try at 5 sec intervals for 30 seconds. */
for (int retry=0; retry < 6; retry++) {
/* connect to the database */
- mdb->db = PQsetdbLogin(
- mdb->db_address, /* default = localhost */
- port, /* default port */
- NULL, /* pg options */
- NULL, /* tty, ignored */
- mdb->db_name, /* database name */
- mdb->db_user, /* login name */
- mdb->db_password); /* password */
+ m_db_handle = PQsetdbLogin(
+ m_db_address, /* default = localhost */
+ port, /* default port */
+ NULL, /* pg options */
+ NULL, /* tty, ignored */
+ m_db_name, /* database name */
+ m_db_user, /* login name */
+ m_db_password); /* password */
/* If no connect, try once more in case it is a timing problem */
- if (PQstatus(mdb->db) == CONNECTION_OK) {
+ if (PQstatus(m_db_handle) == CONNECTION_OK) {
break;
}
bmicrosleep(5, 0);
}
Dmsg0(50, "pg_real_connect done\n");
- Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n", mdb->db_user, mdb->db_name,
- mdb->db_password==NULL?"(NULL)":mdb->db_password);
+ Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n", m_db_user, m_db_name,
+ (m_db_password == NULL) ? "(NULL)" : m_db_password);
- if (PQstatus(mdb->db) != CONNECTION_OK) {
- Mmsg2(&mdb->errmsg, _("Unable to connect to PostgreSQL server. Database=%s User=%s\n"
+ if (PQstatus(m_db_handle) != CONNECTION_OK) {
+ Mmsg2(&errmsg, _("Unable to connect to PostgreSQL server. Database=%s User=%s\n"
"Possible causes: SQL server not running; password incorrect; max_connections exceeded.\n"),
- mdb->db_name, mdb->db_user);
- V(mutex);
- return 0;
+ m_db_name, m_db_user);
+ goto bail_out;
}
- mdb->connected = true;
-
- if (!check_tables_version(jcr, mdb)) {
- V(mutex);
- return 0;
+ m_connected = true;
+ if (!check_tables_version(jcr, this)) {
+ goto bail_out;
}
- sql_query(mdb, "SET datestyle TO 'ISO, YMD'");
+ sql_query("SET datestyle TO 'ISO, YMD'");
- /* tell PostgreSQL we are using standard conforming strings
- and avoid warnings such as:
- WARNING: nonstandard use of \\ in a string literal
- */
- sql_query(mdb, "set standard_conforming_strings=on");
+ /*
+ * Tell PostgreSQL we are using standard conforming strings
+ * and avoid warnings such as:
+ * WARNING: nonstandard use of \\ in a string literal
+ */
+ sql_query("SET standard_conforming_strings=on");
+
+ /*
+ * Check that encoding is SQL_ASCII
+ */
+ pgsql_check_database_encoding(jcr, this);
- /* check that encoding is SQL_ASCII */
- check_database_encoding(jcr, mdb);
+ retval = true;
+bail_out:
V(mutex);
- return 1;
+ return retval;
}
-void
-db_close_database(JCR *jcr, B_DB *mdb)
+void B_DB_POSTGRESQL::db_close_database(JCR *jcr)
{
- if (!mdb) {
- return;
- }
- db_end_transaction(jcr, mdb);
+ db_end_transaction(jcr);
P(mutex);
- sql_free_result(mdb);
- mdb->ref_count--;
- if (mdb->ref_count == 0) {
- db_list->remove(mdb);
- if (mdb->connected && mdb->db) {
- sql_close(mdb);
+ sql_free_result();
+ m_ref_count--;
+ if (m_ref_count == 0) {
+ db_list->remove(this);
+ if (m_connected && m_db_handle) {
+ PQfinish(m_db_handle);
}
- rwl_destroy(&mdb->lock);
- free_pool_memory(mdb->errmsg);
- free_pool_memory(mdb->cmd);
- free_pool_memory(mdb->cached_path);
- free_pool_memory(mdb->fname);
- free_pool_memory(mdb->path);
- free_pool_memory(mdb->esc_name);
- free_pool_memory(mdb->esc_path);
- if (mdb->db_name) {
- free(mdb->db_name);
+ rwl_destroy(&m_lock);
+ free_pool_memory(errmsg);
+ free_pool_memory(cmd);
+ free_pool_memory(cached_path);
+ free_pool_memory(fname);
+ free_pool_memory(path);
+ free_pool_memory(esc_name);
+ free_pool_memory(esc_path);
+ if (m_db_driver) {
+ free(m_db_driver);
}
- if (mdb->db_user) {
- free(mdb->db_user);
+ if (m_db_name) {
+ free(m_db_name);
}
- if (mdb->db_password) {
- free(mdb->db_password);
+ if (m_db_user) {
+ free(m_db_user);
}
- if (mdb->db_address) {
- free(mdb->db_address);
+ if (m_db_password) {
+ free(m_db_password);
}
- if (mdb->db_socket) {
- free(mdb->db_socket);
+ if (m_db_address) {
+ free(m_db_address);
}
- if (mdb->esc_obj) {
- PQfreemem(mdb->esc_obj);
+ if (m_db_socket) {
+ free(m_db_socket);
}
- free(mdb);
+ if (esc_obj) {
+ PQfreemem(esc_obj);
+ }
+ delete this;
if (db_list->size() == 0) {
delete db_list;
db_list = NULL;
V(mutex);
}
-void db_check_backend_thread_safe()
+void B_DB_POSTGRESQL::db_thread_cleanup(void)
{
-#ifdef HAVE_BATCH_FILE_INSERT
-# ifdef HAVE_PQISTHREADSAFE
- if (!PQisthreadsafe()) {
- Emsg0(M_ABORT, 0, _("Pg client library must be thread-safe "
- "when using BatchMode.\n"));
- }
-# endif
-#endif
}
-void db_thread_cleanup()
-{ }
-
/*
- * Return the next unique index (auto-increment) for
- * the given table. Return NULL on error.
+ * Escape strings so that PostgreSQL is happy
*
- * For PostgreSQL, NULL causes the auto-increment value
- * to be updated.
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
*/
-int db_next_index(JCR *jcr, B_DB *mdb, char *table, char *index)
+void B_DB_POSTGRESQL::db_escape_string(JCR *jcr, char *snew, char *old, int len)
{
- strcpy(index, "NULL");
- return 1;
+ int error;
+
+ PQescapeStringConn(m_db_handle, snew, old, len, &error);
+ if (error) {
+ Jmsg(jcr, M_FATAL, 0, _("PQescapeStringConn returned non-zero.\n"));
+ /* error on encoding, probably invalid multibyte encoding in the source string
+ see PQescapeStringConn documentation for details. */
+ Dmsg0(500, "PQescapeStringConn failed\n");
+ }
}
/*
* Escape binary so that PostgreSQL is happy
*
*/
-char *
-db_escape_object(JCR *jcr, B_DB *mdb, char *old, int len)
+char *B_DB_POSTGRESQL::db_escape_object(JCR *jcr, char *old, int len)
{
size_t new_len;
- if (mdb->esc_obj) {
- PQfreemem(mdb->esc_obj);
- }
-
- mdb->esc_obj = PQescapeByteaConn(mdb->db, (unsigned const char *)old,
- len, &new_len);
+ unsigned char *obj;
- if (!mdb->esc_obj) {
+ obj = PQescapeByteaConn(m_db_handle, (unsigned const char *)old, len, &new_len);
+ if (!obj) {
Jmsg(jcr, M_FATAL, 0, _("PQescapeByteaConn returned NULL.\n"));
}
- return (char *)mdb->esc_obj;
+ esc_obj = check_pool_memory_size(esc_obj, new_len+1);
+ memcpy(esc_obj, obj, new_len);
+ esc_obj[new_len]=0;
+
+ PQfreemem(obj);
+
+ return (char *)esc_obj;
}
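Usage note as a sketch (the helper below is hypothetical): the escaped copy now lives in the pooled esc_obj buffer owned by the B_DB object, so the caller does not free it, matching the MySQL variant's "no need to free it" note.

static void example_escape_object(B_DB_POSTGRESQL *db, JCR *jcr, char *blob, int len)
{
   char *escaped = db->db_escape_object(jcr, blob, len);
   /* embed 'escaped' in a statement; do not free() or PQfreemem() it here */
}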
/*
* Unescape binary object so that PostgreSQL is happy
*
*/
-void
-db_unescape_object(JCR *jcr, B_DB *mdb,
- char *from, int32_t expected_len,
- POOLMEM **dest, int32_t *dest_len)
+void B_DB_POSTGRESQL::db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *dest_len)
{
size_t new_len;
unsigned char *obj;
}
/*
- * Escape strings so that PostgreSQL is happy
- *
- * NOTE! len is the length of the old string. Your new
- * string must be long enough (max 2*old+1) to hold
- * the escaped output.
+ * Start a transaction. This groups inserts and makes things
+ * much more efficient. Usually started when inserting
+ * file attributes.
*/
-void
-db_escape_string(JCR *jcr, B_DB *mdb, char *snew, char *old, int len)
+void B_DB_POSTGRESQL::db_start_transaction(JCR *jcr)
{
- int error;
-
- PQescapeStringConn(mdb->db, snew, old, len, &error);
- if (error) {
- Jmsg(jcr, M_FATAL, 0, _("PQescapeStringConn returned non-zero.\n"));
- /* error on encoding, probably invalid multibyte encoding in the source string
- see PQescapeStringConn documentation for details. */
- Dmsg0(500, "PQescapeStringConn failed\n");
+ if (!jcr->attr) {
+ jcr->attr = get_pool_memory(PM_FNAME);
}
-}
-
-/*
- * Submit a general SQL command (cmd), and for each row returned,
- * the sqlite_handler is called with the ctx.
- */
-bool db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
-{
- SQL_ROW row;
-
- Dmsg0(500, "db_sql_query started\n");
-
- db_lock(mdb);
- if (sql_query(mdb, query) != 0) {
- Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror(mdb));
- db_unlock(mdb);
- Dmsg0(500, "db_sql_query failed\n");
- return false;
+ if (!jcr->ar) {
+ jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR));
}
- Dmsg0(500, "db_sql_query succeeded. checking handler\n");
- if (result_handler != NULL) {
- Dmsg0(500, "db_sql_query invoking handler\n");
- if ((mdb->result = sql_store_result(mdb)) != NULL) {
- int num_fields = sql_num_fields(mdb);
-
- Dmsg0(500, "db_sql_query sql_store_result suceeded\n");
- while ((row = sql_fetch_row(mdb)) != NULL) {
-
- Dmsg0(500, "db_sql_query sql_fetch_row worked\n");
- if (result_handler(ctx, num_fields, row))
- break;
- }
-
- sql_free_result(mdb);
- }
+ /*
+ * This is turned off because transactions break
+ * if multiple simultaneous jobs are run.
+ */
+ if (!m_allow_transactions) {
+ return;
}
- db_unlock(mdb);
- Dmsg0(500, "db_sql_query finished\n");
-
- return true;
+ db_lock(this);
+ /*
+ * Allow only 25,000 changes per transaction
+ */
+ if (m_transaction && changes > 25000) {
+ db_end_transaction(jcr);
+ }
+ if (!m_transaction) {
+ sql_query("BEGIN"); /* begin transaction */
+ Dmsg0(400, "Start PostgreSQL transaction\n");
+ m_transaction = true;
+ }
+ db_unlock(this);
}
-
-
-POSTGRESQL_ROW my_postgresql_fetch_row(B_DB *mdb)
+void B_DB_POSTGRESQL::db_end_transaction(JCR *jcr)
{
- int j;
- POSTGRESQL_ROW row = NULL; // by default, return NULL
-
- Dmsg0(500, "my_postgresql_fetch_row start\n");
-
- if (!mdb->row || mdb->row_size < mdb->num_fields) {
- int num_fields = mdb->num_fields;
- Dmsg1(500, "we have need space of %d bytes\n", sizeof(char *) * mdb->num_fields);
-
- if (mdb->row) {
- Dmsg0(500, "my_postgresql_fetch_row freeing space\n");
- free(mdb->row);
+ if (jcr && jcr->cached_attribute) {
+ Dmsg0(400, "Flush last cached attribute.\n");
+ if (!db_create_attributes_record(jcr, this, jcr->ar)) {
+ Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
}
- num_fields += 20; /* add a bit extra */
- mdb->row = (POSTGRESQL_ROW)malloc(sizeof(char *) * num_fields);
- mdb->row_size = num_fields;
-
- // now reset the row_number now that we have the space allocated
- mdb->row_number = 0;
+ jcr->cached_attribute = false;
}
- // if still within the result set
- if (mdb->row_number >= 0 && mdb->row_number < mdb->num_rows) {
- Dmsg2(500, "my_postgresql_fetch_row row number '%d' is acceptable (0..%d)\n", mdb->row_number, mdb->num_rows);
- // get each value from this row
- for (j = 0; j < mdb->num_fields; j++) {
- mdb->row[j] = PQgetvalue(mdb->result, mdb->row_number, j);
- Dmsg2(500, "my_postgresql_fetch_row field '%d' has value '%s'\n", j, mdb->row[j]);
- }
- // increment the row number for the next call
- mdb->row_number++;
-
- row = mdb->row;
- } else {
- Dmsg2(500, "my_postgresql_fetch_row row number '%d' is NOT acceptable (0..%d)\n", mdb->row_number, mdb->num_rows);
+ if (!m_allow_transactions) {
+ return;
}
- Dmsg1(500, "my_postgresql_fetch_row finishes returning %p\n", row);
-
- return row;
-}
-
-int my_postgresql_max_length(B_DB *mdb, int field_num) {
- //
- // for a given column, find the max length
- //
- int max_length;
- int i;
- int this_length;
-
- max_length = 0;
- for (i = 0; i < mdb->num_rows; i++) {
- if (PQgetisnull(mdb->result, i, field_num)) {
- this_length = 4; // "NULL"
- } else {
- this_length = cstrlen(PQgetvalue(mdb->result, i, field_num));
- }
-
- if (max_length < this_length) {
- max_length = this_length;
- }
+ db_lock(this);
+ if (m_transaction) {
+ sql_query("COMMIT"); /* end transaction */
+ m_transaction = false;
+ Dmsg1(400, "End PostgreSQL transaction changes=%d\n", changes);
}
-
- return max_length;
+ changes = 0;
+ db_unlock(this);
}
-POSTGRESQL_FIELD * my_postgresql_fetch_field(B_DB *mdb)
+/*
+ * Submit a general SQL command (cmd), and for each row returned,
+ * the result_handler is called with the ctx.
+ */
+bool B_DB_POSTGRESQL::db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
{
- int i;
-
- Dmsg0(500, "my_postgresql_fetch_field starts\n");
-
- if (!mdb->fields || mdb->fields_size < mdb->num_fields) {
- if (mdb->fields) {
- free(mdb->fields);
- }
- Dmsg1(500, "allocating space for %d fields\n", mdb->num_fields);
- mdb->fields = (POSTGRESQL_FIELD *)malloc(sizeof(POSTGRESQL_FIELD) * mdb->num_fields);
- mdb->fields_size = mdb->num_fields;
+ SQL_ROW row;
+ bool retval = true;
- for (i = 0; i < mdb->num_fields; i++) {
- Dmsg1(500, "filling field %d\n", i);
- mdb->fields[i].name = PQfname(mdb->result, i);
- mdb->fields[i].max_length = my_postgresql_max_length(mdb, i);
- mdb->fields[i].type = PQftype(mdb->result, i);
- mdb->fields[i].flags = 0;
+ Dmsg1(500, "db_sql_query starts with '%s'\n", query);
- Dmsg4(500, "my_postgresql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n",
- mdb->fields[i].name, mdb->fields[i].max_length, mdb->fields[i].type,
- mdb->fields[i].flags);
- } // end for
- } // end if
+ db_lock(this);
+ if (!sql_query(query, QF_STORE_RESULT)) {
+ Mmsg(errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror());
+ Dmsg0(500, "db_sql_query failed\n");
+ retval = false;
+ goto bail_out;
+ }
- // increment field number for the next time around
+ Dmsg0(500, "db_sql_query succeeded. checking handler\n");
- Dmsg0(500, "my_postgresql_fetch_field finishes\n");
- return &mdb->fields[mdb->field_number++];
-}
+ if (result_handler != NULL) {
+ Dmsg0(500, "db_sql_query invoking handler\n");
+ while ((row = sql_fetch_row()) != NULL) {
+ Dmsg0(500, "db_sql_query sql_fetch_row worked\n");
+ if (result_handler(ctx, m_num_fields, row))
+ break;
+ }
+ sql_free_result();
+ }
-void my_postgresql_data_seek(B_DB *mdb, int row)
-{
- // set the row number to be returned on the next call
- // to my_postgresql_fetch_row
- mdb->row_number = row;
-}
+ Dmsg0(500, "db_sql_query finished\n");
-void my_postgresql_field_seek(B_DB *mdb, int field)
-{
- mdb->field_number = field;
+bail_out:
+ db_unlock(this);
+ return retval;
}
/*
- * Note, if this routine returns 1 (failure), Bacula expects
- * that no result has been stored.
+ * Note, if this routine returns false (failure), Bacula expects
+ * that no result has been stored.
* This is where QUERY_DB comes with Postgresql.
*
- * Returns: 0 on success
- * 1 on failure
+ * Returns: true on success
+ * false on failure
*
*/
-int my_postgresql_query(B_DB *mdb, const char *query)
+bool B_DB_POSTGRESQL::sql_query(const char *query, int flags)
{
- Dmsg0(500, "my_postgresql_query started\n");
- // We are starting a new query. reset everything.
- mdb->num_rows = -1;
- mdb->row_number = -1;
- mdb->field_number = -1;
+ int i;
+ bool retval = false;
- if (mdb->result) {
- PQclear(mdb->result); /* hmm, someone forgot to free?? */
- mdb->result = NULL;
- }
+ Dmsg1(500, "sql_query starts with '%s'\n", query);
+ /*
+ * We are starting a new query, so reset everything.
+ */
+ m_num_rows = -1;
+ m_row_number = -1;
+ m_field_number = -1;
- Dmsg1(500, "my_postgresql_query starts with '%s'\n", query);
+ if (m_result) {
+ PQclear(m_result); /* hmm, someone forgot to free?? */
+ m_result = NULL;
+ }
- for (int i=0; i < 10; i++) {
- mdb->result = PQexec(mdb->db, query);
- if (mdb->result) {
+ for (i = 0; i < 10; i++) {
+ m_result = PQexec(m_db_handle, query);
+ if (m_result) {
break;
}
bmicrosleep(5, 0);
}
- if (!mdb->result) {
+ if (!m_result) {
Dmsg1(50, "Query failed: %s\n", query);
goto bail_out;
}
- mdb->status = PQresultStatus(mdb->result);
- if (mdb->status == PGRES_TUPLES_OK || mdb->status == PGRES_COMMAND_OK) {
- Dmsg1(500, "we have a result\n", query);
+ m_status = PQresultStatus(m_result);
+ if (m_status == PGRES_TUPLES_OK || m_status == PGRES_COMMAND_OK) {
+ Dmsg0(500, "we have a result\n");
- // how many fields in the set?
- mdb->num_fields = (int)PQnfields(mdb->result);
- Dmsg1(500, "we have %d fields\n", mdb->num_fields);
+ /*
+ * How many fields in the set?
+ */
+ m_num_fields = (int)PQnfields(m_result);
+ Dmsg1(500, "we have %d fields\n", m_num_fields);
- mdb->num_rows = PQntuples(mdb->result);
- Dmsg1(500, "we have %d rows\n", mdb->num_rows);
+ m_num_rows = PQntuples(m_result);
+ Dmsg1(500, "we have %d rows\n", m_num_rows);
- mdb->row_number = 0; /* we can start to fetch something */
- mdb->status = 0; /* succeed */
+ m_row_number = 0; /* we can start to fetch something */
+ m_status = 0; /* succeed */
+ retval = true;
} else {
Dmsg1(50, "Result status failed: %s\n", query);
goto bail_out;
}
- Dmsg0(500, "my_postgresql_query finishing\n");
- return mdb->status;
+ Dmsg0(500, "sql_query finishing\n");
+ goto ok_out;
bail_out:
- Dmsg1(500, "we failed\n", query);
- PQclear(mdb->result);
- mdb->result = NULL;
- mdb->status = 1; /* failed */
- return mdb->status;
+ Dmsg0(500, "we failed\n");
+ PQclear(m_result);
+ m_result = NULL;
+ m_status = 1; /* failed */
+
+ok_out:
+ return retval;
}
-void my_postgresql_free_result(B_DB *mdb)
+void B_DB_POSTGRESQL::sql_free_result(void)
{
-
- db_lock(mdb);
- if (mdb->result) {
- PQclear(mdb->result);
- mdb->result = NULL;
+ db_lock(this);
+ if (m_result) {
+ PQclear(m_result);
+ m_result = NULL;
}
+ if (m_rows) {
+ free(m_rows);
+ m_rows = NULL;
+ }
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
+ }
+ m_num_rows = m_num_fields = 0;
+ db_unlock(this);
+}
- if (mdb->row) {
- free(mdb->row);
- mdb->row = NULL;
+SQL_ROW B_DB_POSTGRESQL::sql_fetch_row(void)
+{
+ int j;
+ SQL_ROW row = NULL; /* by default, return NULL */
+
+ Dmsg0(500, "sql_fetch_row start\n");
+
+ if (!m_rows || m_rows_size < m_num_fields) {
+ if (m_rows) {
+ Dmsg0(500, "sql_fetch_row freeing space\n");
+ free(m_rows);
+ }
+ Dmsg1(500, "we need space for %d bytes\n", sizeof(char *) * m_num_fields);
+ m_rows = (SQL_ROW)malloc(sizeof(char *) * m_num_fields);
+ m_rows_size = m_num_fields;
+
+ /*
+ * Now reset the row_number now that we have the space allocated
+ */
+ m_row_number = 0;
}
- if (mdb->fields) {
- free(mdb->fields);
- mdb->fields = NULL;
+ /*
+ * If still within the result set
+ */
+ if (m_row_number >= 0 && m_row_number < m_num_rows) {
+ Dmsg2(500, "sql_fetch_row row number '%d' is acceptable (0..%d)\n", m_row_number, m_num_rows);
+ /*
+ * Get each value from this row
+ */
+ for (j = 0; j < m_num_fields; j++) {
+ m_rows[j] = PQgetvalue(m_result, m_row_number, j);
+ Dmsg2(500, "sql_fetch_row field '%d' has value '%s'\n", j, m_rows[j]);
+ }
+ /*
+ * Increment the row number for the next call
+ */
+ m_row_number++;
+ row = m_rows;
+ } else {
+ Dmsg2(500, "sql_fetch_row row number '%d' is NOT acceptable (0..%d)\n", m_row_number, m_num_rows);
}
- db_unlock(mdb);
+
+ Dmsg1(500, "sql_fetch_row finishes returning %p\n", row);
+
+ return row;
}
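A minimal usage sketch for the accessors above. The helper name and query are illustrative only; QF_STORE_RESULT is the flag already used elsewhere in this patch.

static void example_list_jobs(B_DB_POSTGRESQL *db)
{
   if (db->sql_query("SELECT JobId, Name FROM Job", QF_STORE_RESULT)) {
      SQL_ROW row;
      while ((row = db->sql_fetch_row()) != NULL) {
         /* row[0] and row[1] point into the stored PGresult until sql_free_result() */
      }
      db->sql_free_result();
   }
}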
-static uint64_t my_postgresql_currval(B_DB *mdb, const char *table_name)
+const char *B_DB_POSTGRESQL::sql_strerror(void)
{
- // Obtain the current value of the sequence that
- // provides the serial value for primary key of the table.
+ return PQerrorMessage(m_db_handle);
+}
- // currval is local to our session. It is not affected by
- // other transactions.
+void B_DB_POSTGRESQL::sql_data_seek(int row)
+{
+ /*
+ * Set the row number to be returned on the next call to sql_fetch_row
+ */
+ m_row_number = row;
+}
- // Determine the name of the sequence.
- // PostgreSQL automatically creates a sequence using
- // <table>_<column>_seq.
- // At the time of writing, all tables used this format for
- // for their primary key: <table>id
- // Except for basefiles which has a primary key on baseid.
- // Therefore, we need to special case that one table.
+int B_DB_POSTGRESQL::sql_affected_rows(void)
+{
+ return (unsigned) str_to_int32(PQcmdTuples(m_result));
+}
+
+uint64_t B_DB_POSTGRESQL::sql_insert_autokey_record(const char *query, const char *table_name)
+{
+ int i;
+ uint64_t id = 0;
+ char sequence[NAMEDATALEN-1];
+ char getkeyval_query[NAMEDATALEN+50];
+ PGresult *pg_result;
+
+ /*
+ * First execute the insert query and then retrieve the currval.
+ */
+ if (!sql_query(query)) {
+ return 0;
+ }
- // everything else can use the PostgreSQL formula.
+ m_num_rows = sql_affected_rows();
+ if (m_num_rows != 1) {
+ return 0;
+ }
- char sequence[NAMEDATALEN-1];
- char query [NAMEDATALEN+50];
- PGresult *result;
- uint64_t id = 0;
+ changes++;
+ /*
+ * Obtain the current value of the sequence that
+ * provides the serial value for primary key of the table.
+ *
+ * currval is local to our session. It is not affected by
+ * other transactions.
+ *
+ * Determine the name of the sequence.
+ * PostgreSQL automatically creates a sequence using
+ * <table>_<column>_seq.
+ * At the time of writing, all tables used this format for
+ * their primary key: <table>id
+ * Except for basefiles which has a primary key on baseid.
+ * Therefore, we need to special case that one table.
+ *
+ * Everything else can use the PostgreSQL formula.
+ */
if (strcasecmp(table_name, "basefiles") == 0) {
bstrncpy(sequence, "basefiles_baseid", sizeof(sequence));
} else {
}
bstrncat(sequence, "_seq", sizeof(sequence));
- bsnprintf(query, sizeof(query), "SELECT currval('%s')", sequence);
+ bsnprintf(getkeyval_query, sizeof(getkeyval_query), "SELECT currval('%s')", sequence);
- Dmsg1(500, "my_postgresql_currval invoked with '%s'\n", query);
- for (int i=0; i < 10; i++) {
- result = PQexec(mdb->db, query);
- if (result) {
+ Dmsg1(500, "sql_insert_autokey_record executing query '%s'\n", getkeyval_query);
+ for (i = 0; i < 10; i++) {
+ pg_result = PQexec(m_db_handle, getkeyval_query);
+ if (pg_result) {
break;
}
bmicrosleep(5, 0);
}
- if (!result) {
- Dmsg1(50, "Query failed: %s\n", query);
+ if (!pg_result) {
+ Dmsg1(50, "Query failed: %s\n", getkeyval_query);
goto bail_out;
}
Dmsg0(500, "exec done");
- if (PQresultStatus(result) == PGRES_TUPLES_OK) {
+ if (PQresultStatus(pg_result) == PGRES_TUPLES_OK) {
Dmsg0(500, "getting value");
- id = str_to_uint64(PQgetvalue(result, 0, 0));
- Dmsg2(500, "got value '%s' which became %d\n", PQgetvalue(result, 0, 0), id);
+ id = str_to_uint64(PQgetvalue(pg_result, 0, 0));
+ Dmsg2(500, "got value '%s' which became %d\n", PQgetvalue(pg_result, 0, 0), id);
} else {
- Dmsg1(50, "Result status failed: %s\n", query);
- Mmsg1(&mdb->errmsg, _("error fetching currval: %s\n"), PQerrorMessage(mdb->db));
+ Dmsg1(50, "Result status failed: %s\n", getkeyval_query);
+ Mmsg1(&errmsg, _("error fetching currval: %s\n"), PQerrorMessage(m_db_handle));
}
bail_out:
- PQclear(result);
-
+ PQclear(pg_result);
+
return id;
}
-uint64_t my_postgresql_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name)
+SQL_FIELD *B_DB_POSTGRESQL::sql_fetch_field(void)
{
+ int i, j;
+ int max_length;
+ int this_length;
+
+ Dmsg0(500, "sql_fetch_field starts\n");
+
+ if (!m_fields || m_fields_size < m_num_fields) {
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
+ }
+ Dmsg1(500, "allocating space for %d fields\n", m_num_fields);
+ m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * m_num_fields);
+ m_fields_size = m_num_fields;
+
+ for (i = 0; i < m_num_fields; i++) {
+ Dmsg1(500, "filling field %d\n", i);
+ m_fields[i].name = PQfname(m_result, i);
+ m_fields[i].type = PQftype(m_result, i);
+ m_fields[i].flags = 0;
+
+ /*
+ * For a given column, find the max length.
+ */
+ max_length = 0;
+ for (j = 0; j < m_num_rows; j++) {
+ if (PQgetisnull(m_result, j, i)) {
+ this_length = 4; /* "NULL" */
+ } else {
+ this_length = cstrlen(PQgetvalue(m_result, j, i));
+ }
+
+ if (max_length < this_length) {
+ max_length = this_length;
+ }
+ }
+ m_fields[i].max_length = max_length;
+
+ Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n",
+ m_fields[i].name, m_fields[i].max_length, m_fields[i].type, m_fields[i].flags);
+ }
+ }
+
/*
- * First execute the insert query and then retrieve the currval.
+ * Increment field number for the next time around
*/
- if (my_postgresql_query(mdb, query)) {
- return 0;
+ return &m_fields[m_field_number++];
+}
+
+bool B_DB_POSTGRESQL::sql_field_is_not_null(int field_type)
+{
+ switch (field_type) {
+ case 1:
+ return true;
+ default:
+ return false;
}
+}
- mdb->num_rows = sql_affected_rows(mdb);
- if (mdb->num_rows != 1) {
- return 0;
+bool B_DB_POSTGRESQL::sql_field_is_numeric(int field_type)
+{
+ /*
+ * TEMP: the following is taken from select OID, typname from pg_type;
+ */
+ switch (field_type) {
+ case 20:   /* int8 */
+ case 21:   /* int2 */
+ case 23:   /* int4 */
+ case 700:  /* float4 */
+ case 701:  /* float8 */
+ return true;
+ default:
+ return false;
}
+}
- mdb->changes++;
+/*
+ * Escape strings so that PostgreSQL is happy on COPY
+ *
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
+ */
+static char *pgsql_copy_escape(char *dest, char *src, size_t len)
+{
+ /* we have to escape \t, \n, \r, \ */
+ char c = '\0' ;
- return my_postgresql_currval(mdb, table_name);
-}
+ while (len > 0 && *src) {
+ switch (*src) {
+ case '\n':
+ c = 'n';
+ break;
+ case '\\':
+ c = '\\';
+ break;
+ case '\t':
+ c = 't';
+ break;
+ case '\r':
+ c = 'r';
+ break;
+ default:
+ c = '\0' ;
+ }
-#ifdef HAVE_BATCH_FILE_INSERT
+ if (c) {
+ *dest = '\\';
+ dest++;
+ *dest = c;
+ } else {
+ *dest = *src;
+ }
+
+ len--;
+ src++;
+ dest++;
+ }
-int my_postgresql_batch_start(JCR *jcr, B_DB *mdb)
+ *dest = '\0';
+ return dest;
+}
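To make the escaping rules concrete, a small illustration. The helper below is hypothetical and simply exercises pgsql_copy_escape() from this file.

static void example_copy_escape(void)
{
   char src[] = "a\tb\\c";                 /* a, TAB, b, backslash, c */
   char dest[2 * sizeof(src) + 1];         /* worst case: every byte escaped */
   pgsql_copy_escape(dest, src, strlen(src));
   /* dest now holds a, '\', 't', b, '\', '\', c - i.e. the COPY text
    * representations of TAB and backslash */
}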
+
+bool B_DB_POSTGRESQL::sql_batch_start(JCR *jcr)
{
const char *query = "COPY batch FROM STDIN";
- Dmsg0(500, "my_postgresql_batch_start started\n");
-
- if (my_postgresql_query(mdb,
- "CREATE TEMPORARY TABLE batch ("
- "fileindex int,"
- "jobid int,"
- "path varchar,"
- "name varchar,"
- "lstat varchar,"
- "md5 varchar,"
- "markid int)") == 1)
- {
- Dmsg0(500, "my_postgresql_batch_start failed\n");
- return 1;
+ Dmsg0(500, "sql_batch_start started\n");
+
+ if (!sql_query("CREATE TEMPORARY TABLE batch ("
+ "fileindex int,"
+ "jobid int,"
+ "path varchar,"
+ "name varchar,"
+ "lstat varchar,"
+ "md5 varchar,"
+ "markid int)")) {
+ Dmsg0(500, "sql_batch_start failed\n");
+ return false;
}
- // We are starting a new query. reset everything.
- mdb->num_rows = -1;
- mdb->row_number = -1;
- mdb->field_number = -1;
+ /*
+ * We are starting a new query, so reset everything.
+ */
+ m_num_rows = -1;
+ m_row_number = -1;
+ m_field_number = -1;
- my_postgresql_free_result(mdb);
+ sql_free_result();
for (int i=0; i < 10; i++) {
- mdb->result = PQexec(mdb->db, query);
- if (mdb->result) {
+ m_result = PQexec(m_db_handle, query);
+ if (m_result) {
break;
}
bmicrosleep(5, 0);
}
- if (!mdb->result) {
+ if (!m_result) {
Dmsg1(50, "Query failed: %s\n", query);
goto bail_out;
}
- mdb->status = PQresultStatus(mdb->result);
- if (mdb->status == PGRES_COPY_IN) {
- // how many fields in the set?
- mdb->num_fields = (int) PQnfields(mdb->result);
- mdb->num_rows = 0;
- mdb->status = 1;
+ m_status = PQresultStatus(m_result);
+ if (m_status == PGRES_COPY_IN) {
+ /*
+ * How many fields in the set?
+ */
+ m_num_fields = (int) PQnfields(m_result);
+ m_num_rows = 0;
+ m_status = 1;
} else {
Dmsg1(50, "Result status failed: %s\n", query);
goto bail_out;
}
- Dmsg0(500, "my_postgresql_batch_start finishing\n");
+ Dmsg0(500, "sql_batch_start finishing\n");
- return mdb->status;
+ return true;
bail_out:
- Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), PQerrorMessage(mdb->db));
- mdb->status = 0;
- PQclear(mdb->result);
- mdb->result = NULL;
- return mdb->status;
+ Mmsg1(&errmsg, _("error starting batch mode: %s"), PQerrorMessage(m_db_handle));
+ m_status = 0;
+ PQclear(m_result);
+ m_result = NULL;
+ return false;
}
-/* set error to something to abort operation */
-int my_postgresql_batch_end(JCR *jcr, B_DB *mdb, const char *error)
+/*
+ * Set error to something to abort operation
+ */
+bool B_DB_POSTGRESQL::sql_batch_end(JCR *jcr, const char *error)
{
int res;
int count=30;
- PGresult *result;
- Dmsg0(500, "my_postgresql_batch_end started\n");
+ PGresult *pg_result;
- if (!mdb) { /* no files ? */
- return 0;
- }
+ Dmsg0(500, "sql_batch_end started\n");
do {
- res = PQputCopyEnd(mdb->db, error);
+ res = PQputCopyEnd(m_db_handle, error);
} while (res == 0 && --count > 0);
if (res == 1) {
Dmsg0(500, "ok\n");
- mdb->status = 1;
+ m_status = 1;
}
if (res <= 0) {
Dmsg0(500, "we failed\n");
- mdb->status = 0;
- Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
+ m_status = 0;
+ Mmsg1(&errmsg, _("error ending batch mode: %s"), PQerrorMessage(m_db_handle));
+ Dmsg1(500, "failure %s\n", errmsg);
}
/* Check command status and return to normal libpq state */
- result = PQgetResult(mdb->db);
- if (PQresultStatus(result) != PGRES_COMMAND_OK) {
- Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
- mdb->status = 0;
+ pg_result = PQgetResult(m_db_handle);
+ if (PQresultStatus(pg_result) != PGRES_COMMAND_OK) {
+ Mmsg1(&errmsg, _("error ending batch mode: %s"), PQerrorMessage(m_db_handle));
+ m_status = 0;
}
- PQclear(result);
+ PQclear(pg_result);
- Dmsg0(500, "my_postgresql_batch_end finishing\n");
+ Dmsg0(500, "sql_batch_end finishing\n");
- return mdb->status;
+ return true;
}
-int my_postgresql_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+bool B_DB_POSTGRESQL::sql_batch_insert(JCR *jcr, ATTR_DBR *ar)
{
int res;
int count=30;
   size_t len;
   const char *digest;
char ed1[50];
- mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
- my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
+ esc_name = check_pool_memory_size(esc_name, fnl*2+1);
+ pgsql_copy_escape(esc_name, fname, fnl);
- mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
- my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl);
+ esc_path = check_pool_memory_size(esc_path, pnl*2+1);
+ pgsql_copy_escape(esc_path, path, pnl);
if (ar->Digest == NULL || ar->Digest[0] == 0) {
digest = "0";
digest = ar->Digest;
}
- len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\t%u\n",
- ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path,
- mdb->esc_name, ar->attr, digest, ar->DeltaSeq);
+ len = Mmsg(cmd, "%u\t%s\t%s\t%s\t%s\t%s\t%u\n",
+ ar->FileIndex, edit_int64(ar->JobId, ed1), esc_path,
+ esc_name, ar->attr, digest, ar->DeltaSeq);
do {
- res = PQputCopyData(mdb->db,
- mdb->cmd,
- len);
+ res = PQputCopyData(m_db_handle, cmd, len);
} while (res == 0 && --count > 0);
if (res == 1) {
Dmsg0(500, "ok\n");
- mdb->changes++;
- mdb->status = 1;
+ changes++;
+ m_status = 1;
}
if (res <= 0) {
Dmsg0(500, "we failed\n");
- mdb->status = 0;
- Mmsg1(&mdb->errmsg, _("error copying in batch mode: %s"), PQerrorMessage(mdb->db));
+ m_status = 0;
+ Mmsg1(&errmsg, _("error copying in batch mode: %s"), PQerrorMessage(m_db_handle));
+ Dmsg1(500, "failure %s\n", errmsg);
}
- Dmsg0(500, "my_postgresql_batch_insert finishing\n");
+ Dmsg0(500, "sql_batch_insert finishing\n");
- return mdb->status;
+ return true;
}
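+
+/*
+ * Sketch of the expected calling sequence (illustration, not part of the
+ * original code): the catalog layer is assumed to drive the three batch
+ * methods roughly like this:
+ *
+ *    if (db->sql_batch_start(jcr)) {
+ *       while (next_attribute(jcr, ar)) {   // next_attribute() is hypothetical
+ *          db->sql_batch_insert(jcr, ar);
+ *       }
+ *       db->sql_batch_end(jcr, NULL);       // pass an error string to abort the COPY
+ *    }
+ */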
-#endif /* HAVE_BATCH_FILE_INSERT */
-
/*
- * Escape strings so that PostgreSQL is happy on COPY
- *
- * NOTE! len is the length of the old string. Your new
- * string must be long enough (max 2*old+1) to hold
- * the escaped output.
+ * Initialize database data structure. In principle this should
+ * never have errors, or it is really fatal.
*/
-char *my_postgresql_copy_escape(char *dest, char *src, size_t len)
+B_DB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name,
+ const char *db_user, const char *db_password,
+ const char *db_address, int db_port,
+ const char *db_socket, bool mult_db_connections,
+ bool disable_batch_insert)
{
- /* we have to escape \t, \n, \r, \ */
- char c = '\0' ;
+ B_DB_POSTGRESQL *mdb = NULL;
- while (len > 0 && *src) {
- switch (*src) {
- case '\n':
- c = 'n';
- break;
- case '\\':
- c = '\\';
- break;
- case '\t':
- c = 't';
- break;
- case '\r':
- c = 'r';
- break;
- default:
- c = '\0' ;
- }
-
- if (c) {
- *dest = '\\';
- dest++;
- *dest = c;
- } else {
- *dest = *src;
+ if (!db_user) {
+ Jmsg(jcr, M_FATAL, 0, _("A user name for PostgreSQL must be supplied.\n"));
+ return NULL;
+ }
+ P(mutex); /* lock DB queue */
+ if (db_list && !mult_db_connections) {
+ /*
+ * Look to see if DB already open
+ */
+ foreach_dlist(mdb, db_list) {
+ if (mdb->db_match_database(db_driver, db_name, db_address, db_port)) {
+ Dmsg1(100, "DB REopen %s\n", db_name);
+ mdb->increment_refcount();
+ goto bail_out;
+ }
}
-
- len--;
- src++;
- dest++;
}
+ Dmsg0(100, "db_init_database first time\n");
+ mdb = New(B_DB_POSTGRESQL(jcr, db_driver, db_name, db_user, db_password,
+ db_address, db_port, db_socket,
+ mult_db_connections, disable_batch_insert));
- *dest = '\0';
- return dest;
+bail_out:
+ V(mutex);
+ return mdb;
}
-#ifdef HAVE_BATCH_FILE_INSERT
-const char *my_pg_batch_lock_path_query =
- "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE";
-
-
-const char *my_pg_batch_lock_filename_query =
- "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE";
-
-const char *my_pg_batch_unlock_tables_query = "COMMIT";
-
-const char *my_pg_batch_fill_path_query =
- "INSERT INTO Path (Path) "
- "SELECT a.Path FROM "
- "(SELECT DISTINCT Path FROM batch) AS a "
- "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ";
-
-
-const char *my_pg_batch_fill_filename_query =
- "INSERT INTO Filename (Name) "
- "SELECT a.Name FROM "
- "(SELECT DISTINCT Name FROM batch) as a "
- "WHERE NOT EXISTS "
- "(SELECT Name FROM Filename WHERE Name = a.Name)";
-#endif /* HAVE_BATCH_FILE_INSERT */
-
#endif /* HAVE_POSTGRESQL */
#
# shell script to create Bacula PostgreSQL tables
#
-bindir=@SQL_BINDIR@
+bindir=@POSTGRESQL_BINDIR@
db_name=@db_name@
$bindir/psql $* ${db_name}
/* Database prototypes */
/* sql.c */
-B_DB *db_init(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user,
- const char *db_password, const char *db_address, int db_port,
- const char *db_socket, int mult_db_connections);
-B_DB *db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password,
- const char *db_address, int db_port, const char *db_socket,
- int mult_db_connections);
-int db_open_database(JCR *jcr, B_DB *db);
-void db_close_database(JCR *jcr, B_DB *db);
bool db_open_batch_connexion(JCR *jcr, B_DB *mdb);
-void db_escape_string(JCR *jcr, B_DB *db, char *snew, char *old, int len);
-char *db_escape_object(JCR *jcr, B_DB *db, char *old, int len);
-void db_unescape_object(JCR *jcr, B_DB *db,
- char *from, int32_t expected_len,
- POOLMEM **dest, int32_t *len);
char *db_strerror(B_DB *mdb);
-int db_next_index(JCR *jcr, B_DB *mdb, char *table, char *index);
-bool db_sql_query(B_DB *mdb, const char *cmd, DB_RESULT_HANDLER *result_handler, void *ctx);
-void db_start_transaction(JCR *jcr, B_DB *mdb);
-void db_end_transaction(JCR *jcr, B_DB *mdb);
int db_int64_handler(void *ctx, int num_fields, char **row);
int db_list_handler(void *ctx, int num_fields, char **row);
-void db_thread_cleanup();
void db_debug_print(JCR *jcr, FILE *fp);
int db_int_handler(void *ctx, int num_fields, char **row);
void db_check_backend_thread_safe();
* Version $Id: sql.c 8034 2008-11-11 14:33:46Z ricozz $
*/
-/* The following is necessary so that we do not include
- * the dummy external definition of B_DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
#include "bacula.h"
-#include "cats.h"
-
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
-uint32_t bacula_db_version = 0;
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
-int db_type = -1; /* SQL engine type index */
+#include "cats.h"
+#include "bdb_priv.h"
+#include "sql_glue.h"
/* Forward referenced subroutines */
void print_dashes(B_DB *mdb);
void print_result(B_DB *mdb);
-B_DB *db_init(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user,
- const char *db_password, const char *db_address, int db_port,
- const char *db_socket, int mult_db_connections)
-{
-#ifdef HAVE_DBI
- char *p;
- if (!db_driver) {
- Jmsg0(jcr, M_ABORT, 0, _("Driver type not specified in Catalog resource.\n"));
- }
- if (strlen(db_driver) < 5 || db_driver[3] != ':' || strncasecmp(db_driver, "dbi", 3) != 0) {
- Jmsg0(jcr, M_ABORT, 0, _("Invalid driver type, must be \"dbi:<type>\"\n"));
- }
- p = (char *)(db_driver + 4);
- if (strcasecmp(p, "mysql") == 0) {
- db_type = SQL_TYPE_MYSQL;
- } else if (strcasecmp(p, "postgresql") == 0) {
- db_type = SQL_TYPE_POSTGRESQL;
- } else if (strcasecmp(p, "sqlite") == 0) {
- db_type = SQL_TYPE_SQLITE;
- } else if (strcasecmp(p, "sqlite3") == 0) {
- db_type = SQL_TYPE_SQLITE3;
- } else if (strcasecmp(p, "ingres") == 0) {
- db_type = SQL_TYPE_INGRES;
- } else {
- Jmsg1(jcr, M_ABORT, 0, _("Unknown database type: %s\n"), p);
- }
-#elif HAVE_MYSQL
- db_type = SQL_TYPE_MYSQL;
-#elif HAVE_POSTGRESQL
- db_type = SQL_TYPE_POSTGRESQL;
-#elif HAVE_INGRES
- db_type = SQL_TYPE_INGRES;
-#elif HAVE_SQLITE
- db_type = SQL_TYPE_SQLITE;
-#elif HAVE_SQLITE3
- db_type = SQL_TYPE_SQLITE3;
-#endif
-
- return db_init_database(jcr, db_name, db_user, db_password, db_address,
- db_port, db_socket, mult_db_connections);
-}
-
dbid_list::dbid_list()
{
memset(this, 0, sizeof(dbid_list));
return 0;
}
+/*
+ * Specific context passed from db_check_max_connections to db_max_connections_handler.
+ */
+struct max_connections_context {
+ B_DB *db;
+ uint32_t nr_connections;
+};
/*
- * Called here to retrieve an integer from the database
- */
+ * Called here to retrieve an integer from the database
+ */
static int db_max_connections_handler(void *ctx, int num_fields, char **row)
{
- uint32_t *val = (uint32_t *)ctx;
- uint32_t index = sql_get_max_connections_index[db_type];
+ struct max_connections_context *context;
+ uint32_t index;
+
+ context = (struct max_connections_context *)ctx;
+   switch (db_get_type_index(context->db)) {
+   case SQL_TYPE_MYSQL:
+      index = 1;
+      break;
+   default:
+      index = 0;
+      break;
+   }
+
if (row[index]) {
- *val = str_to_int64(row[index]);
+ context->nr_connections = str_to_int64(row[index]);
} else {
Dmsg0(800, "int_handler finds zero\n");
- *val = 0;
+ context->nr_connections = 0;
}
return 0;
}
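+
+/*
+ * Informational note (based on the sql_get_max_connections[] queries used
+ * below): MySQL's "SHOW VARIABLES LIKE 'max_connections'" returns two
+ * columns (Variable_name, Value), so the value lives in row[1], while
+ * PostgreSQL's "SHOW max_connections" returns a single column, so row[0]
+ * is used for every other backend.
+ */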
-/*
- * Check catalog max_connections setting
- */
+/*
+ * Check catalog max_connections setting
+ */
bool db_check_max_connections(JCR *jcr, B_DB *mdb, uint32_t max_concurrent_jobs)
{
-#ifdef HAVE_BATCH_FILE_INSERT
+ struct max_connections_context context;
+
+ /* Without Batch insert, no need to verify max_connections */
+ if (!mdb->batch_insert_available())
+ return true;
- uint32_t max_conn = 0;
+ context.db = mdb;
+ context.nr_connections = 0;
- /* With Batch insert, verify max_connections */
- if (!db_sql_query(mdb, sql_get_max_connections[db_type],
- db_max_connections_handler, &max_conn)) {
+ /* Check max_connections setting */
+ if (!db_sql_query(mdb, sql_get_max_connections[db_get_type_index(mdb)],
+ db_max_connections_handler, &context)) {
Jmsg(jcr, M_ERROR, 0, "Can't verify max_connections settings %s", mdb->errmsg);
return false;
}
- if (max_conn && max_concurrent_jobs > max_conn) {
- Mmsg(mdb->errmsg,
+ if (context.nr_connections && max_concurrent_jobs && max_concurrent_jobs > context.nr_connections) {
+ Mmsg(mdb->errmsg,
_("Potential performance problem:\n"
"max_connections=%d set for %s database \"%s\" should be larger than Director's "
"MaxConcurrentJobs=%d\n"),
- max_conn, db_get_type(), mdb->db_name, max_concurrent_jobs);
+ context.nr_connections, db_get_type(mdb), mdb->get_db_name(), max_concurrent_jobs);
Jmsg(jcr, M_WARNING, 0, "%s", mdb->errmsg);
return false;
}
-#endif
-
return true;
}
/* Check that the tables correspond to the version we want */
bool check_tables_version(JCR *jcr, B_DB *mdb)
{
+ uint32_t bacula_db_version = 0;
const char *query = "SELECT VersionId FROM Version";
bacula_db_version = 0;
}
if (bacula_db_version != BDB_VERSION) {
Mmsg(mdb->errmsg, "Version error for database \"%s\". Wanted %d, got %d\n",
- mdb->db_name, BDB_VERSION, bacula_db_version);
+ mdb->get_db_name(), BDB_VERSION, bacula_db_version);
Jmsg(jcr, M_FATAL, 0, "%s", mdb->errmsg);
return false;
}
return true;
}
-/* Utility routine for queries. The database MUST be locked before calling here. */
+/*
+ * Utility routine for queries. The database MUST be locked before calling here.
+ * Returns: 0 on failure
+ * 1 on success
+ */
int
QueryDB(const char *file, int line, JCR *jcr, B_DB *mdb, char *cmd)
{
- int status;
-
sql_free_result(mdb);
- if ((status=sql_query(mdb, cmd)) != 0) {
+ if (!sql_query(mdb, cmd, QF_STORE_RESULT)) {
m_msg(file, line, &mdb->errmsg, _("query %s failed:\n%s\n"), cmd, sql_strerror(mdb));
j_msg(file, line, jcr, M_FATAL, 0, "%s", mdb->errmsg);
if (verbose) {
return 0;
}
- mdb->result = sql_store_result(mdb);
-
- return mdb->result != NULL;
+ return 1;
}
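+
+/*
+ * Callers are expected to reach QueryDB() through a wrapper macro such as
+ * QUERY_DB(jcr, mdb, cmd), assumed to expand roughly to
+ * QueryDB(__FILE__, __LINE__, jcr, mdb, cmd), so the failing call site is
+ * reported in the catalog error message. (Assumed expansion; the macro is
+ * defined elsewhere in the cats headers.)
+ */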
/*
int
InsertDB(const char *file, int line, JCR *jcr, B_DB *mdb, char *cmd)
{
- if (sql_query(mdb, cmd)) {
+ int num_rows;
+
+ if (!sql_query(mdb, cmd)) {
m_msg(file, line, &mdb->errmsg, _("insert %s failed:\n%s\n"), cmd, sql_strerror(mdb));
j_msg(file, line, jcr, M_FATAL, 0, "%s", mdb->errmsg);
if (verbose) {
}
return 0;
}
- mdb->num_rows = sql_affected_rows(mdb);
- if (mdb->num_rows != 1) {
+ num_rows = sql_affected_rows(mdb);
+ if (num_rows != 1) {
char ed1[30];
m_msg(file, line, &mdb->errmsg, _("Insertion problem: affected_rows=%s\n"),
- edit_uint64(mdb->num_rows, ed1));
+ edit_uint64(num_rows, ed1));
if (verbose) {
j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd);
}
int
UpdateDB(const char *file, int line, JCR *jcr, B_DB *mdb, char *cmd)
{
+ int num_rows;
- if (sql_query(mdb, cmd)) {
+ if (!sql_query(mdb, cmd)) {
m_msg(file, line, &mdb->errmsg, _("update %s failed:\n%s\n"), cmd, sql_strerror(mdb));
j_msg(file, line, jcr, M_ERROR, 0, "%s", mdb->errmsg);
if (verbose) {
}
return 0;
}
- mdb->num_rows = sql_affected_rows(mdb);
- if (mdb->num_rows < 1) {
+ num_rows = sql_affected_rows(mdb);
+ if (num_rows < 1) {
char ed1[30];
m_msg(file, line, &mdb->errmsg, _("Update failed: affected_rows=%s for %s\n"),
- edit_uint64(mdb->num_rows, ed1), cmd);
+ edit_uint64(num_rows, ed1), cmd);
if (verbose) {
// j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd);
}
DeleteDB(const char *file, int line, JCR *jcr, B_DB *mdb, char *cmd)
{
- if (sql_query(mdb, cmd)) {
+ if (!sql_query(mdb, cmd)) {
m_msg(file, line, &mdb->errmsg, _("delete %s failed:\n%s\n"), cmd, sql_strerror(mdb));
j_msg(file, line, jcr, M_ERROR, 0, "%s", mdb->errmsg);
if (verbose) {
}
/*
- * Return pre-edited error message
- */
+ * Return pre-edited error message
+ */
char *db_strerror(B_DB *mdb)
{
return mdb->errmsg;
}
-/*
- * Lock database, this can be called multiple times by the same
- * thread without blocking, but must be unlocked the number of
- * times it was locked.
- */
-void _db_lock(const char *file, int line, B_DB *mdb)
-{
- int errstat;
- if ((errstat=rwl_writelock_p(&mdb->lock, file, line)) != 0) {
- berrno be;
- e_msg(file, line, M_FATAL, 0, "rwl_writelock failure. stat=%d: ERR=%s\n",
- errstat, be.bstrerror(errstat));
- }
-}
-
-/*
- * Unlock the database. This can be called multiple times by the
- * same thread up to the number of times that thread called
- * db_lock()/
- */
-void _db_unlock(const char *file, int line, B_DB *mdb)
-{
- int errstat;
- if ((errstat=rwl_writeunlock(&mdb->lock)) != 0) {
- berrno be;
- e_msg(file, line, M_FATAL, 0, "rwl_writeunlock failure. stat=%d: ERR=%s\n",
- errstat, be.bstrerror(errstat));
- }
-}
-
-/*
- * Start a transaction. This groups inserts and makes things
- * much more efficient. Usually started when inserting
- * file attributes.
- */
-void db_start_transaction(JCR *jcr, B_DB *mdb)
-{
- if (!jcr->attr) {
- jcr->attr = get_pool_memory(PM_FNAME);
- }
- if (!jcr->ar) {
- jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR));
- }
-
-#ifdef HAVE_SQLITE
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- /* Allow only 10,000 changes per transaction */
- if (mdb->transaction && mdb->changes > 10000) {
- db_end_transaction(jcr, mdb);
- }
- if (!mdb->transaction) {
- my_sqlite_query(mdb, "BEGIN"); /* begin transaction */
- Dmsg0(400, "Start SQLite transaction\n");
- mdb->transaction = 1;
- }
- db_unlock(mdb);
-#endif
-
-/*
- * This is turned off because transactions break
- * if multiple simultaneous jobs are run.
- */
-#ifdef HAVE_POSTGRESQL
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- /* Allow only 25,000 changes per transaction */
- if (mdb->transaction && mdb->changes > 25000) {
- db_end_transaction(jcr, mdb);
- }
- if (!mdb->transaction) {
- db_sql_query(mdb, "BEGIN", NULL, NULL); /* begin transaction */
- Dmsg0(400, "Start PosgreSQL transaction\n");
- mdb->transaction = 1;
- }
- db_unlock(mdb);
-#endif
-
-#ifdef HAVE_INGRES
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- /* Allow only 25,000 changes per transaction */
- if (mdb->transaction && mdb->changes > 25000) {
- db_end_transaction(jcr, mdb);
- }
- if (!mdb->transaction) {
- db_sql_query(mdb, "BEGIN", NULL, NULL); /* begin transaction */
- Dmsg0(400, "Start Ingres transaction\n");
- mdb->transaction = 1;
- }
- db_unlock(mdb);
-#endif
-
-#ifdef HAVE_DBI
- if (db_type == SQL_TYPE_SQLITE) {
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- /* Allow only 10,000 changes per transaction */
- if (mdb->transaction && mdb->changes > 10000) {
- db_end_transaction(jcr, mdb);
- }
- if (!mdb->transaction) {
- //my_sqlite_query(mdb, "BEGIN"); /* begin transaction */
- db_sql_query(mdb, "BEGIN", NULL, NULL); /* begin transaction */
- Dmsg0(400, "Start SQLite transaction\n");
- mdb->transaction = 1;
- }
- db_unlock(mdb);
- } else if (db_type == SQL_TYPE_POSTGRESQL) {
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- /* Allow only 25,000 changes per transaction */
- if (mdb->transaction && mdb->changes > 25000) {
- db_end_transaction(jcr, mdb);
- }
- if (!mdb->transaction) {
- db_sql_query(mdb, "BEGIN", NULL, NULL); /* begin transaction */
- Dmsg0(400, "Start PosgreSQL transaction\n");
- mdb->transaction = 1;
- }
- db_unlock(mdb);
- }
-#endif
-}
-
-void db_end_transaction(JCR *jcr, B_DB *mdb)
-{
- /*
- * This can be called during thread cleanup and
- * the db may already be closed. So simply return.
- */
- if (!mdb) {
- return;
- }
-
- if (jcr && jcr->cached_attribute) {
- Dmsg0(400, "Flush last cached attribute.\n");
- if (!db_create_attributes_record(jcr, mdb, jcr->ar)) {
- Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
- }
- jcr->cached_attribute = false;
- }
-
-#ifdef HAVE_SQLITE
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- if (mdb->transaction) {
- my_sqlite_query(mdb, "COMMIT"); /* end transaction */
- mdb->transaction = 0;
- Dmsg1(400, "End SQLite transaction changes=%d\n", mdb->changes);
- }
- mdb->changes = 0;
- db_unlock(mdb);
-#endif
-
-
-
-#ifdef HAVE_INGRES
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- if (mdb->transaction) {
- db_sql_query(mdb, "COMMIT", NULL, NULL); /* end transaction */
- mdb->transaction = 0;
- Dmsg1(400, "End Ingres transaction changes=%d\n", mdb->changes);
- }
- mdb->changes = 0;
- db_unlock(mdb);
-#endif
-
-
-#ifdef HAVE_POSTGRESQL
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- if (mdb->transaction) {
- db_sql_query(mdb, "COMMIT", NULL, NULL); /* end transaction */
- mdb->transaction = 0;
- Dmsg1(400, "End PostgreSQL transaction changes=%d\n", mdb->changes);
- }
- mdb->changes = 0;
- db_unlock(mdb);
-#endif
-
-#ifdef HAVE_DBI
- if (db_type == SQL_TYPE_SQLITE) {
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- if (mdb->transaction) {
- //my_sqlite_query(mdb, "COMMIT"); /* end transaction */
- db_sql_query(mdb, "COMMIT", NULL, NULL); /* end transaction */
- mdb->transaction = 0;
- Dmsg1(400, "End SQLite transaction changes=%d\n", mdb->changes);
- }
- mdb->changes = 0;
- db_unlock(mdb);
- } else if (db_type == SQL_TYPE_POSTGRESQL) {
- if (!mdb->allow_transactions) {
- return;
- }
- db_lock(mdb);
- if (mdb->transaction) {
- db_sql_query(mdb, "COMMIT", NULL, NULL); /* end transaction */
- mdb->transaction = 0;
- Dmsg1(400, "End PostgreSQL transaction changes=%d\n", mdb->changes);
- }
- mdb->changes = 0;
- db_unlock(mdb);
- }
-#endif
-}
-
/*
* Given a full filename, split it into its path
* and filename parts. They are returned in pool memory
char buf[2000], ewc[30];
Dmsg0(800, "list_result starts\n");
- if (mdb->result == NULL || sql_num_rows(mdb) == 0) {
+ if (sql_num_rows(mdb) == 0) {
send(ctx, _("No results to list.\n"));
return;
}
max_len = col_len;
}
} else {
- if (IS_NUM(field->type) && (int)field->max_length > 0) { /* fixup for commas */
+ if (sql_field_is_numeric(mdb, field->type) && (int)field->max_length > 0) { /* fixup for commas */
field->max_length += (field->max_length - 1) / 3;
- }
+ }
if (col_len < (int)field->max_length) {
col_len = field->max_length;
- }
- if (col_len < 4 && !IS_NOT_NULL(field->flags)) {
+ }
+ if (col_len < 4 && !sql_field_is_not_null(mdb, field->flags)) {
col_len = 4; /* 4 = length of the word "NULL" */
}
field->max_length = col_len; /* reset column info */
max_len = max_length(field->max_length);
if (row[i] == NULL) {
bsnprintf(buf, sizeof(buf), " %-*s |", max_len, "NULL");
- } else if (IS_NUM(field->type) && !jcr->gui && is_an_integer(row[i])) {
+ } else if (sql_field_is_numeric(mdb, field->type) && !jcr->gui && is_an_integer(row[i])) {
bsnprintf(buf, sizeof(buf), " %*s |", max_len,
add_commas(row[i], ewc));
} else {
}
if (row[i] == NULL) {
bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name, "NULL");
- } else if (IS_NUM(field->type) && !jcr->gui && is_an_integer(row[i])) {
+ } else if (sql_field_is_numeric(mdb, field->type) && !jcr->gui && is_an_integer(row[i])) {
bsnprintf(buf, sizeof(buf), " %*s: %s\n", max_len, field->name,
add_commas(row[i], ewc));
} else {
*/
bool db_open_batch_connexion(JCR *jcr, B_DB *mdb)
{
-#ifdef HAVE_BATCH_FILE_INSERT
- const int multi_db = true; /* we force a new connection only if batch insert is enabled */
-#else
- const int multi_db = false;
-#endif
+ bool multi_db;
+
+ if (mdb->batch_insert_available())
+ multi_db = true; /* we force a new connection only if batch insert is enabled */
+ else
+ multi_db = false;
if (!jcr->db_batch) {
- jcr->db_batch = db_init_database(jcr,
- mdb->db_name,
- mdb->db_user,
- mdb->db_password,
- mdb->db_address,
- mdb->db_port,
- mdb->db_socket,
- multi_db /* multi_db = true when using batch mode */);
+ jcr->db_batch = db_clone_database_connection(mdb, jcr, multi_db);
if (!jcr->db_batch) {
Mmsg0(&mdb->errmsg, _("Could not init database batch connection"));
Jmsg(jcr, M_FATAL, 0, "%s", mdb->errmsg);
if (!db_open_database(jcr, jcr->db_batch)) {
Mmsg2(&mdb->errmsg, _("Could not open database \"%s\": ERR=%s\n"),
- jcr->db_batch->db_name, db_strerror(jcr->db_batch));
+ jcr->db_batch->get_db_name(), db_strerror(jcr->db_batch));
Jmsg(jcr, M_FATAL, 0, "%s", mdb->errmsg);
return false;
}
- Dmsg3(100, "initdb ref=%d connected=%d db=%p\n", jcr->db_batch->ref_count,
- jcr->db_batch->connected, jcr->db_batch->db);
-
}
return true;
}
return;
}
- fprintf(fp, "B_DB=%p db_name=%s db_user=%s connected=%i\n",
- mdb, NPRTB(mdb->db_name), NPRTB(mdb->db_user), mdb->connected);
+ fprintf(fp, "B_DB=%p db_name=%s db_user=%s connected=%s\n",
+ mdb, NPRTB(mdb->get_db_name()), NPRTB(mdb->get_db_user()), mdb->is_connected() ? "true" : "false");
fprintf(fp, "\tcmd=\"%s\" changes=%i\n", NPRTB(mdb->cmd), mdb->changes);
- if (mdb->lock.valid == RWLOCK_VALID) {
- fprintf(fp, "\tRWLOCK=%p w_active=%i w_wait=%i\n", &mdb->lock, mdb->lock.w_active, mdb->lock.w_wait);
- }
+ mdb->print_lock_info(fp);
}
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES*/
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2002-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2002-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*/
#include "bacula.h"
-#include "cats.h"
const char *cleanup_created_job =
"UPDATE Job SET JobStatus='f', StartTime=SchedTime, EndTime=SchedTime "
"AND T1.PathId = File.PathId "
"AND T1.FilenameId = File.FilenameId";
-const char *select_recent_version_with_basejob[5] = {
- /* MySQL */
+const char *select_recent_version_with_basejob[] = {
+ /* MySQL */
select_recent_version_with_basejob_default,
- /* Postgresql */ /* The DISTINCT ON () permits to avoid extra join */
- "SELECT DISTINCT ON (FilenameId, PathId) JobTDate, JobId, FileId, "
+ /* Postgresql */ /* The DISTINCT ON () permits to avoid extra join */
+ "SELECT DISTINCT ON (FilenameId, PathId) JobTDate, JobId, FileId, "
"FileIndex, PathId, FilenameId, LStat, MD5, MarkId "
"FROM "
"(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5, MarkId "
") AS T JOIN Job USING (JobId) "
"ORDER BY FilenameId, PathId, JobTDate DESC ",
- /* SQLite */
- select_recent_version_with_basejob_default,
-
- /* SQLite3 */
+ /* SQLite3 */
select_recent_version_with_basejob_default,
- /* Ingres */
+ /* Ingres */
select_recent_version_with_basejob_default
};
"AND T1.PathId = File.PathId "
"AND T1.FilenameId = File.FilenameId";
-const char *select_recent_version_with_basejob_and_delta[5] = {
- /* MySQL */
+const char *select_recent_version_with_basejob_and_delta[] = {
+ /* MySQL */
select_recent_version_with_basejob_and_delta_default,
- /* Postgresql */ /* The DISTINCT ON () permits to avoid extra join */
- "SELECT DISTINCT ON (FilenameId, PathId, MarkId) JobTDate, JobId, FileId, "
+ /* Postgresql */ /* The DISTINCT ON () permits to avoid extra join */
+ "SELECT DISTINCT ON (FilenameId, PathId, MarkId) JobTDate, JobId, FileId, "
"FileIndex, PathId, FilenameId, LStat, MD5, MarkId "
"FROM "
"(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5, MarkId "
") AS T JOIN Job USING (JobId) "
"ORDER BY FilenameId, PathId, MarkId, JobTDate DESC ",
- /* SQLite */
- select_recent_version_with_basejob_and_delta_default,
-
- /* SQLite3 */
+ /* SQLite3 */
select_recent_version_with_basejob_and_delta_default,
- /* Ingres */
+ /* Ingres */
select_recent_version_with_basejob_and_delta_default
};
"AND t1.PathId = f1.PathId "
"AND j1.JobId = f1.JobId";
-const char *select_recent_version[5] = {
+const char *select_recent_version[] = {
/* MySQL */
select_recent_version_default,
"WHERE JobId IN (%s) "
"ORDER BY FilenameId, PathId, JobTDate DESC ",
- /* SQLite */
- select_recent_version_default,
-
/* SQLite3 */
select_recent_version_default,
"AND FileSet.FileSet=(SELECT FileSet FROM FileSet WHERE FileSetId = %s) "
"ORDER BY Job.JobTDate DESC LIMIT 1";
-const char *create_temp_accurate_jobids[5] = {
+const char *create_temp_accurate_jobids[] = {
/* Mysql */
create_temp_accurate_jobids_default,
+
/* Postgresql */
create_temp_accurate_jobids_default,
- /* SQLite */
- create_temp_accurate_jobids_default,
+
/* SQLite3 */
create_temp_accurate_jobids_default,
+
/* Ingres */
"DECLARE GLOBAL TEMPORARY TABLE btemp3%s AS "
"SELECT JobId, StartTime, EndTime, JobTDate, PurgedFiles "
"ON COMMIT PRESERVE ROWS WITH NORECOVERY"
};
-const char *create_temp_basefile[5] = {
+const char *create_temp_basefile[] = {
/* Mysql */
"CREATE TEMPORARY TABLE basefile%lld ("
"Path BLOB NOT NULL,"
"Name BLOB NOT NULL)",
+
/* Postgresql */
"CREATE TEMPORARY TABLE basefile%lld ("
"Path TEXT,"
"Name TEXT)",
- /* SQLite */
- "CREATE TEMPORARY TABLE basefile%lld ("
- "Path TEXT,"
- "Name TEXT)",
+
/* SQLite3 */
"CREATE TEMPORARY TABLE basefile%lld ("
"Path TEXT,"
"Name TEXT)",
+
/* Ingres */
"DECLARE GLOBAL TEMPORARY TABLE basefile%lld ("
"Path VARBYTE(32000) NOT NULL,"
"ON COMMIT PRESERVE ROWS WITH NORECOVERY"
};
-const char *create_temp_new_basefile[5] = {
+const char *create_temp_new_basefile[] = {
/* Mysql */
"CREATE TEMPORARY TABLE new_basefile%lld AS "
"SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
"JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
"JOIN Path ON (Path.PathId = Temp.PathId) "
"WHERE Temp.FileIndex > 0",
+
/* Postgresql */
"CREATE TEMPORARY TABLE new_basefile%lld AS "
"SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
"JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
"JOIN Path ON (Path.PathId = Temp.PathId) "
"WHERE Temp.FileIndex > 0",
- /* SQLite */
- "CREATE TEMPORARY TABLE new_basefile%lld AS "
- "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
- "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, "
- "Temp.MD5 AS MD5 "
- "FROM ( %s ) AS Temp "
- "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
- "JOIN Path ON (Path.PathId = Temp.PathId) "
- "WHERE Temp.FileIndex > 0",
+
/* SQLite3 */
"CREATE TEMPORARY TABLE new_basefile%lld AS "
"SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
"JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
"JOIN Path ON (Path.PathId = Temp.PathId) "
"WHERE Temp.FileIndex > 0",
+
/* Ingres */
"DECLARE GLOBAL TEMPORARY TABLE new_basefile%lld AS "
"SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
/* ====== ua_prune.c */
/* List of SQL commands to create temp table and indicies */
-const char *create_deltabs[5] = {
+const char *create_deltabs[] = {
/* MySQL */
"CREATE TEMPORARY TABLE DelCandidates ("
"JobId INTEGER UNSIGNED NOT NULL, "
"FileSetId INTEGER UNSIGNED, "
"JobFiles INTEGER UNSIGNED, "
"JobStatus BINARY(1))",
+
/* Postgresql */
"CREATE TEMPORARY TABLE DelCandidates ("
"JobId INTEGER NOT NULL, "
"FileSetId INTEGER, "
"JobFiles INTEGER, "
"JobStatus char(1))",
- /* SQLite */
- "CREATE TEMPORARY TABLE DelCandidates ("
- "JobId INTEGER UNSIGNED NOT NULL, "
- "PurgedFiles TINYINT, "
- "FileSetId INTEGER UNSIGNED, "
- "JobFiles INTEGER UNSIGNED, "
- "JobStatus CHAR)",
+
/* SQLite3 */
"CREATE TEMPORARY TABLE DelCandidates ("
"JobId INTEGER UNSIGNED NOT NULL, "
"FileSetId INTEGER UNSIGNED, "
"JobFiles INTEGER UNSIGNED, "
"JobStatus CHAR)",
+
/* Ingres */
"DECLARE GLOBAL TEMPORARY TABLE DelCandidates ("
"JobId INTEGER NOT NULL, "
") "
"GROUP BY PriorJobId "; /* one result per copy */
-const char *uap_upgrade_copies_oldest_job[5] = {
+const char *uap_upgrade_copies_oldest_job[] = {
/* Mysql */
uap_upgrade_copies_oldest_job_default,
+
/* Postgresql */
uap_upgrade_copies_oldest_job_default,
- /* SQLite */
- uap_upgrade_copies_oldest_job_default,
+
/* SQLite3 */
uap_upgrade_copies_oldest_job_default,
+
/* Ingres */
"DECLARE GLOBAL TEMPORARY TABLE cpy_tmp AS "
"SELECT MIN(JobId) AS JobId FROM Job " /* Choose the oldest job */
/* ======= ua_restore.c */
/* List Jobs where a particular file is saved */
-const char *uar_file[5] = {
+const char *uar_file[] = {
/* Mysql */
"SELECT Job.JobId as JobId,"
"CONCAT(Path.Path,Filename.Name) as Name, "
"AND Job.JobId=File.JobId AND File.FileIndex > 0 "
"AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
"AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",
+
/* Postgresql */
"SELECT Job.JobId as JobId,"
"Path.Path||Filename.Name as Name, "
"AND Job.JobId=File.JobId AND File.FileIndex > 0 "
"AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
"AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",
- /* SQLite */
- "SELECT Job.JobId as JobId,"
- "Path.Path||Filename.Name as Name, "
- "StartTime,Type as JobType,JobStatus,JobFiles,JobBytes "
- "FROM Client,Job,File,Filename,Path WHERE Client.Name='%s' "
- "AND Client.ClientId=Job.ClientId "
- "AND Job.JobId=File.JobId AND File.FileIndex > 0 "
- "AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
- "AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",
+
/* SQLite3 */
"SELECT Job.JobId as JobId,"
"Path.Path||Filename.Name as Name, "
"AND Job.JobId=File.JobId AND File.FileIndex > 0 "
"AND Path.PathId=File.PathId AND Filename.FilenameId=File.FilenameId "
"AND Filename.Name='%s' ORDER BY StartTime DESC LIMIT 20",
+
/* Ingres */
"SELECT Job.JobId as JobId,"
"Path.Path||Filename.Name as Name, "
"AND Filename.Name='%s' ORDER BY StartTime DESC FETCH FIRST 20 ROWS ONLY"
};
-const char *uar_create_temp[5] = {
+const char *uar_create_temp[] = {
/* Mysql */
"CREATE TEMPORARY TABLE temp ("
"JobId INTEGER UNSIGNED NOT NULL,"
"StartFile INTEGER UNSIGNED,"
"VolSessionId INTEGER UNSIGNED,"
"VolSessionTime INTEGER UNSIGNED)",
+
/* Postgresql */
"CREATE TEMPORARY TABLE temp ("
"JobId INTEGER NOT NULL,"
"StartFile INTEGER,"
"VolSessionId INTEGER,"
"VolSessionTime INTEGER)",
- /* SQLite */
- "CREATE TEMPORARY TABLE temp ("
- "JobId INTEGER UNSIGNED NOT NULL,"
- "JobTDate BIGINT UNSIGNED,"
- "ClientId INTEGER UNSIGNED,"
- "Level CHAR,"
- "JobFiles INTEGER UNSIGNED,"
- "JobBytes BIGINT UNSIGNED,"
- "StartTime TEXT,"
- "VolumeName TEXT,"
- "StartFile INTEGER UNSIGNED,"
- "VolSessionId INTEGER UNSIGNED,"
- "VolSessionTime INTEGER UNSIGNED)",
+
/* SQLite3 */
"CREATE TEMPORARY TABLE temp ("
"JobId INTEGER UNSIGNED NOT NULL,"
"StartFile INTEGER UNSIGNED,"
"VolSessionId INTEGER UNSIGNED,"
"VolSessionTime INTEGER UNSIGNED)",
+
/* Ingres */
"DECLARE GLOBAL TEMPORARY TABLE temp ("
"JobId INTEGER NOT NULL,"
"ON COMMIT PRESERVE ROWS WITH NORECOVERY"
};
-const char *uar_create_temp1[5] = {
+const char *uar_create_temp1[] = {
/* Mysql */
"CREATE TEMPORARY TABLE temp1 ("
"JobId INTEGER UNSIGNED NOT NULL,"
"JobTDate BIGINT UNSIGNED)",
+
/* Postgresql */
"CREATE TEMPORARY TABLE temp1 ("
"JobId INTEGER NOT NULL,"
"JobTDate BIGINT)",
- /* SQLite */
- "CREATE TEMPORARY TABLE temp1 ("
- "JobId INTEGER UNSIGNED NOT NULL,"
- "JobTDate BIGINT UNSIGNED)",
+
/* SQLite3 */
"CREATE TEMPORARY TABLE temp1 ("
"JobId INTEGER UNSIGNED NOT NULL,"
"JobTDate BIGINT UNSIGNED)",
+
/* Ingres */
"DECLARE GLOBAL TEMPORARY TABLE temp1 ("
"JobId INTEGER NOT NULL,"
"JobTDate BIGINT) "
"ON COMMIT PRESERVE ROWS WITH NORECOVERY"
- };
+};
/* Query to get all files in a directory -- no recursing
* Note, for PostgreSQL since it respects the "Single Value
* for each time it was backed up.
*/
-const char *uar_jobid_fileindex_from_dir[5] = {
+const char *uar_jobid_fileindex_from_dir[] = {
/* Mysql */
"SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId IN (%s) "
"AND Path.PathId=File.Pathid "
"AND Filename.FilenameId=File.FilenameId "
"GROUP BY File.FileIndex ",
+
/* Postgresql */
"SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId IN (%s) "
"AND Job.ClientId=Client.ClientId "
"AND Path.PathId=File.Pathid "
"AND Filename.FilenameId=File.FilenameId",
- /* SQLite */
- "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
- "WHERE Job.JobId IN (%s) "
- "AND Job.JobId=File.JobId "
- "AND Path.Path='%s' "
- "AND Client.Name='%s' "
- "AND Job.ClientId=Client.ClientId "
- "AND Path.PathId=File.Pathid "
- "AND Filename.FilenameId=File.FilenameId "
- "GROUP BY File.FileIndex ",
+
/* SQLite3 */
"SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId IN (%s) "
"AND Path.PathId=File.Pathid "
"AND Filename.FilenameId=File.FilenameId "
"GROUP BY File.FileIndex ",
+
/* Ingres */
"SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId IN (%s) "
"AND Filename.FilenameId=File.FilenameId"
};
-const char *sql_media_order_most_recently_written[5] = {
+const char *sql_media_order_most_recently_written[] = {
/* Mysql */
"ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",
+
/* Postgresql */
"ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",
- /* SQLite */
- "ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",
+
/* SQLite3 */
"ORDER BY LastWritten IS NULL,LastWritten DESC,MediaId",
+
/* Ingres */
"ORDER BY IFNULL(LastWritten, '1970-01-01 00:00:00') DESC,MediaId"
};
-const char *sql_get_max_connections[5] = {
+const char *sql_get_max_connections[] = {
/* Mysql */
"SHOW VARIABLES LIKE 'max_connections'",
+
/* Postgresql */
"SHOW max_connections",
- /* SQLite */
- "SELECT 0",
+
/* SQLite3 */
"SELECT 0",
- /* Ingres (TODO) */
- "SELECT 0"
-};
-/* Row number of the max_connections setting */
-const uint32_t sql_get_max_connections_index[5] = {
- /* Mysql */
- 1,
- /* Postgresql */
- 0,
- /* SQLite */
- 0,
- /* SQLite3 */
- 0,
/* Ingres (TODO) */
- 0
+ "SELECT 0"
};
-const char *sql_bvfs_select[5] = {
+const char *sql_bvfs_select[] = {
/* Mysql */
"CREATE TABLE %s AS ( "
"SELECT JobId, FileIndex, FileId, max(JobTDate) as JobTDate "
"FROM btemp%s "
"GROUP BY PathId, FilenameId "
"HAVING FileIndex > 0)",
+
/* Postgresql */
"CREATE TABLE %s AS ( "
"SELECT JobId, FileIndex, FileId "
"ORDER BY PathId, FilenameId, JobTDate DESC "
") AS T "
"WHERE FileIndex > 0)",
- /* SQLite */
- "SELECT 0",
+
/* SQLite3 */
"SELECT 0",
+
/* Ingres (TODO) */
"SELECT 0"
};
const char *sql_bvfs_list_files[] = {
/* Mysql */
-/* JobId PathId JobId PathId Limit Offset AND? Filename? JobId JobId*/
+ /* JobId PathId JobId PathId Limit Offset AND? Filename? JobId JobId*/
sql_bvfs_list_files_default,
-/* JobId PathId JobId PathId WHERE? Filename? Limit Offset*/
+ /* JobId PathId JobId PathId WHERE? Filename? Limit Offset*/
/* Postgresql */
"SELECT DISTINCT ON (FilenameId) 'F', PathId, T.FilenameId, "
"Filename.Name, JobId, LStat, FileId "
sql_bvfs_list_files_default
};
+const char *batch_lock_path_query[] = {
+ /* Mysql */
+ "LOCK TABLES Path write, batch write, Path as p write",
+
+ /* Postgresql */
+ "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE",
+
+ /* SQLite3 */
+ "BEGIN",
+
+ /* Ingres */
+ "BEGIN"
+};
+
+const char *batch_lock_filename_query[] = {
+ /* Mysql */
+ "LOCK TABLES Filename write, batch write, Filename as f write",
+
+ /* Postgresql */
+ "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE",
+
+ /* SQLite3 */
+ "BEGIN",
+
+ /* Ingres */
+ "BEGIN"
+};
+
+const char *batch_unlock_tables_query[] = {
+ /* Mysql */
+ "UNLOCK TABLES",
+
+ /* Postgresql */
+ "COMMIT",
+
+ /* SQLite3 */
+ "COMMIT",
+
+ /* Ingres */
+ "COMMIT"
+};
+
+const char *batch_fill_path_query[] = {
+ /* Mysql */
+ "INSERT INTO Path (Path) "
+ "SELECT a.Path FROM "
+ "(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS "
+ "(SELECT Path FROM Path AS p WHERE p.Path = a.Path)",
+
+ /* Postgresql */
+ "INSERT INTO Path (Path) "
+ "SELECT a.Path FROM "
+ "(SELECT DISTINCT Path FROM batch) AS a "
+ "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ",
+
+ /* SQLite3 */
+ "INSERT INTO Path (Path) "
+ "SELECT DISTINCT Path FROM batch "
+ "EXCEPT SELECT Path FROM Path",
+
+ /* Ingres */
+ "INSERT INTO Path (Path) "
+ "SELECT DISTINCT b.Path FROM batch b "
+ "WHERE NOT EXISTS (SELECT Path FROM Path p WHERE p.Path = b.Path)"
+};
+
+const char *batch_fill_filename_query[] = {
+ /* Mysql */
+ "INSERT INTO Filename (Name) "
+ "SELECT a.Name FROM "
+ "(SELECT DISTINCT Name FROM batch) AS a WHERE NOT EXISTS "
+ "(SELECT Name FROM Filename AS f WHERE f.Name = a.Name)",
+
+ /* Postgresql */
+ "INSERT INTO Filename (Name) "
+ "SELECT a.Name FROM "
+ "(SELECT DISTINCT Name FROM batch) as a "
+ "WHERE NOT EXISTS "
+ "(SELECT Name FROM Filename WHERE Name = a.Name)",
+
+ /* SQLite3 */
+ "INSERT INTO Filename (Name) "
+ "SELECT DISTINCT Name FROM batch "
+ "EXCEPT SELECT Name FROM Filename",
+
+ /* Ingres */
+ "INSERT INTO Filename (Name) "
+ "SELECT DISTINCT b.Name FROM batch b "
+ "WHERE NOT EXISTS (SELECT Name FROM Filename f WHERE f.Name = b.Name)"
+};
+
+const char *match_query[] = {
+ /* Mysql */
+ "MATCH",
+
+ /* Postgresql */
+ "~",
+
+ /* SQLite3 */
+ "MATCH",
+
+ /* Ingres */
+ "~"
+};
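+
+/*
+ * Usage sketch (illustration only): the per-backend arrays above are meant
+ * to be indexed with the backend type of the open connection, e.g.:
+ *
+ *    db_sql_query(mdb, batch_fill_path_query[db_get_type_index(mdb)],
+ *                 NULL, NULL);
+ */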
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_table;
extern const char CATS_IMP_EXP *uar_sel_jobid_temp;
-extern const char CATS_IMP_EXP *select_recent_version[5];
-extern const char CATS_IMP_EXP *select_recent_version_with_basejob[5];
+extern const char CATS_IMP_EXP *select_recent_version[];
+extern const char CATS_IMP_EXP *select_recent_version_with_basejob[];
extern const char CATS_IMP_EXP *select_recent_version_with_basejob_and_delta[];
-extern const char CATS_IMP_EXP *create_temp_accurate_jobids[5];
-extern const char CATS_IMP_EXP *create_temp_basefile[5];
-extern const char CATS_IMP_EXP *create_temp_new_basefile[5];
-extern const char CATS_IMP_EXP *create_deltabs[5];
-extern const char CATS_IMP_EXP *uap_upgrade_copies_oldest_job[5];
+extern const char CATS_IMP_EXP *create_temp_accurate_jobids[];
+extern const char CATS_IMP_EXP *create_temp_basefile[];
+extern const char CATS_IMP_EXP *create_temp_new_basefile[];
+extern const char CATS_IMP_EXP *create_deltabs[];
+extern const char CATS_IMP_EXP *uap_upgrade_copies_oldest_job[];
-extern const char CATS_IMP_EXP *uar_file[5];
-extern const char CATS_IMP_EXP *uar_create_temp[5];
-extern const char CATS_IMP_EXP *uar_create_temp1[5];
-extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_dir[5];
-extern const char CATS_IMP_EXP *sql_media_order_most_recently_written[5];
-extern const char CATS_IMP_EXP *sql_get_max_connections[5];
-extern const uint32_t CATS_IMP_EXP sql_get_max_connections_index[5];
-extern const char *sql_bvfs_select[5];
-extern const char *sql_bvfs_list_files[];
+extern const char CATS_IMP_EXP *uar_file[];
+extern const char CATS_IMP_EXP *uar_create_temp[];
+extern const char CATS_IMP_EXP *uar_create_temp1[];
+extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_dir[];
+extern const char CATS_IMP_EXP *sql_media_order_most_recently_written[];
+extern const char CATS_IMP_EXP *sql_get_max_connections[];
+extern const char CATS_IMP_EXP *sql_bvfs_select[];
+extern const char CATS_IMP_EXP *sql_bvfs_list_files[];
+
+extern const char CATS_IMP_EXP *batch_lock_path_query[];
+extern const char CATS_IMP_EXP *batch_lock_filename_query[];
+extern const char CATS_IMP_EXP *batch_unlock_tables_query[];
+extern const char CATS_IMP_EXP *batch_fill_path_query[];
+extern const char CATS_IMP_EXP *batch_fill_filename_query[];
+extern const char CATS_IMP_EXP *match_query[];
*
*/
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
#include "bacula.h"
-#include "cats.h"
static const int dbglevel = 100;
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+
+#include "cats.h"
+#include "bdb_priv.h"
+#include "sql_glue.h"
/* -----------------------------------------------------------------------
*
*/
/* Forward referenced subroutines */
-#ifndef HAVE_BATCH_FILE_INSERT
static int db_create_file_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
static int db_create_filename_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
-#endif /* HAVE_BATCH_FILE_INSERT */
-
/** Create a new record for the Job
* Returns: false on failure
{
bool stat;
char ed1[30], ed2[30], ed3[50], ed4[50], ed5[50];
+ int num_rows;
Dmsg0(200, "In create pool\n");
db_lock(mdb);
Dmsg1(200, "selectpool: %s\n", mdb->cmd);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 0) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 0) {
Mmsg1(&mdb->errmsg, _("pool record %s already exists\n"), pr->Name);
sql_free_result(mdb);
db_unlock(mdb);
{
bool ok;
char ed1[30], ed2[30];
+ int num_rows;
Dmsg0(200, "In create Device\n");
db_lock(mdb);
Dmsg1(200, "selectdevice: %s\n", mdb->cmd);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 0) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 0) {
Mmsg1(&mdb->errmsg, _("Device record %s already exists\n"), dr->Name);
sql_free_result(mdb);
db_unlock(mdb);
{
SQL_ROW row;
bool ok;
+ int num_rows;
db_lock(mdb);
Mmsg(mdb->cmd, "SELECT StorageId,AutoChanger FROM Storage WHERE Name='%s'", sr->Name);
sr->created = false;
/* Check if it already exists */
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
+ num_rows = sql_num_rows(mdb);
/* If more than one, report error, but return first row */
- if (mdb->num_rows > 1) {
- Mmsg1(&mdb->errmsg, _("More than one Storage record!: %d\n"), (int)(mdb->num_rows));
+ if (num_rows > 1) {
+ Mmsg1(&mdb->errmsg, _("More than one Storage record!: %d\n"), num_rows);
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
}
- if (mdb->num_rows >= 1) {
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(&mdb->errmsg, _("error fetching Storage row: %s\n"), sql_strerror(mdb));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
db_create_mediatype_record(JCR *jcr, B_DB *mdb, MEDIATYPE_DBR *mr)
{
bool stat;
+ int num_rows;
Dmsg0(200, "In create mediatype\n");
db_lock(mdb);
Dmsg1(200, "selectmediatype: %s\n", mdb->cmd);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 0) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 0) {
Mmsg1(&mdb->errmsg, _("mediatype record %s already exists\n"), mr->MediaType);
sql_free_result(mdb);
db_unlock(mdb);
char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50], ed7[50], ed8[50];
char ed9[50], ed10[50], ed11[50], ed12[50];
struct tm tm;
+ int num_rows;
db_lock(mdb);
Mmsg(mdb->cmd, "SELECT MediaId FROM Media WHERE VolumeName='%s'",
Dmsg1(500, "selectpool: %s\n", mdb->cmd);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 0) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 0) {
Mmsg1(&mdb->errmsg, _("Volume \"%s\" already exists.\n"), mr->VolumeName);
sql_free_result(mdb);
db_unlock(mdb);
SQL_ROW row;
int stat;
char ed1[50], ed2[50];
+ int num_rows;
db_lock(mdb);
Mmsg(mdb->cmd, "SELECT ClientId,Uname FROM Client WHERE Name='%s'", cr->Name);
cr->ClientId = 0;
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
+ num_rows = sql_num_rows(mdb);
/* If more than one, report error, but return first row */
- if (mdb->num_rows > 1) {
- Mmsg1(&mdb->errmsg, _("More than one Client!: %d\n"), (int)(mdb->num_rows));
+ if (num_rows > 1) {
+ Mmsg1(&mdb->errmsg, _("More than one Client!: %d\n"), num_rows);
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
}
- if (mdb->num_rows >= 1) {
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(&mdb->errmsg, _("error fetching Client row: %s\n"), sql_strerror(mdb));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
{
SQL_ROW row;
int stat;
+ int num_rows;
mdb->esc_name = check_pool_memory_size(mdb->esc_name, 2*mdb->pnl+2);
db_escape_string(jcr, mdb, mdb->esc_name, mdb->path, mdb->pnl);
Mmsg(mdb->cmd, "SELECT PathId FROM Path WHERE Path='%s'", mdb->esc_name);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
char ed1[30];
Mmsg2(&mdb->errmsg, _("More than one Path!: %s for path: %s\n"),
- edit_uint64(mdb->num_rows, ed1), mdb->path);
+ edit_uint64(num_rows, ed1), mdb->path);
Jmsg(jcr, M_WARNING, 0, "%s", mdb->errmsg);
}
/* Even if there are multiple paths, take the first one */
- if (mdb->num_rows >= 1) {
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(&mdb->errmsg, _("error fetching row: %s\n"), sql_strerror(mdb));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
SQL_ROW row;
bool stat;
struct tm tm;
+ int num_rows;
db_lock(mdb);
fsr->created = false;
fsr->FileSetId = 0;
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
- Mmsg1(&mdb->errmsg, _("More than one FileSet!: %d\n"), (int)(mdb->num_rows));
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
+ Mmsg1(&mdb->errmsg, _("More than one FileSet!: %d\n"), num_rows);
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
}
- if (mdb->num_rows >= 1) {
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(&mdb->errmsg, _("error fetching FileSet row: ERR=%s\n"), sql_strerror(mdb));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
* };
*/
-#ifdef HAVE_BATCH_FILE_INSERT
-
-/** All sql_batch_* functions are used to do bulk batch insert in File/Filename/Path
- * tables. This code can be activated by adding "#define HAVE_BATCH_FILE_INSERT 1"
- * in baconfig.h
+/**
+ * All sql_batch_* functions are used to do bulk batch insert in File/Filename/Path
+ * tables.
*
* To sum up :
* - bulk load a temp table
* - then insert the join between the temp, filename and path tables into file.
*/
-/*
- * Returns 1 if OK
- * 0 if failed
- */
-bool my_batch_start(JCR *jcr, B_DB *mdb)
-{
- bool ok;
-
- db_lock(mdb);
- ok = db_sql_query(mdb,
- "CREATE TEMPORARY TABLE batch ("
- "FileIndex integer,"
- "JobId integer,"
- "Path blob,"
- "Name blob,"
- "LStat tinyblob,"
- "MD5 tinyblob,"
- "MarkId integer)",NULL, NULL);
- db_unlock(mdb);
- return ok;
-}
-
-/*
- * Returns 1 if OK
- * 0 if failed
- */
-bool my_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
-{
- size_t len;
- const char *digest;
- char ed1[50];
-
- mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
- db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
-
- mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
- db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
-
- if (ar->Digest == NULL || ar->Digest[0] == 0) {
- digest = "0";
- } else {
- digest = ar->Digest;
- }
-
- len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES "
- "(%u,%s,'%s','%s','%s','%s',%u)",
- ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
- mdb->esc_name, ar->attr, digest, ar->DeltaSeq);
-
- return INSERT_DB(jcr, mdb, mdb->cmd);
-}
-
-/* set error to something to abort operation */
-/*
- * Returns 1 if OK
- * 0 if failed
- */
-bool my_batch_end(JCR *jcr, B_DB *mdb, const char *error)
-{
-
- Dmsg0(50, "sql_batch_end started\n");
-
- if (mdb) {
-#ifdef HAVE_DBI
- mdb->status = (dbi_error_flag)0;
-#else
- mdb->status = 0;
-#endif
- }
- return true;
-}
-
/*
* Returns true if OK
* false if failed
/*
* We have to lock tables
*/
- if (!db_sql_query(jcr->db_batch, sql_batch_lock_path_query, NULL, NULL)) {
+ if (!db_sql_query(jcr->db_batch, batch_lock_path_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) {
Jmsg1(jcr, M_FATAL, 0, "Lock Path table %s\n", jcr->db_batch->errmsg);
goto bail_out;
}
- if (!db_sql_query(jcr->db_batch, sql_batch_fill_path_query, NULL, NULL)) {
+ if (!db_sql_query(jcr->db_batch, batch_fill_path_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) {
Jmsg1(jcr, M_FATAL, 0, "Fill Path table %s\n",jcr->db_batch->errmsg);
- db_sql_query(jcr->db_batch, sql_batch_unlock_tables_query, NULL, NULL);
+ db_sql_query(jcr->db_batch, batch_unlock_tables_query[db_get_type_index(jcr->db_batch)], NULL, NULL);
goto bail_out;
}
- if (!db_sql_query(jcr->db_batch, sql_batch_unlock_tables_query,NULL,NULL)) {
+ if (!db_sql_query(jcr->db_batch, batch_unlock_tables_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) {
Jmsg1(jcr, M_FATAL, 0, "Unlock Path table %s\n", jcr->db_batch->errmsg);
goto bail_out;
}
/*
* We have to lock tables
*/
- if (!db_sql_query(jcr->db_batch,sql_batch_lock_filename_query,NULL, NULL)) {
+ if (!db_sql_query(jcr->db_batch, batch_lock_filename_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) {
Jmsg1(jcr, M_FATAL, 0, "Lock Filename table %s\n", jcr->db_batch->errmsg);
goto bail_out;
}
- if (!db_sql_query(jcr->db_batch,sql_batch_fill_filename_query, NULL,NULL)) {
+ if (!db_sql_query(jcr->db_batch, batch_fill_filename_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) {
Jmsg1(jcr,M_FATAL,0,"Fill Filename table %s\n",jcr->db_batch->errmsg);
- db_sql_query(jcr->db_batch, sql_batch_unlock_tables_query, NULL, NULL);
+ db_sql_query(jcr->db_batch, batch_unlock_tables_query[db_get_type_index(jcr->db_batch)], NULL, NULL);
goto bail_out;
}
- if (!db_sql_query(jcr->db_batch, sql_batch_unlock_tables_query,NULL,NULL)) {
+ if (!db_sql_query(jcr->db_batch, batch_unlock_tables_query[db_get_type_index(jcr->db_batch)], NULL, NULL)) {
Jmsg1(jcr, M_FATAL, 0, "Unlock Filename table %s\n", jcr->db_batch->errmsg);
goto bail_out;
}
"FROM batch "
"JOIN Path ON (batch.Path = Path.Path) "
"JOIN Filename ON (batch.Name = Filename.Name)",
- NULL,NULL))
+ NULL, NULL))
{
Jmsg1(jcr, M_FATAL, 0, "Fill File table %s\n", jcr->db_batch->errmsg);
goto bail_out;
* db_strerror(mdb) to get the error message, so the error message
* MUST be edited into mdb->errmsg before returning an error status.
*/
-bool db_create_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+bool db_create_batch_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
{
ASSERT(ar->FileType != FT_BASE);
return sql_batch_insert(jcr, bdb, ar);
}
-#else /* ! HAVE_BATCH_FILE_INSERT */
-
/**
* Create File record in B_DB
*
db_unlock(mdb);
return false;
}
-
-
/**
* This is the master File entry containing the attributes.
* The filename and path records have already been created.
static int db_create_filename_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
{
SQL_ROW row;
+ int num_rows;
mdb->esc_name = check_pool_memory_size(mdb->esc_name, 2*mdb->fnl+2);
db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
Mmsg(mdb->cmd, "SELECT FilenameId FROM Filename WHERE Name='%s'", mdb->esc_name);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
char ed1[30];
Mmsg2(&mdb->errmsg, _("More than one Filename! %s for file: %s\n"),
- edit_uint64(mdb->num_rows, ed1), mdb->fname);
+ edit_uint64(num_rows, ed1), mdb->fname);
Jmsg(jcr, M_WARNING, 0, "%s", mdb->errmsg);
}
- if (mdb->num_rows >= 1) {
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg2(&mdb->errmsg, _("Error fetching row for file=%s: ERR=%s\n"),
mdb->fname, sql_strerror(mdb));
return ar->FilenameId > 0;
}
-bool db_write_batch_file_records(JCR *jcr)
-{
- return true;
-}
-
-#endif /* ! HAVE_BATCH_FILE_INSERT */
-
/**
* Create file attributes record, or base file attributes record
*/
}
if (ar->FileType != FT_BASE) {
- ret = db_create_file_attributes_record(jcr, mdb, ar);
-
+ if (mdb->batch_insert_available()) {
+ ret = db_create_batch_file_attributes_record(jcr, mdb, ar);
+ } else {
+ ret = db_create_file_attributes_record(jcr, mdb, ar);
+ }
} else if (jcr->HasBase) {
ret = db_create_base_file_attributes_record(jcr, mdb, ar);
-
} else {
Jmsg0(jcr, M_FATAL, 0, _("Can't Copy/Migrate job using BaseJob"));
ret = true; /* in copy/migration what do we do ? */
goto bail_out;
}
- Mmsg(mdb->cmd, create_temp_basefile[db_type], (uint64_t) jcr->JobId);
+ Mmsg(mdb->cmd, create_temp_basefile[db_get_type_index(mdb)], (uint64_t) jcr->JobId);
if (!db_sql_query(mdb, mdb->cmd, NULL, NULL)) {
goto bail_out;
}
- Mmsg(buf, select_recent_version[db_type], jobids, jobids);
- Mmsg(mdb->cmd, create_temp_new_basefile[db_type], (uint64_t)jcr->JobId, buf.c_str());
+ Mmsg(buf, select_recent_version[db_get_type_index(mdb)], jobids, jobids);
+ Mmsg(mdb->cmd, create_temp_new_basefile[db_get_type_index(mdb)], (uint64_t)jcr->JobId, buf.c_str());
ret = db_sql_query(mdb, mdb->cmd, NULL, NULL);
bail_out:
return stat;
}
-
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
/* *****FIXME**** fix fixed length of select_cmd[] and insert_cmd[] */
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
#include "bacula.h"
-#include "cats.h"
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+
+#include "cats.h"
+#include "bdb_priv.h"
+#include "sql_glue.h"
+
/* -----------------------------------------------------------------------
*
* Generic Routines (or almost generic)
db_delete_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
{
SQL_ROW row;
+ int num_rows;
db_lock(mdb);
Mmsg(mdb->cmd, "SELECT PoolId FROM Pool WHERE Name='%s'", pr->Name);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
-
- if (mdb->num_rows == 0) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows == 0) {
Mmsg(mdb->errmsg, _("No pool record %s exists\n"), pr->Name);
sql_free_result(mdb);
db_unlock(mdb);
return 0;
- } else if (mdb->num_rows != 1) {
- Mmsg(mdb->errmsg, _("Expecting one pool record, got %d\n"), mdb->num_rows);
+ } else if (num_rows != 1) {
+ Mmsg(mdb->errmsg, _("Expecting one pool record, got %d\n"), num_rows);
sql_free_result(mdb);
db_unlock(mdb);
return 0;
}
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES */
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES */
*
*/
+#include "bacula.h"
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
-#include "bacula.h"
#include "cats.h"
-
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+#include "bdb_priv.h"
+#include "sql_glue.h"
/* -----------------------------------------------------------------------
*
strcmp(mr->VolStatus, "Purged") == 0) {
order = "AND Recycle=1 ORDER BY LastWritten ASC,MediaId"; /* take oldest that can be recycled */
} else {
- order = sql_media_order_most_recently_written[db_type]; /* take most recently written */
+ order = sql_media_order_most_recently_written[db_get_type_index(mdb)]; /* take most recently written */
}
Mmsg(mdb->cmd, "SELECT MediaId,VolumeName,VolJobs,VolFiles,VolBlocks,"
"VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes,"
return numrows;
}
-
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL*/
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
*
*/
+#include "bacula.h"
-/**
- * The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
-#include "bacula.h"
#include "cats.h"
-
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+#include "bdb_priv.h"
+#include "sql_glue.h"
/* -----------------------------------------------------------------------
*
SQL_ROW row;
int stat = 0;
char ed1[50], ed2[50], ed3[50];
+ int num_rows;
if (jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG) {
Mmsg(mdb->cmd,
Dmsg1(100, "Query=%s\n", mdb->cmd);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- Dmsg1(050, "get_file_record num_rows=%d\n", (int)mdb->num_rows);
- if (mdb->num_rows >= 1) {
+ num_rows = sql_num_rows(mdb);
+ Dmsg1(050, "get_file_record num_rows=%d\n", num_rows);
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(mdb->errmsg, _("Error fetching row: %s\n"), sql_strerror(mdb));
} else {
bstrncpy(fdbr->LStat, row[1], sizeof(fdbr->LStat));
bstrncpy(fdbr->Digest, row[2], sizeof(fdbr->Digest));
stat = 1;
- if (mdb->num_rows > 1) {
+ if (num_rows > 1) {
Mmsg3(mdb->errmsg, _("get_file_record want 1 got rows=%d PathId=%s FilenameId=%s\n"),
- mdb->num_rows,
+ num_rows,
edit_int64(fdbr->PathId, ed1),
edit_int64(fdbr->FilenameId, ed2));
Dmsg1(000, "=== Problem! %s", mdb->errmsg);
{
SQL_ROW row;
int FilenameId = 0;
+ int num_rows;
mdb->esc_name = check_pool_memory_size(mdb->esc_name, 2*mdb->fnl+2);
db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
Mmsg(mdb->cmd, "SELECT FilenameId FROM Filename WHERE Name='%s'", mdb->esc_name);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
char ed1[30];
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
Mmsg2(mdb->errmsg, _("More than one Filename!: %s for file: %s\n"),
- edit_uint64(mdb->num_rows, ed1), mdb->fname);
+ edit_uint64(num_rows, ed1), mdb->fname);
Jmsg(jcr, M_WARNING, 0, "%s", mdb->errmsg);
}
- if (mdb->num_rows >= 1) {
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(mdb->errmsg, _("error fetching row: %s\n"), sql_strerror(mdb));
} else {
{
SQL_ROW row;
uint32_t PathId = 0;
+ int num_rows;
mdb->esc_name = check_pool_memory_size(mdb->esc_name, 2*mdb->pnl+2);
db_escape_string(jcr, mdb, mdb->esc_name, mdb->path, mdb->pnl);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
char ed1[30];
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
Mmsg2(mdb->errmsg, _("More than one Path!: %s for path: %s\n"),
- edit_uint64(mdb->num_rows, ed1), mdb->path);
+ edit_uint64(num_rows, ed1), mdb->path);
Jmsg(jcr, M_WARNING, 0, "%s", mdb->errmsg);
}
/* Even if there are multiple paths, take the first one */
- if (mdb->num_rows >= 1) {
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(mdb->errmsg, _("error fetching row: %s\n"), sql_strerror(mdb));
} else {
char ed1[50];
int stat = 0;
int i;
+ int num_rows;
db_lock(mdb);
/* Get one entry per VolumeName, but "sort" by VolIndex */
Dmsg1(130, "VolNam=%s\n", mdb->cmd);
*VolumeNames[0] = 0;
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- Dmsg1(130, "Num rows=%d\n", mdb->num_rows);
- if (mdb->num_rows <= 0) {
+ num_rows = sql_num_rows(mdb);
+ Dmsg1(130, "Num rows=%d\n", num_rows);
+ if (num_rows <= 0) {
Mmsg1(mdb->errmsg, _("No volumes found for JobId=%d\n"), JobId);
stat = 0;
} else {
- stat = mdb->num_rows;
+ stat = num_rows;
for (i=0; i < stat; i++) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg2(mdb->errmsg, _("Error fetching row %d: ERR=%s\n"), i, sql_strerror(mdb));
int stat = 0;
int i;
VOL_PARAMS *Vols = NULL;
+ int num_rows;
db_lock(mdb);
Mmsg(mdb->cmd,
Dmsg1(130, "VolNam=%s\n", mdb->cmd);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- Dmsg1(200, "Num rows=%d\n", mdb->num_rows);
- if (mdb->num_rows <= 0) {
+ num_rows = sql_num_rows(mdb);
+ Dmsg1(200, "Num rows=%d\n", num_rows);
+ if (num_rows <= 0) {
Mmsg1(mdb->errmsg, _("No volumes found for JobId=%d\n"), JobId);
stat = 0;
} else {
- stat = mdb->num_rows;
+ stat = num_rows;
DBId_t *SId = NULL;
if (stat > 0) {
*VolParams = Vols = (VOL_PARAMS *)malloc(stat * sizeof(VOL_PARAMS));
SQL_ROW row;
bool ok = false;
char ed1[50];
+ int num_rows;
db_lock(mdb);
if (pdbr->PoolId != 0) { /* find by id */
pdbr->Name);
}
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
char ed1[30];
Mmsg1(mdb->errmsg, _("More than one Pool!: %s\n"),
- edit_uint64(mdb->num_rows, ed1));
+ edit_uint64(num_rows, ed1));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
- } else if (mdb->num_rows == 1) {
+ } else if (num_rows == 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(mdb->errmsg, _("error fetching row: %s\n"), sql_strerror(mdb));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
SQL_ROW row;
int stat = 0;
char ed1[50];
+ int num_rows;
db_lock(mdb);
if (cdbr->ClientId != 0) { /* find by id */
}
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
Mmsg1(mdb->errmsg, _("More than one Client!: %s\n"),
- edit_uint64(mdb->num_rows, ed1));
+ edit_uint64(num_rows, ed1));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
- } else if (mdb->num_rows == 1) {
+ } else if (num_rows == 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(mdb->errmsg, _("error fetching row: %s\n"), sql_strerror(mdb));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
int db_get_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
{
SQL_ROW row;
+ int num_rows;
db_lock(mdb);
Mmsg(mdb->cmd, "SELECT \"MinValue\",\"MaxValue\",CurrentValue,WrapCounter "
"FROM Counters WHERE Counter='%s'", cr->Counter);
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
+ num_rows = sql_num_rows(mdb);
/* If more than one, report error, but return first row */
- if (mdb->num_rows > 1) {
- Mmsg1(mdb->errmsg, _("More than one Counter!: %d\n"), (int)(mdb->num_rows));
+ if (num_rows > 1) {
+ Mmsg1(mdb->errmsg, _("More than one Counter!: %d\n"), num_rows);
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
}
- if (mdb->num_rows >= 1) {
+ if (num_rows >= 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(mdb->errmsg, _("error fetching Counter row: %s\n"), sql_strerror(mdb));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
SQL_ROW row;
int stat = 0;
char ed1[50];
+ int num_rows;
db_lock(mdb);
if (fsr->FileSetId != 0) { /* find by id */
}
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
char ed1[30];
Mmsg1(mdb->errmsg, _("Error got %s FileSets but expected only one!\n"),
- edit_uint64(mdb->num_rows, ed1));
- sql_data_seek(mdb, mdb->num_rows-1);
+ edit_uint64(num_rows, ed1));
+ sql_data_seek(mdb, num_rows-1);
}
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(mdb->errmsg, _("FileSet record \"%s\" not found.\n"), fsr->FileSet);
SQL_ROW row;
char ed1[50];
bool ok = false;
+ int num_rows;
db_lock(mdb);
if (mr->MediaId == 0 && mr->VolumeName[0] == 0) {
if (QUERY_DB(jcr, mdb, mdb->cmd)) {
char ed1[50];
- mdb->num_rows = sql_num_rows(mdb);
- if (mdb->num_rows > 1) {
+ num_rows = sql_num_rows(mdb);
+ if (num_rows > 1) {
Mmsg1(mdb->errmsg, _("More than one Volume!: %s\n"),
- edit_uint64(mdb->num_rows, ed1));
+ edit_uint64(num_rows, ed1));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
- } else if (mdb->num_rows == 1) {
+ } else if (num_rows == 1) {
if ((row = sql_fetch_row(mdb)) == NULL) {
Mmsg1(mdb->errmsg, _("error fetching row: %s\n"), sql_strerror(mdb));
Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
POOL_MEM buf(PM_MESSAGE);
POOL_MEM buf2(PM_MESSAGE);
if (use_delta) {
- Mmsg(buf2, select_recent_version_with_basejob_and_delta[db_type],
+ Mmsg(buf2, select_recent_version_with_basejob_and_delta[db_get_type_index(mdb)],
jobids, jobids, jobids, jobids);
} else {
- Mmsg(buf2, select_recent_version_with_basejob[db_type],
+ Mmsg(buf2, select_recent_version_with_basejob[db_get_type_index(mdb)],
jobids, jobids, jobids, jobids);
}
jobids->reset();
/* First, find the last good Full backup for this job/client/fileset */
- Mmsg(query, create_temp_accurate_jobids[db_type],
+ Mmsg(query, create_temp_accurate_jobids[db_get_type_index(mdb)],
edit_uint64(jcr->JobId, jobid),
edit_uint64(jr->ClientId, clientid),
date,
return ret;
}
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+/*
+ * Bacula Glue code for the catalog refactoring.
+ *
+ * Written by Marco van Wieringen, November 2009
+ */
+
+#include "bacula.h"
+
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+
+#include "cats.h"
+#include "bdb_priv.h"
+#include "sql_glue.h"
+
+/* -----------------------------------------------------------------------
+ *
+ * Generic Glue Routines
+ *
+ * -----------------------------------------------------------------------
+ */
+bool db_match_database(B_DB *mdb, const char *db_driver, const char *db_name,
+ const char *db_address, int db_port)
+{
+ return mdb->db_match_database(db_driver, db_name, db_address, db_port);
+}
+
+B_DB *db_clone_database_connection(B_DB *mdb, JCR *jcr, bool mult_db_connections)
+{
+ return mdb->db_clone_database_connection(jcr, mult_db_connections);
+}
+
+const char *db_get_type(B_DB *mdb)
+{
+ return mdb->db_get_type();
+}
+
+int db_get_type_index(B_DB *mdb)
+{
+ return mdb->db_get_type_index();
+}
+
+bool db_open_database(JCR *jcr, B_DB *mdb)
+{
+ return mdb->db_open_database(jcr);
+}
+
+void db_close_database(JCR *jcr, B_DB *mdb)
+{
+ if (mdb) {
+ mdb->db_close_database(jcr);
+ }
+}
+
+void db_thread_cleanup(B_DB *mdb)
+{
+ mdb->db_thread_cleanup();
+}
+
+void db_escape_string(JCR *jcr, B_DB *mdb, char *snew, char *old, int len)
+{
+ mdb->db_escape_string(jcr, snew, old, len);
+}
+
+char *db_escape_object(JCR *jcr, B_DB *mdb, char *old, int len)
+{
+ return mdb->db_escape_object(jcr, old, len);
+}
+
+void db_unescape_object(JCR *jcr, B_DB *mdb,
+ char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *len)
+{
+ mdb->db_unescape_object(jcr, from, expected_len, dest, len);
+}
+
+void db_start_transaction(JCR *jcr, B_DB *mdb)
+{
+ mdb->db_start_transaction(jcr);
+}
+
+void db_end_transaction(JCR *jcr, B_DB *mdb)
+{
+ mdb->db_end_transaction(jcr);
+}
+
+bool db_sql_query(B_DB *mdb, const char *query, int flags)
+{
+ return mdb->db_sql_query(query, flags);
+}
+
+bool db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
+{
+ return mdb->db_sql_query(query, result_handler, ctx);
+}
+
+void sql_free_result(B_DB *mdb)
+{
+ ((B_DB_PRIV *)mdb)->sql_free_result();
+}
+
+SQL_ROW sql_fetch_row(B_DB *mdb)
+{
+ return ((B_DB_PRIV *)mdb)->sql_fetch_row();
+}
+
+bool sql_query(B_DB *mdb, const char *query, int flags)
+{
+ return ((B_DB_PRIV *)mdb)->sql_query(query, flags);
+}
+
+const char *sql_strerror(B_DB *mdb)
+{
+ return ((B_DB_PRIV *)mdb)->sql_strerror();
+}
+
+int sql_num_rows(B_DB *mdb)
+{
+ return ((B_DB_PRIV *)mdb)->sql_num_rows();
+}
+
+void sql_data_seek(B_DB *mdb, int row)
+{
+ ((B_DB_PRIV *)mdb)->sql_data_seek(row);
+}
+
+int sql_affected_rows(B_DB *mdb)
+{
+ return ((B_DB_PRIV *)mdb)->sql_affected_rows();
+}
+
+uint64_t sql_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name)
+{
+ return ((B_DB_PRIV *)mdb)->sql_insert_autokey_record(query, table_name);
+}
+
+void sql_field_seek(B_DB *mdb, int field)
+{
+ ((B_DB_PRIV *)mdb)->sql_field_seek(field);
+}
+
+SQL_FIELD *sql_fetch_field(B_DB *mdb)
+{
+ return ((B_DB_PRIV *)mdb)->sql_fetch_field();
+}
+
+int sql_num_fields(B_DB *mdb)
+{
+ return ((B_DB_PRIV *)mdb)->sql_num_fields();
+}
+
+bool sql_field_is_not_null(B_DB *mdb, int field_type)
+{
+ return ((B_DB_PRIV *)mdb)->sql_field_is_not_null(field_type);
+}
+
+bool sql_field_is_numeric(B_DB *mdb, int field_type)
+{
+ return ((B_DB_PRIV *)mdb)->sql_field_is_numeric(field_type);
+}
+
+bool sql_batch_start(JCR *jcr, B_DB *mdb)
+{
+ return ((B_DB_PRIV *)mdb)->sql_batch_start(jcr);
+}
+
+bool sql_batch_end(JCR *jcr, B_DB *mdb, const char *error)
+{
+ return ((B_DB_PRIV *)mdb)->sql_batch_end(jcr, error);
+}
+
+bool sql_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+{
+ return ((B_DB_PRIV *)mdb)->sql_batch_insert(jcr, ar);
+}
+
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
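The glue routines above preserve the traditional C-style db_*() and sql_*() entry points while delegating the real work to the B_DB (and B_DB_PRIV) virtual methods, so existing call sites compile unchanged. A minimal sketch of a caller that stays on the C interface; the query, the handler name and the Job counting are illustrative only, and the handler follows the (ctx, num_fields, row) convention used by the other result handlers in cats:

/* Hypothetical caller of the glue layer; nothing here touches a backend directly. */
#include "bacula.h"
#include "cats/cats.h"
#include "cats/sql_glue.h"

static int count_handler(void *ctx, int num_fields, char **row)
{
   int *count = (int *)ctx;               /* user context handed back untouched */
   (*count)++;
   return 0;
}

static int count_jobs(JCR *jcr, B_DB *mdb)
{
   int count = 0;
   /* db_sql_query() simply forwards to mdb->db_sql_query() */
   if (!db_sql_query(mdb, "SELECT JobId FROM Job", count_handler, &count)) {
      Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(mdb));
      return -1;
   }
   return count;
}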
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+#ifndef __SQL_GLUE_H_
+#define __SQL_GLUE_H_ 1
+
+/*
+ * Prototypes for entry points into the different backends.
+ */
+bool db_match_database(B_DB *mdb, const char *db_driver, const char *db_name,
+ const char *db_address, int db_port);
+B_DB *db_clone_database_connection(B_DB *mdb, JCR *jcr, bool mult_db_connections);
+int db_get_type_index(B_DB *mdb);
+const char *db_get_type(B_DB *mdb);
+B_DB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name,
+ const char *db_user, const char *db_password,
+ const char *db_address, int db_port,
+ const char *db_socket, bool mult_db_connections, bool disable_batch_insert);
+bool db_open_database(JCR *jcr, B_DB *mdb);
+void db_close_database(JCR *jcr, B_DB *mdb);
+void db_thread_cleanup(B_DB *mdb);
+void db_escape_string(JCR *jcr, B_DB *mdb, char *snew, char *old, int len);
+char *db_escape_object(JCR *jcr, B_DB *mdb, char *old, int len);
+void db_unescape_object(JCR *jcr, B_DB *mdb,
+ char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *len);
+void db_start_transaction(JCR *jcr, B_DB *mdb);
+void db_end_transaction(JCR *jcr, B_DB *mdb);
+bool db_sql_query(B_DB *mdb, const char *query, int flags=0);
+bool db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler, void *ctx);
+
+#ifdef _BDB_PRIV_INTERFACE_
+void sql_free_result(B_DB *mdb);
+SQL_ROW sql_fetch_row(B_DB *mdb);
+bool sql_query(B_DB *mdb, const char *query, int flags=0);
+const char *sql_strerror(B_DB *mdb);
+int sql_num_rows(B_DB *mdb);
+void sql_data_seek(B_DB *mdb, int row);
+int sql_affected_rows(B_DB *mdb);
+uint64_t sql_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name);
+void sql_field_seek(B_DB *mdb, int field);
+SQL_FIELD *sql_fetch_field(B_DB *mdb);
+int sql_num_fields(B_DB *mdb);
+bool sql_field_is_not_null(B_DB *mdb, int field_type);
+bool sql_field_is_numeric(B_DB *mdb, int field_type);
+bool sql_batch_start(JCR *jcr, B_DB *mdb);
+bool sql_batch_end(JCR *jcr, B_DB *mdb, const char *error);
+bool sql_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
+#endif /* _BDB_PRIV_INTERFACE_ */
+#endif /* __SQL_GLUE_H_ */
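The low-level sql_*() prototypes above are only visible when _BDB_PRIV_INTERFACE_ is defined; generic callers that include just cats.h and sql_glue.h see only the db_*() entry points. The backend sources in this patch pick the private interface up through bdb_priv.h, which is assumed here to be where that macro is defined. A sketch of the include order a backend translation unit follows:

/* Hypothetical backend source file; the macro is assumed to live in bdb_priv.h,
 * matching the include order used by the files in this patch. */
#include "bacula.h"
#include "cats.h"
#include "bdb_priv.h"      /* assumed to define _BDB_PRIV_INTERFACE_ */
#include "sql_glue.h"      /* now also declares sql_fetch_row(), sql_num_rows(), ... */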
* Version $Id: sql_list.c 8508 2009-03-07 20:59:46Z kerns $
*/
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
#include "bacula.h"
#include "cats.h"
-extern int db_type;
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+#include "cats.h"
+#include "bdb_priv.h"
+#include "sql_glue.h"
/* -----------------------------------------------------------------------
*
void *ctx, int verbose, e_list_type type)
{
db_lock(mdb);
- if (sql_query(mdb, query) != 0) {
+ if (!sql_query(mdb, query, QF_STORE_RESULT)) {
Mmsg(mdb->errmsg, _("Query failed: %s\n"), sql_strerror(mdb));
if (verbose) {
sendit(ctx, mdb->errmsg);
return 0;
}
- mdb->result = sql_store_result(mdb);
-
- if (mdb->result) {
- list_result(jcr, mdb, sendit, ctx, type);
- sql_free_result(mdb);
- }
+ list_result(jcr, mdb, sendit, ctx, type);
+ sql_free_result(mdb);
db_unlock(mdb);
return 1;
}
goto bail_out;
}
- if (mdb->result && sql_num_rows(mdb)) {
+ if (sql_num_rows(mdb)) {
if (JobIds && JobIds[0]) {
sendit(ctx, _("These JobIds have copies as follows:\n"));
} else {
/*
* Stupid MySQL is NON-STANDARD !
*/
- if (db_type == SQL_TYPE_MYSQL) {
+ if (db_get_type_index(mdb) == SQL_TYPE_MYSQL) {
Mmsg(mdb->cmd, "SELECT CONCAT(Path.Path,Filename.Name) AS Filename "
"FROM (SELECT PathId, FilenameId FROM File WHERE JobId=%s "
"UNION ALL "
/*
* Stupid MySQL is NON-STANDARD !
*/
- if (db_type == SQL_TYPE_MYSQL) {
+ if (db_get_type_index(mdb) == SQL_TYPE_MYSQL) {
Mmsg(mdb->cmd, "SELECT CONCAT(Path.Path,Filename.Name) AS Filename "
"FROM BaseFiles, File, Filename, Path "
"WHERE BaseFiles.JobId=%s AND BaseFiles.BaseJobId = File.JobId "
db_unlock(mdb);
}
-
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES */
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
* Version $Id: sql_update.c 8478 2009-02-18 20:11:55Z kerns $
*/
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
#include "bacula.h"
-#include "cats.h"
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
+
+#include "cats.h"
+#include "bdb_priv.h"
+#include "sql_glue.h"
/* -----------------------------------------------------------------------
*
}
}
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES */
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2011 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, January 2002
*
+ * Major rewrite by Marco van Wieringen, January 2010 for catalog refactoring.
*/
+#include "bacula.h"
+#if HAVE_SQLITE3
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
-#include "bacula.h"
#include "cats.h"
-
-#if HAVE_SQLITE || HAVE_SQLITE3
+#include "bdb_priv.h"
+#include <sqlite3.h>
+#include "bdb_sqlite.h"
/* -----------------------------------------------------------------------
*
* -----------------------------------------------------------------------
*/
-/* List of open databases */
+/*
+ * List of open databases
+ */
static dlist *db_list = NULL;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/*
- * Retrieve database type
- */
-const char *
-db_get_type(void)
-{
-#ifdef HAVE_SQLITE3
- return "SQLite3";
-#else
- return "SQLite";
-#endif
-}
-
-/*
- * When using mult_db_connections = 1,
+ * When using mult_db_connections = true,
* sqlite can be BUSY. We just need sleep a little in this case.
*/
-
-#ifdef HAVE_SQLITE3
-static int my_busy_handler(void *arg, int calls)
+static int sqlite_busy_handler(void *arg, int calls)
{
bmicrosleep(0, 500);
return 1;
}
-#else
-static int my_busy_handler(void *arg, const char* p, int calls)
-{
- bmicrosleep(0, 500);
- return 1;
-}
-#endif
-
-/*
- * Initialize database data structure. In principal this should
- * never have errors, or it is really fatal.
- */
-B_DB *
-db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password,
- const char *db_address, int db_port, const char *db_socket,
- int mult_db_connections)
+B_DB_SQLITE::B_DB_SQLITE(JCR *jcr,
+ const char *db_driver,
+ const char *db_name,
+ const char *db_user,
+ const char *db_password,
+ const char *db_address,
+ int db_port,
+ const char *db_socket,
+ bool mult_db_connections,
+ bool disable_batch_insert)
{
- B_DB *mdb = NULL;
+ /*
+ * Initialize the parent class members.
+ */
+ m_db_interface_type = SQL_INTERFACE_TYPE_SQLITE3;
+ m_db_type = SQL_TYPE_SQLITE3;
+ m_db_driver = bstrdup("SQLite3");
+ m_db_name = bstrdup(db_name);
+ if (disable_batch_insert) {
+ m_disabled_batch_insert = true;
+ m_have_batch_insert = false;
+ } else {
+ m_disabled_batch_insert = false;
+#if defined(USE_BATCH_FILE_INSERT)
+#if defined(HAVE_SQLITE3_THREADSAFE)
+ m_have_batch_insert = sqlite3_threadsafe();
+#else
+ m_have_batch_insert = false;
+#endif /* HAVE_SQLITE3_THREADSAFE */
+#else
+ m_have_batch_insert = false;
+#endif /* USE_BATCH_FILE_INSERT */
+ }
+ errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
+ *errmsg = 0;
+ cmd = get_pool_memory(PM_EMSG); /* get command buffer */
+ cached_path = get_pool_memory(PM_FNAME);
+ cached_path_id = 0;
+ m_ref_count = 1;
+ fname = get_pool_memory(PM_FNAME);
+ path = get_pool_memory(PM_FNAME);
+ esc_name = get_pool_memory(PM_FNAME);
+ esc_path = get_pool_memory(PM_FNAME);
+ esc_obj = get_pool_memory(PM_FNAME);
+ m_allow_transactions = mult_db_connections;
- P(mutex); /* lock DB queue */
+ /*
+ * Initialize the private members.
+ */
+ m_db_handle = NULL;
+ m_result = NULL;
+ m_sqlite_errmsg = NULL;
+
+ /*
+ * Put the db in the list.
+ */
if (db_list == NULL) {
- db_list = New(dlist(mdb, &mdb->link));
+ db_list = New(dlist(this, &this->m_link));
}
- /* Look to see if DB already open */
- if (!mult_db_connections) {
- foreach_dlist(mdb, db_list) {
- if (bstrcmp(mdb->db_name, db_name) &&
- bstrcmp(mdb->db_address, db_address) &&
- mdb->db_port == db_port) {
- Dmsg2(300, "DB REopen %d %s\n", mdb->ref_count, db_name);
- mdb->ref_count++;
- V(mutex);
- return mdb; /* already open */
- }
- }
- }
- Dmsg0(300, "db_open first time\n");
- mdb = (B_DB *)malloc(sizeof(B_DB));
- memset(mdb, 0, sizeof(B_DB));
- mdb->db_name = bstrdup(db_name);
- mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
- *mdb->errmsg = 0;
- mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */
- mdb->cached_path = get_pool_memory(PM_FNAME);
- mdb->cached_path_id = 0;
- mdb->ref_count = 1;
- mdb->fname = get_pool_memory(PM_FNAME);
- mdb->path = get_pool_memory(PM_FNAME);
- mdb->esc_name = get_pool_memory(PM_FNAME);
- mdb->esc_path = get_pool_memory(PM_FNAME);
- mdb->esc_obj = get_pool_memory(PM_FNAME);
- mdb->allow_transactions = mult_db_connections;
- db_list->append(mdb);
- V(mutex);
- return mdb;
+ db_list->append(this);
+}
+
+B_DB_SQLITE::~B_DB_SQLITE()
+{
}
/*
* Now actually open the database. This can generate errors,
* which are returned in the errmsg
*
- * DO NOT close the database or free(mdb) here !!!!
+ * DO NOT close the database or delete mdb here !!!!
*/
-int
-db_open_database(JCR *jcr, B_DB *mdb)
+bool B_DB_SQLITE::db_open_database(JCR *jcr)
{
- char *db_name;
+ bool retval = false;
+ char *db_path;
int len;
struct stat statbuf;
+ int ret;
int errstat;
int retry = 0;
P(mutex);
- if (mdb->connected) {
- V(mutex);
- return 1;
+ if (m_connected) {
+ retval = true;
+ goto bail_out;
}
- mdb->connected = FALSE;
- if ((errstat=rwl_init(&mdb->lock)) != 0) {
+ if ((errstat=rwl_init(&m_lock)) != 0) {
berrno be;
- Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
+ Mmsg1(&errmsg, _("Unable to initialize DB lock. ERR=%s\n"),
be.bstrerror(errstat));
- V(mutex);
- return 0;
+ goto bail_out;
}
- /* open the database */
- len = strlen(working_directory) + strlen(mdb->db_name) + 5;
- db_name = (char *)malloc(len);
- strcpy(db_name, working_directory);
- strcat(db_name, "/");
- strcat(db_name, mdb->db_name);
- strcat(db_name, ".db");
- if (stat(db_name, &statbuf) != 0) {
- Mmsg1(&mdb->errmsg, _("Database %s does not exist, please create it.\n"),
- db_name);
- free(db_name);
- V(mutex);
- return 0;
+ /*
+ * Open the database
+ */
+ len = strlen(working_directory) + strlen(m_db_name) + 5;
+ db_path = (char *)malloc(len);
+ strcpy(db_path, working_directory);
+ strcat(db_path, "/");
+ strcat(db_path, m_db_name);
+ strcat(db_path, ".db");
+ if (stat(db_path, &statbuf) != 0) {
+ Mmsg1(&errmsg, _("Database %s does not exist, please create it.\n"),
+ db_path);
+ free(db_path);
+ goto bail_out;
}
- for (mdb->db=NULL; !mdb->db && retry++ < 10; ) {
-#ifdef HAVE_SQLITE3
- int stat = sqlite3_open(db_name, &mdb->db);
- if (stat != SQLITE_OK) {
- mdb->sqlite_errmsg = (char *)sqlite3_errmsg(mdb->db);
- sqlite3_close(mdb->db);
- mdb->db = NULL;
+ for (m_db_handle = NULL; !m_db_handle && retry++ < 10; ) {
+ ret = sqlite3_open(db_path, &m_db_handle);
+ if (ret != SQLITE_OK) {
+ m_sqlite_errmsg = (char *)sqlite3_errmsg(m_db_handle);
+ sqlite3_close(m_db_handle);
+ m_db_handle = NULL;
} else {
- mdb->sqlite_errmsg = NULL;
+ m_sqlite_errmsg = NULL;
}
-#else
- mdb->db = sqlite_open(
- db_name, /* database name */
- 644, /* mode */
- &mdb->sqlite_errmsg); /* error message */
-#endif
Dmsg0(300, "sqlite_open\n");
- if (!mdb->db) {
+ if (!m_db_handle) {
bmicrosleep(1, 0);
}
}
- if (mdb->db == NULL) {
- Mmsg2(&mdb->errmsg, _("Unable to open Database=%s. ERR=%s\n"),
- db_name, mdb->sqlite_errmsg ? mdb->sqlite_errmsg : _("unknown"));
- free(db_name);
- V(mutex);
- return 0;
+ if (m_db_handle == NULL) {
+ Mmsg2(&errmsg, _("Unable to open Database=%s. ERR=%s\n"),
+ db_path, m_sqlite_errmsg ? m_sqlite_errmsg : _("unknown"));
+ free(db_path);
+ goto bail_out;
}
- mdb->connected = true;
- free(db_name);
+ m_connected = true;
+ free(db_path);
- /* set busy handler to wait when we use mult_db_connections = 1 */
-#ifdef HAVE_SQLITE3
- sqlite3_busy_handler(mdb->db, my_busy_handler, NULL);
-#else
- sqlite_busy_handler(mdb->db, my_busy_handler, NULL);
-#endif
+ /*
+ * Set busy handler to wait when we use mult_db_connections = true
+ */
+ sqlite3_busy_handler(m_db_handle, sqlite_busy_handler, NULL);
-#if defined(HAVE_SQLITE3) && defined(SQLITE3_INIT_QUERY)
- db_sql_query(mdb, SQLITE3_INIT_QUERY, NULL, NULL);
+#if defined(SQLITE3_INIT_QUERY)
+ sql_query(SQLITE3_INIT_QUERY);
#endif
- if (!check_tables_version(jcr, mdb)) {
- V(mutex);
- return 0;
+ if (!check_tables_version(jcr, this)) {
+ goto bail_out;
}
+ retval = true;
+bail_out:
V(mutex);
- return 1;
+ return retval;
}
-void
-db_close_database(JCR *jcr, B_DB *mdb)
+void B_DB_SQLITE::db_close_database(JCR *jcr)
{
- if (!mdb) {
- return;
- }
- db_end_transaction(jcr, mdb);
+ db_end_transaction(jcr);
P(mutex);
- sql_free_result(mdb);
- mdb->ref_count--;
- if (mdb->ref_count == 0) {
- db_list->remove(mdb);
- if (mdb->connected && mdb->db) {
- sqlite_close(mdb->db);
+ sql_free_result();
+ m_ref_count--;
+ if (m_ref_count == 0) {
+ db_list->remove(this);
+ if (m_connected && m_db_handle) {
+ sqlite3_close(m_db_handle);
+ }
+ rwl_destroy(&m_lock);
+ free_pool_memory(errmsg);
+ free_pool_memory(cmd);
+ free_pool_memory(cached_path);
+ free_pool_memory(fname);
+ free_pool_memory(path);
+ free_pool_memory(esc_name);
+ free_pool_memory(esc_path);
+ free_pool_memory(esc_obj);
+ if (m_db_driver) {
+ free(m_db_driver);
}
- rwl_destroy(&mdb->lock);
- free_pool_memory(mdb->errmsg);
- free_pool_memory(mdb->cmd);
- free_pool_memory(mdb->cached_path);
- free_pool_memory(mdb->fname);
- free_pool_memory(mdb->path);
- free_pool_memory(mdb->esc_name);
- free_pool_memory(mdb->esc_path);
- free_pool_memory(mdb->esc_obj);
- if (mdb->db_name) {
- free(mdb->db_name);
+ if (m_db_name) {
+ free(m_db_name);
}
- free(mdb);
+ delete this;
if (db_list->size() == 0) {
delete db_list;
db_list = NULL;
V(mutex);
}
-void db_check_backend_thread_safe()
+void B_DB_SQLITE::db_thread_cleanup(void)
{
-#ifdef HAVE_BATCH_FILE_INSERT
-# ifdef HAVE_SQLITE3_THREADSAFE
- if (!sqlite3_threadsafe()) {
- Emsg0(M_ABORT, 0, _("SQLite3 client library must be thread-safe "
- "when using BatchMode.\n"));
- }
-# endif
-#endif
-}
-
-void db_thread_cleanup()
-{
-#ifdef HAVE_SQLITE3
sqlite3_thread_cleanup();
-#endif
}
/*
- * Return the next unique index (auto-increment) for
- * the given table. Return 0 on error.
+ * Escape strings so that SQLite is happy
+ *
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
*/
-int db_next_index(JCR *jcr, B_DB *mdb, char *table, char *index)
+void B_DB_SQLITE::db_escape_string(JCR *jcr, char *snew, char *old, int len)
{
- strcpy(index, "NULL");
- return 1;
+ char *n, *o;
+
+ n = snew;
+ o = old;
+ while (len--) {
+ switch (*o) {
+ case '\'':
+ *n++ = '\'';
+ *n++ = '\'';
+ o++;
+ break;
+ case 0:
+ *n++ = '\\';
+ *n++ = 0;
+ o++;
+ break;
+ default:
+ *n++ = *o++;
+ break;
+ }
+ }
+ *n = 0;
}
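The routine above doubles single quotes (and guards an embedded NUL) so a value can be placed inside an SQL string literal; the destination buffer must provide at least 2*len+1 bytes. A short sketch of the caller side, following the pool-memory sizing pattern used elsewhere in this patch; the variable names are illustrative:

/* Hypothetical call site: escape a raw name before building a query. */
int len = strlen(name);
mdb->esc_name = check_pool_memory_size(mdb->esc_name, len * 2 + 1);
db_escape_string(jcr, mdb, mdb->esc_name, name, len);
Mmsg(mdb->cmd, "SELECT FilenameId FROM Filename WHERE Name='%s'", mdb->esc_name);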
/*
*
* TODO: this should be implemented (escape \0)
*/
-char *
-db_escape_object(JCR *jcr, B_DB *mdb, char *old, int len)
+char *B_DB_SQLITE::db_escape_object(JCR *jcr, char *old, int len)
{
char *n, *o;
- n = mdb->esc_obj = check_pool_memory_size(mdb->esc_obj, len*2+1);
+ n = esc_obj = check_pool_memory_size(esc_obj, len*2+1);
o = old;
while (len--) {
switch (*o) {
}
}
*n = 0;
- return mdb->esc_obj;
+ return esc_obj;
}
/*
*
* TODO: need to be implemented (escape \0)
*/
-void
-db_unescape_object(JCR *jcr, B_DB *mdb,
- char *from, int32_t expected_len,
- POOLMEM **dest, int32_t *dest_len)
+void B_DB_SQLITE::db_unescape_object(JCR *jcr, char *from, int32_t expected_len,
+ POOLMEM **dest, int32_t *dest_len)
{
if (!from) {
*dest[0] = 0;
}
/*
- * Escape strings so that SQLite is happy
- *
- * NOTE! len is the length of the old string. Your new
- * string must be long enough (max 2*old+1) to hold
- * the escaped output.
+ * Start a transaction. This groups inserts and makes things
+ * much more efficient. Usually started when inserting
+ * file attributes.
*/
-void
-db_escape_string(JCR *jcr, B_DB *mdb, char *snew, char *old, int len)
+void B_DB_SQLITE::db_start_transaction(JCR *jcr)
{
- char *n, *o;
+ if (!jcr->attr) {
+ jcr->attr = get_pool_memory(PM_FNAME);
+ }
+ if (!jcr->ar) {
+ jcr->ar = (ATTR_DBR *)malloc(sizeof(ATTR_DBR));
+ }
- n = snew;
- o = old;
- while (len--) {
- switch (*o) {
- case '\'':
- *n++ = '\'';
- *n++ = '\'';
- o++;
- break;
- case 0:
- *n++ = '\\';
- *n++ = 0;
- o++;
- break;
- default:
- *n++ = *o++;
- break;
+ if (!m_allow_transactions) {
+ return;
+ }
+
+ db_lock(this);
+ /*
+ * Allow only 10,000 changes per transaction
+ */
+ if (m_transaction && changes > 10000) {
+ db_end_transaction(jcr);
+ }
+ if (!m_transaction) {
+ sql_query("BEGIN"); /* begin transaction */
+ Dmsg0(400, "Start SQLite transaction\n");
+ m_transaction = true;
+ }
+ db_unlock(this);
+}
+
+void B_DB_SQLITE::db_end_transaction(JCR *jcr)
+{
+ if (jcr && jcr->cached_attribute) {
+ Dmsg0(400, "Flush last cached attribute.\n");
+ if (!db_create_attributes_record(jcr, this, jcr->ar)) {
+ Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
}
+ jcr->cached_attribute = false;
}
- *n = 0;
+
+ if (!m_allow_transactions) {
+ return;
+ }
+
+ db_lock(this);
+ if (m_transaction) {
+ sql_query("COMMIT"); /* end transaction */
+ m_transaction = false;
+ Dmsg1(400, "End SQLite transaction changes=%d\n", changes);
+ }
+ changes = 0;
+ db_unlock(this);
}
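The two methods above use an SQL transaction purely as an insert-batching device: db_start_transaction() opens one if none is active (and rolls it over once more than 10,000 changes have accumulated), while db_end_transaction() commits and also flushes a cached attribute record. A minimal sketch of how an insert path is expected to bracket its work; get_next_attribute() is a placeholder iterator, not part of this patch:

/* Hypothetical insert loop bracketed by the transaction helpers. */
db_start_transaction(jcr, mdb);            /* BEGIN, if no transaction is active yet */
while (get_next_attribute(jcr, ar)) {      /* placeholder, not part of this patch */
   if (!db_create_attributes_record(jcr, mdb, ar)) {
      Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(mdb));
      break;
   }
}
db_end_transaction(jcr, mdb);              /* COMMIT */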
struct rh_data {
/*
* Convert SQLite's callback into Bacula DB callback
*/
-static int sqlite_result(void *arh_data, int num_fields, char **rows, char **col_names)
+static int sqlite_sqlite_result(void *arh_data, int num_fields, char **rows, char **col_names)
{
struct rh_data *rh_data = (struct rh_data *)arh_data;
/*
* Submit a general SQL command (cmd), and for each row returned,
- * the sqlite_handler is called with the ctx.
+ * the result_handler is called with the ctx.
*/
-bool db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
+bool B_DB_SQLITE::db_sql_query(const char *query, DB_RESULT_HANDLER *result_handler, void *ctx)
{
- struct rh_data rh_data;
+ bool retval = false;
int stat;
+ struct rh_data rh_data;
- db_lock(mdb);
- if (mdb->sqlite_errmsg) {
-#ifdef HAVE_SQLITE3
- sqlite3_free(mdb->sqlite_errmsg);
-#else
- actuallyfree(mdb->sqlite_errmsg);
-#endif
- mdb->sqlite_errmsg = NULL;
+ Dmsg1(500, "db_sql_query starts with '%s'\n", query);
+
+ db_lock(this);
+ if (m_sqlite_errmsg) {
+ sqlite3_free(m_sqlite_errmsg);
+ m_sqlite_errmsg = NULL;
}
rh_data.result_handler = result_handler;
rh_data.ctx = ctx;
- stat = sqlite_exec(mdb->db, query, sqlite_result, (void *)&rh_data, &mdb->sqlite_errmsg);
+ stat = sqlite3_exec(m_db_handle, query, sqlite_sqlite_result, (void *)&rh_data, &m_sqlite_errmsg);
if (stat != SQLITE_OK) {
- Mmsg(mdb->errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror(mdb));
- db_unlock(mdb);
- return false;
+ Mmsg(errmsg, _("Query failed: %s: ERR=%s\n"), query, sql_strerror());
+ Dmsg0(500, "db_sql_query finished\n");
+ goto bail_out;
}
- db_unlock(mdb);
- return true;
+ Dmsg0(500, "db_sql_query finished\n");
+ retval = true;
+
+bail_out:
+ db_unlock(this);
+ return retval;
}
/*
* Submit a sqlite query and retrieve all the data
*/
-int my_sqlite_query(B_DB *mdb, const char *cmd)
+bool B_DB_SQLITE::sql_query(const char *query, int flags)
{
int stat;
+ bool retval = false;
- my_sqlite_free_table(mdb);
- if (mdb->sqlite_errmsg) {
-#ifdef HAVE_SQLITE3
- sqlite3_free(mdb->sqlite_errmsg);
-#else
- actuallyfree(mdb->sqlite_errmsg);
-#endif
- mdb->sqlite_errmsg = NULL;
+ Dmsg1(500, "sql_query starts with '%s'\n", query);
+
+ if (m_result) {
+ sql_free_result();
+ }
+ if (m_sqlite_errmsg) {
+ sqlite3_free(m_sqlite_errmsg);
+ m_sqlite_errmsg = NULL;
}
- stat = sqlite_get_table(mdb->db, (char *)cmd, &mdb->result, &mdb->nrow, &mdb->ncolumn,
- &mdb->sqlite_errmsg);
- mdb->row = 0; /* no row fetched yet */
+
+ stat = sqlite3_get_table(m_db_handle, (char *)query, &m_result,
+ &m_num_rows, &m_num_fields, &m_sqlite_errmsg);
+
+ m_row_number = 0; /* no row fetched */
if (stat != 0) { /* something went wrong */
- mdb->nrow = mdb->ncolumn = 0;
+ m_num_rows = m_num_fields = 0;
+ Dmsg0(500, "sql_query finished\n");
+ } else {
+ Dmsg0(500, "sql_query finished\n");
+ retval = true;
}
- return stat;
+ return retval;
}
-/* Fetch one row at a time */
-SQL_ROW my_sqlite_fetch_row(B_DB *mdb)
+void B_DB_SQLITE::sql_free_result(void)
{
- if (!mdb->result || (mdb->row >= mdb->nrow)) {
+ db_lock(this);
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
+ }
+ if (m_result) {
+ sqlite3_free_table(m_result);
+ m_result = NULL;
+ }
+ m_num_rows = m_num_fields = 0;
+ db_unlock(this);
+}
+
+/*
+ * Fetch one row at a time
+ */
+SQL_ROW B_DB_SQLITE::sql_fetch_row(void)
+{
+ if (!m_result || (m_row_number >= m_num_rows)) {
return NULL;
}
- mdb->row++;
- return &mdb->result[mdb->ncolumn * mdb->row];
+ m_row_number++;
+ return &m_result[m_num_fields * m_row_number];
}
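sqlite3_get_table() returns one flat array whose first m_num_fields entries are the column names; that is why sql_fetch_row() pre-increments the row number, so the first call already points at the first data row. A sketch of the usual fetch loop over a stored result; the query is illustrative:

/* Hypothetical fetch loop over a stored result set. */
SQL_ROW row;
if (sql_query(mdb, "SELECT JobId,Name FROM Job", QF_STORE_RESULT)) {
   while ((row = sql_fetch_row(mdb)) != NULL) {
      Dmsg2(100, "JobId=%s Name=%s\n", row[0], row[1]);
   }
   sql_free_result(mdb);
}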
-void my_sqlite_free_table(B_DB *mdb)
+const char *B_DB_SQLITE::sql_strerror(void)
{
- int i;
+ return m_sqlite_errmsg ? m_sqlite_errmsg : "unknown";
+}
- if (mdb->fields_defined) {
- for (i=0; i < sql_num_fields(mdb); i++) {
- if (mdb->fields[i]) {
- free(mdb->fields[i]);
- mdb->fields[i] = NULL;
- }
- }
- if (mdb->fields) {
- free(mdb->fields);
- mdb->fields = NULL;
- }
- mdb->fields_defined = false;
+void B_DB_SQLITE::sql_data_seek(int row)
+{
+ /*
+ * Set the row number to be returned on the next call to sql_fetch_row
+ */
+ m_row_number = row;
+}
+
+int B_DB_SQLITE::sql_affected_rows(void)
+{
+ return sqlite3_changes(m_db_handle);
+}
+
+uint64_t B_DB_SQLITE::sql_insert_autokey_record(const char *query, const char *table_name)
+{
+ /*
+ * First execute the insert query and then retrieve the last insert rowid.
+ */
+ if (!sql_query(query)) {
+ return 0;
}
- if (mdb->result) {
- sqlite_free_table(mdb->result);
- mdb->result = NULL;
+
+ m_num_rows = sql_affected_rows();
+ if (m_num_rows != 1) {
+ return 0;
}
- mdb->nrow = mdb->ncolumn = 0;
+
+ changes++;
+
+ return sqlite3_last_insert_rowid(m_db_handle);
}
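sql_insert_autokey_record() runs the INSERT and, provided exactly one row was affected, returns sqlite3_last_insert_rowid() as the newly generated key (0 signals failure). A sketch of the calling pattern from inside the cats library, where the private interface is visible; the query text is illustrative:

/* Hypothetical use: run an INSERT and keep the generated primary key. */
Mmsg(mdb->cmd, "INSERT INTO Path (Path) VALUES ('%s')", mdb->esc_path);
uint64_t id = sql_insert_autokey_record(mdb, mdb->cmd, "Path");
if (id == 0) {
   Mmsg(mdb->errmsg, _("Insert failed: %s\n"), sql_strerror(mdb));
}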
-void my_sqlite_field_seek(B_DB *mdb, int field)
+SQL_FIELD *B_DB_SQLITE::sql_fetch_field(void)
{
- int i, j;
- if (mdb->result == NULL) {
- mdb->field = 0;
- return;
- }
- /* On first call, set up the fields */
- if (!mdb->fields_defined && sql_num_fields(mdb) > 0) {
- mdb->fields = (SQL_FIELD **)malloc(sizeof(SQL_FIELD) * mdb->ncolumn);
- for (i=0; i < sql_num_fields(mdb); i++) {
- mdb->fields[i] = (SQL_FIELD *)malloc(sizeof(SQL_FIELD));
- /* ***FIXME*** it seems to me that this is wrong
- * fields has lots of items
- */
- if (mdb->result[i] == NULL) {
- mdb->fields_defined = false;
- free(mdb->fields);
- mdb->fields = NULL;
- mdb->field = 0;
- return;
- }
- mdb->fields[i]->name = mdb->result[i];
- mdb->fields[i]->length = cstrlen(mdb->fields[i]->name);
- mdb->fields[i]->max_length = mdb->fields[i]->length;
- for (j=1; j <= mdb->nrow; j++) {
- int len;
- if (mdb->result[i + mdb->ncolumn *j]) {
- len = (uint32_t)cstrlen(mdb->result[i + mdb->ncolumn * j]);
+ int i, j, len;
+
+ if (!m_fields || m_fields_size < m_num_fields) {
+ if (m_fields) {
+ free(m_fields);
+ m_fields = NULL;
+ }
+ Dmsg1(500, "allocating space for %d fields\n", m_num_fields);
+ m_fields = (SQL_FIELD *)malloc(sizeof(SQL_FIELD) * m_num_fields);
+ m_fields_size = m_num_fields;
+
+ for (i = 0; i < m_num_fields; i++) {
+ Dmsg1(500, "filling field %d\n", i);
+ m_fields[i].name = m_result[i];
+ m_fields[i].max_length = cstrlen(m_fields[i].name);
+ for (j = 1; j <= m_num_rows; j++) {
+ if (m_result[i + m_num_fields * j]) {
+ len = (uint32_t)cstrlen(m_result[i + m_num_fields * j]);
} else {
len = 0;
}
- if (len > mdb->fields[i]->max_length) {
- mdb->fields[i]->max_length = len;
+ if (len > m_fields[i].max_length) {
+ m_fields[i].max_length = len;
}
}
- mdb->fields[i]->type = 0;
- mdb->fields[i]->flags = 1; /* not null */
+ m_fields[i].type = 0;
+ m_fields[i].flags = 1; /* not null */
+
+ Dmsg4(500, "sql_fetch_field finds field '%s' has length='%d' type='%d' and IsNull=%d\n",
+ m_fields[i].name, m_fields[i].max_length, m_fields[i].type, m_fields[i].flags);
}
- mdb->fields_defined = true;
- }
- if (sql_num_fields(mdb) <= 0) {
- field = 0;
- } else if (field > sql_num_fields(mdb) - 1) {
- field = sql_num_fields(mdb) - 1;
- }
- mdb->field = field;
+ }
+
+ /*
+ * Increment field number for the next time around
+ */
+ return &m_fields[m_field_number++];
}
-SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb)
+bool B_DB_SQLITE::sql_field_is_not_null(int field_type)
{
- if (mdb->fields_defined && mdb->field < sql_num_fields(mdb)) {
- return mdb->fields[mdb->field++];
- } else {
- mdb->field = 0;
- return NULL;
+ switch (field_type) {
+ case 1:
+ return true;
+ default:
+ return false;
}
}
-uint64_t my_sqlite_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name)
+bool B_DB_SQLITE::sql_field_is_numeric(int field_type)
{
- /*
- * First execute the insert query and then retrieve the currval.
- */
- if (my_sqlite_query(mdb, query)) {
- return 0;
+ switch (field_type) {
+ case 1:
+ return true;
+ default:
+ return false;
}
+}
- mdb->num_rows = sql_affected_rows(mdb);
- if (mdb->num_rows != 1) {
- return 0;
- }
+/*
+ * Returns true if OK
+ * false if failed
+ */
+bool B_DB_SQLITE::sql_batch_start(JCR *jcr)
+{
+ bool retval;
+
+ db_lock(this);
+ retval = sql_query("CREATE TEMPORARY TABLE batch ("
+ "FileIndex integer,"
+ "JobId integer,"
+ "Path blob,"
+ "Name blob,"
+ "LStat tinyblob,"
+ "MD5 tinyblob,"
+ "MarkId integer)");
+ db_unlock(this);
+
+ return retval;
+}
- mdb->changes++;
+/* set error to something to abort operation */
+/*
+ * Returns true if OK
+ * false if failed
+ */
+bool B_DB_SQLITE::sql_batch_end(JCR *jcr, const char *error)
+{
+ m_status = 0;
-#ifdef HAVE_SQLITE3
- return sqlite3_last_insert_rowid(mdb->db);
-#else
- return sqlite_last_insert_rowid(mdb->db);
-#endif
+ return true;
}
-#ifdef HAVE_BATCH_FILE_INSERT
-const char *my_sqlite_batch_lock_query = "BEGIN";
-const char *my_sqlite_batch_unlock_query = "COMMIT";
+/*
+ * Returns true if OK
+ * false if failed
+ */
+bool B_DB_SQLITE::sql_batch_insert(JCR *jcr, ATTR_DBR *ar)
+{
+ size_t len;
+ const char *digest;
+ char ed1[50];
+
+ esc_name = check_pool_memory_size(esc_name, fnl*2+1);
+ db_escape_string(jcr, esc_name, fname, fnl);
-const char *my_sqlite_batch_fill_path_query =
- "INSERT INTO Path (Path)"
- " SELECT DISTINCT Path FROM batch"
- " EXCEPT SELECT Path FROM Path";
+ esc_path = check_pool_memory_size(esc_path, pnl*2+1);
+ db_escape_string(jcr, esc_path, path, pnl);
-const char *my_sqlite_batch_fill_filename_query =
- "INSERT INTO Filename (Name)"
- " SELECT DISTINCT Name FROM batch "
- " EXCEPT SELECT Name FROM Filename";
-#endif /* HAVE_BATCH_FILE_INSERT */
+ if (ar->Digest == NULL || ar->Digest[0] == 0) {
+ digest = "0";
+ } else {
+ digest = ar->Digest;
+ }
+ len = Mmsg(cmd, "INSERT INTO batch VALUES "
+ "(%u,%s,'%s','%s','%s','%s',%u)",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), esc_path,
+ esc_name, ar->attr, digest, ar->DeltaSeq);
+
+ return sql_query(cmd);
+}
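The three sql_batch_*() methods above implement the SQLite side of batch file inserts: sql_batch_start() creates a temporary batch table, sql_batch_insert() appends one escaped row per attribute record, and sql_batch_end() closes the cycle (for SQLite it only resets the status). A condensed sketch of the driver-side sequence; next_attribute() stands in for the real iteration, which lives in the generic batch-insert path:

/* Hypothetical driver for one batch-insert cycle. */
if (!sql_batch_start(jcr, mdb)) {
   return false;
}
while (next_attribute(jcr, ar)) {          /* placeholder, not part of this patch */
   if (!sql_batch_insert(jcr, mdb, ar)) {
      sql_batch_end(jcr, mdb, "batch insert failed");
      return false;
   }
}
return sql_batch_end(jcr, mdb, NULL);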
+
+/*
+ * Initialize database data structure. In principle this should
+ * never have errors, or it is really fatal.
+ */
+B_DB *db_init_database(JCR *jcr, const char *db_driver, const char *db_name,
+ const char *db_user, const char *db_password,
+ const char *db_address, int db_port,
+ const char *db_socket, bool mult_db_connections,
+ bool disable_batch_insert)
+{
+ B_DB *mdb = NULL;
+
+ P(mutex); /* lock DB queue */
+ /*
+ * Look to see if DB already open
+ */
+ if (db_list && !mult_db_connections) {
+ foreach_dlist(mdb, db_list) {
+ if (mdb->db_match_database(db_driver, db_name, db_address, db_port)) {
+ Dmsg1(300, "DB REopen %s\n", db_name);
+ mdb->increment_refcount();
+ goto bail_out;
+ }
+ }
+ }
+ Dmsg0(300, "db_init_database first time\n");
+ mdb = New(B_DB_SQLITE(jcr, db_driver, db_name, db_user, db_password,
+ db_address, db_port, db_socket, mult_db_connections,
+ disable_batch_insert));
+
+bail_out:
+ V(mutex);
+ return mdb;
+}
-#endif /* HAVE_SQLITE */
+#endif /* HAVE_SQLITE3 */
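With this rewrite db_init_database() takes the driver name plus a disable_batch_insert flag, and reuses an already open connection unless mult_db_connections is set; the matching call-site updates appear in the director hunks further down. A sketch of the new open/close cycle with placeholder argument values:

/* Hypothetical open/close cycle using the new signature. */
B_DB *db = db_init_database(jcr, "SQLite3", "bacula", "bacula", "",
                            NULL, 0, NULL,
                            false /* mult_db_connections */,
                            false /* disable_batch_insert */);
if (!db || !db_open_database(jcr, db)) {
   Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"), "bacula");
} else {
   /* ... queries go through the sql_glue layer ... */
   db_close_database(jcr, db);
}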
#
# shell script to invoke SQLite on Bacula database
-bindir=@SQL_BINDIR@
+bindir=@SQLITE_BINDIR@
db_name=@db_name@
$bindir/sqlite @working_dir@/${db_name}.db
#!/bin/sh
#
# This routine alters the appropriately configured
-# Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite.
+# Bacula tables for PostgreSQL, Ingres, MySQL, or SQLite.
#
-if test xsqlite3 = x@DB_TYPE@ ; then
- echo "Altering SQLite tables"
- @scriptdir@/update_@DB_TYPE@_tables $*
-fi
-if test xmysql = x@DB_TYPE@ ; then
- echo "Altering MySQL tables"
- @scriptdir@/update_mysql_tables $*
-fi
-if test xingres = x@DB_TYPE@ ; then
- echo "Altering Ingres tables"
- @scriptdir@/update_ingres_tables $*
+
+default_db_type=@DEFAULT_DB_TYPE@
+
+#
+# See if the first argument is a valid backend name.
+# If so, the user overrides the default database backend.
+#
+if [ $# -gt 0 ]; then
+ case $1 in
+ sqlite3)
+ db_type=$1
+ shift
+ ;;
+ mysql)
+ db_type=$1
+ shift
+ ;;
+ postgresql)
+ db_type=$1
+ shift
+ ;;
+ ingres)
+ db_type=$1
+ shift
+ ;;
+ *)
+ ;;
+ esac
fi
-if test xpostgresql = x@DB_TYPE@ ; then
- echo "Altering PostgreSQL tables"
- @scriptdir@/update_postgresql_tables $*
+
+#
+# If no new db_type is given, use the default db_type.
+#
+if [ -z "${db_type}" ]; then
+ db_type="${default_db_type}"
fi
+
+echo "Altering ${db_type} tables"
+@scriptdir@/update_${db_type}_tables $*
echo " "
echo "This script will update a Bacula Ingres database (if needed)"
echo " "
-bindir=@SQL_BINDIR@
+bindir=@INGRES_BINDIR@
db_name=@db_name@
echo "Update of Bacula Ingres tables succeeded. (nothing to do)"
echo " which is needed to convert from Bacula Enterprise version 2.6.x to 6.0.x"
echo " or Bacula Community version 5.0.x to 5.2.x"
echo " "
-bindir=@SQL_BINDIR@
+bindir=@MYSQL_BINDIR@
PATH="$bindir:$PATH"
db_name=@db_name@
echo " or Bacula Community version 5.0.x to 5.2.x"
echo " "
-bindir=@SQL_BINDIR@
+bindir=@POSTGRESQL_BINDIR@
PATH="$bindir:$PATH"
db_name=@db_name@
echo " or Bacula Community version 5.0.x to 5.2.x"
echo " "
-bindir=@SQL_BINDIR@
+bindir=@SQLITE_BINDIR@
PATH="$bindir:$PATH"
cd @working_dir@
-sqlite=@DB_TYPE@
db_name=@db_name@
-DBVERSION=`${sqlite} ${db_name}.db <<END
+DBVERSION=`sqlite3 ${db_name}.db <<END
select VersionId from Version;
END
`
exit 1
fi
-${sqlite} $* ${db_name}.db <<END-OF-DATA
+sqlite3 $* ${db_name}.db <<END-OF-DATA
BEGIN;
CREATE TABLE RestoreObject (
PYTHON_LIBS = @PYTHON_LIBS@
PYTHON_INC = @PYTHON_INCDIR@
+DB_LIBS=@DB_LIBS@
+
first_rule: all
dummy:
@echo "==== Make of dird is good ===="
@echo " "
-bacula-dir: Makefile $(SVROBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbacpy$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE)
+bacula-dir: Makefile $(SVROBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \
+ ../lib/libbacpy$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) \
+ ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE)
@echo "Linking $@ ..."
$(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -L../cats -L../findlib -o $@ $(SVROBJS) \
- -lbacfind -lbacsql -lbacpy -lbaccfg -lbac -lm $(PYTHON_LIBS) $(DLIB) $(DB_LIBS) $(LIBS) \
+ -lbacfind -lbacsql -lbaccats -lbacpy -lbaccfg -lbac -lm $(PYTHON_LIBS) $(DLIB) $(DB_LIBS) $(LIBS) \
$(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS)
-static-bacula-dir: Makefile $(SVROBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbacpy$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE)
+static-bacula-dir: Makefile $(SVROBJS) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \
+ ../lib/libbacpy$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) \
+ ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE)
$(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -static -L../lib -L../cats -L../findlib -o $@ $(SVROBJS) \
- -lbacfind -lbacsql -lbacpy -lbaccfg -lbac -lm $(PYTHON_LIBS) $(DLIB) $(DB_LIBS) $(LIBS) \
+ -lbacfind -lbacsql -lbaccats -lbacpy -lbaccfg -lbac -lm $(PYTHON_LIBS) $(DLIB) $(DB_LIBS) $(LIBS) \
$(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS)
strip $@
my_name_is(0, NULL, director->name()); /* set user defined name */
/* Plug database interface for library routines */
- p_sql_query = (sql_query)dir_sql_query;
- p_sql_escape = (sql_escape)db_escape_string;
+ p_sql_query = (sql_query_func)dir_sql_query;
+ p_sql_escape = (sql_escape_func)db_escape_string;
FDConnectTimeout = (int)director->FDConnectTimeout;
SDConnectTimeout = (int)director->SDConnectTimeout;
* Make sure we can open catalog, otherwise print a warning
* message because the server is probably not running.
*/
- db = db_init(NULL, catalog->db_driver, catalog->db_name, catalog->db_user,
- catalog->db_password, catalog->db_address,
- catalog->db_port, catalog->db_socket,
- catalog->mult_db_connections);
+ db = db_init_database(NULL, catalog->db_driver, catalog->db_name, catalog->db_user,
+ catalog->db_password, catalog->db_address,
+ catalog->db_port, catalog->db_socket,
+ catalog->mult_db_connections,
+ catalog->disable_batch_insert);
if (!db || !db_open_database(NULL, db)) {
Pmsg2(000, _("Could not open Catalog \"%s\", database \"%s\".\n"),
catalog->name(), catalog->db_name);
continue;
}
- /* Check if the SQL library is thread-safe */
- db_check_backend_thread_safe();
-
/* Display a message if the db max_connections is too low */
if (!db_check_max_connections(NULL, db, director->MaxConcurrentJobs)) {
Pmsg1(000, "Warning, settings problem for Catalog=%s\n", catalog->name());
db_close_database(NULL, db);
}
- /* Set type in global for debugging */
- set_db_type(db_get_type());
return OK;
}
#include "dir_plugins.h"
#include "cats/cats.h"
+#include "cats/sql_glue.h"
#include "jcr.h"
#include "bsr.h"
{"dbsocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
/* Turned off for the moment */
{"multipleconnections", store_bit, ITEM(res_cat.mult_db_connections), 0, 0, 0},
+ {"disablebatchinsert", store_bool, ITEM(res_cat.disable_batch_insert), 0, ITEM_DEFAULT, false},
{NULL, NULL, {0}, 0, 0, 0}
};
char *db_name;
char *db_driver; /* Select appropriate driver */
uint32_t mult_db_connections; /* set if multiple connections wanted */
+ bool disable_batch_insert; /* set if batch inserts should be disabled */
/* Methods */
char *name() const;
* Open database
*/
Dmsg0(100, "Open database\n");
- jcr->db=db_init(jcr, jcr->catalog->db_driver, jcr->catalog->db_name,
- jcr->catalog->db_user,
- jcr->catalog->db_password, jcr->catalog->db_address,
- jcr->catalog->db_port, jcr->catalog->db_socket,
- jcr->catalog->mult_db_connections);
+ jcr->db = db_init_database(jcr, jcr->catalog->db_driver, jcr->catalog->db_name,
+ jcr->catalog->db_user, jcr->catalog->db_password,
+ jcr->catalog->db_address, jcr->catalog->db_port,
+ jcr->catalog->db_socket, jcr->catalog->mult_db_connections,
+ jcr->catalog->disable_batch_insert);
if (!jcr->db || !db_open_database(jcr, jcr->db)) {
Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"),
jcr->catalog->db_name);
pthread_cond_broadcast(&jcr->term_wait); /* wakeup any waiting threads */
Dmsg2(100, "=== End msg_thread. JobId=%d usecnt=%d\n", jcr->JobId, jcr->use_count());
free_jcr(jcr); /* release jcr */
- db_thread_cleanup(); /* remove thread specific data */
+ db_thread_cleanup(jcr->db); /* remove thread specific data */
}
/*
jcr->catalog->db_name, jcr->catalog->db_address,
jcr->catalog->db_user, jcr->catalog->db_password,
jcr->catalog->db_socket, jcr->catalog->db_port,
- db_get_type());
+ db_get_type(jcr->db));
case 15: /* JobErrors */
return Py_BuildValue((char *)getvars[i].fmt, jcr->JobErrors);
case 16: /* JobFiles */
ua->jcr->catalog = ua->catalog;
Dmsg0(100, "UA Open database\n");
- ua->db = db_init(ua->jcr, ua->catalog->db_driver, ua->catalog->db_name,
+ ua->db = db_init_database(ua->jcr, ua->catalog->db_driver, ua->catalog->db_name,
ua->catalog->db_user,
ua->catalog->db_password, ua->catalog->db_address,
ua->catalog->db_port, ua->catalog->db_socket,
- mult_db_conn);
+ mult_db_conn, ua->catalog->disable_batch_insert);
if (!ua->db || !db_open_database(ua->jcr, ua->db)) {
ua->error_msg(_("Could not open catalog database \"%s\".\n"),
ua->catalog->db_name);
}
get_out:
- db_close_database(jcr, jcr->db);
- jcr->db = NULL;
+ if (jcr->db) {
+ db_close_database(jcr, jcr->db);
+ jcr->db = NULL;
+ }
free_jcr(jcr);
if (!found) {
ua->error_msg(_("Could not find next Volume for Job %s.\n"),
}
Dmsg0(100, "complete_jcr open db\n");
- jcr->db = db_init(jcr, jcr->catalog->db_driver, jcr->catalog->db_name,
- jcr->catalog->db_user,
- jcr->catalog->db_password, jcr->catalog->db_address,
- jcr->catalog->db_port, jcr->catalog->db_socket,
- jcr->catalog->mult_db_connections);
+ jcr->db = db_init_database(jcr, jcr->catalog->db_driver, jcr->catalog->db_name,
+ jcr->catalog->db_user,
+ jcr->catalog->db_password, jcr->catalog->db_address,
+ jcr->catalog->db_port, jcr->catalog->db_socket,
+ jcr->catalog->mult_db_connections,
+ jcr->catalog->disable_batch_insert);
if (!jcr->db || !db_open_database(jcr, jcr->db)) {
Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"),
jcr->catalog->db_name);
static bool create_temp_tables(UAContext *ua)
{
/* Create temp tables and indicies */
- if (!db_sql_query(ua->db, create_deltabs[db_type], NULL, (void *)NULL)) {
+ if (!db_sql_query(ua->db, create_deltabs[db_get_type_index(ua->db)], NULL, (void *)NULL)) {
ua->error_msg("%s", db_strerror(ua->db));
Dmsg0(050, "create DelTables table failed\n");
return false;
db_lock(ua->db);
/* Do it in two times for mysql */
- Mmsg(query, uap_upgrade_copies_oldest_job[db_type], JT_JOB_COPY, jobs, jobs);
+ Mmsg(query, uap_upgrade_copies_oldest_job[db_get_type_index(ua->db)], JT_JOB_COPY, jobs, jobs);
db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
Dmsg1(050, "Upgrade copies Log sql=%s\n", query.c_str());
len = strlen(ua->cmd);
fname = (char *)malloc(len * 2 + 1);
db_escape_string(ua->jcr, ua->db, fname, ua->cmd, len);
- Mmsg(rx->query, uar_file[db_type], rx->ClientName, fname);
+ Mmsg(rx->query, uar_file[db_get_type_index(ua->db)], rx->ClientName, fname);
free(fname);
gui_save = ua->jcr->gui;
ua->jcr->gui = true;
ua->error_msg(_("No JobId specified cannot continue.\n"));
return false;
} else {
- Mmsg(rx->query, uar_jobid_fileindex_from_dir[db_type], rx->JobIds, dir, rx->ClientName);
+ Mmsg(rx->query, uar_jobid_fileindex_from_dir[db_get_type_index(ua->db)], rx->JobIds, dir, rx->ClientName);
}
rx->found = false;
/* Find and insert jobid and File Index */
/* Create temp tables */
db_sql_query(ua->db, uar_del_temp, NULL, NULL);
db_sql_query(ua->db, uar_del_temp1, NULL, NULL);
- if (!db_sql_query(ua->db, uar_create_temp[db_type], NULL, NULL)) {
+ if (!db_sql_query(ua->db, uar_create_temp[db_get_type_index(ua->db)], NULL, NULL)) {
ua->error_msg("%s\n", db_strerror(ua->db));
}
- if (!db_sql_query(ua->db, uar_create_temp1[db_type], NULL, NULL)) {
+ if (!db_sql_query(ua->db, uar_create_temp1[db_get_type_index(ua->db)], NULL, NULL)) {
ua->error_msg("%s\n", db_strerror(ua->db));
}
/*
#include "bacula.h"
#include "jcr.h"
-sql_query p_sql_query = NULL;
-sql_escape p_sql_escape = NULL;
+sql_query_func p_sql_query = NULL;
+sql_escape_func p_sql_escape = NULL;
#define FULL_LOCATION 1 /* set for file:line in Debug messages */
/* Allow only one thread to tweak d->fd at a time */
static pthread_mutex_t fides_mutex = PTHREAD_MUTEX_INITIALIZER;
static MSGS *daemon_msgs; /* global messages */
-static char *catalog_db = NULL; /* database type */
static void (*message_callback)(int type, char *msg) = NULL;
static FILE *trace_fd = NULL;
#if defined(HAVE_WIN32)
}
}
-const char *
-get_db_type(void)
-{
- return catalog_db != NULL ? catalog_db : "unknown";
-}
-
-void
-set_db_type(const char *name)
-{
- if (catalog_db != NULL) {
- free(catalog_db);
- }
- catalog_db = bstrdup(name);
-}
-
/*
* Initialize message handler for a daemon or a Job
 * We make a copy of the MSGS resource passed, so it belongs
fclose(trace_fd);
trace_fd = NULL;
}
- if (catalog_db) {
- free(catalog_db);
- catalog_db = NULL;
- }
term_last_jobs_list();
}
bool get_trace(void);
struct B_DB;
-typedef void (*sql_query)(JCR *jcr, const char *cmd);
-typedef void (*sql_escape)(JCR *jcr, B_DB* db, char *snew, char *old, int len);
+typedef void (*sql_query_func)(JCR *jcr, const char *cmd);
+typedef void (*sql_escape_func)(JCR *jcr, B_DB* db, char *snew, char *old, int len);
-extern DLL_IMP_EXP sql_query p_sql_query;
-extern DLL_IMP_EXP sql_escape p_sql_escape;
+extern DLL_IMP_EXP sql_query_func p_sql_query;
+extern DLL_IMP_EXP sql_escape_func p_sql_escape;
extern DLL_IMP_EXP int debug_level;
extern DLL_IMP_EXP bool dbg_timestamp; /* print timestamp in debug output */
$(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) \
-I$(basedir) $(DINCLUDE) $(CFLAGS) $<
-bscan: Makefile $(SCNOBJS) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE)
+bscan: Makefile $(SCNOBJS) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \
+ ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE)
$(LIBTOOL_LINK) $(CXX) $(TTOOL_LDFLAGS) $(LDFLAGS) -L../lib -L../cats -L../findlib -o $@ $(SCNOBJS) \
- -lbacsql $(DB_LIBS) $(ZLIBS) -lbacfind -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
+ -lbacsql -lbaccats $(DB_LIBS) $(ZLIBS) -lbacfind -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
bcopy.o: bcopy.c
@echo "Compiling $<"
#include "stored.h"
#include "findlib/find.h"
#include "cats/cats.h"
+#include "cats/sql_glue.h"
/* Dummy functions */
int generate_daemon_event(JCR *jcr, const char *event) { return 1; }
edit_uint64(currentVolumeSize, ed1));
}
- if ((db=db_init(NULL, db_driver, db_name, db_user, db_password,
- db_host, db_port, NULL, 0)) == NULL) {
+ if ((db = db_init_database(NULL, db_driver, db_name, db_user, db_password,
+ db_host, db_port, NULL, false, false)) == NULL) {
Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n"));
}
if (!db_open_database(NULL, db)) {
DEBUG=@DEBUG@
+DB_LIBS=@DB_LIBS@
+
first_rule: all
dummy:
bregtest: Makefile bregtest.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE)
$(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -o $@ bregtest.o -lbac -lm $(DLIB) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
-dbcheck: Makefile dbcheck.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) $(DIRCONFOBJS)
+dbcheck: Makefile dbcheck.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \
+ ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) $(DIRCONFOBJS)
$(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../cats -o $@ dbcheck.o $(DIRCONFOBJS) \
- -lbacsql -lbaccfg -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
+ -lbacsql -lbaccats -lbaccfg -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
fstype: Makefile fstype.o ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE)
$(LIBTOOL_LINK) $(CXX) $(LDFLAGS) -L../lib -L../findlib -o $@ fstype.o -lbacfind -lbac -lm \
inc_conf.o: ../dird/inc_conf.c
$(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) $(PYTHON_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $<
-testfind: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) $(FINDOBJS)
+testfind: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) \
+ ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) $(FINDOBJS)
$(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -o $@ $(FINDOBJS) -L. -L../lib -L../findlib \
$(DLIB) -lbacfind -lbaccfg -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
$(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L. -L../lib -o $@ bwild.o \
$(DLIB) -lbac -lm $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
-bbatch: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) bbatch.o
+bbatch: Makefile ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) \
+ ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) bbatch.o
$(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. -L../lib -o $@ bbatch.o \
- -lbacsql -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
+ -lbacsql -lbaccats -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
-bvfs_test: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) bvfs_test.o
+bvfs_test: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) \
+ ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) bvfs_test.o
$(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. -L../lib -L../findlib -o $@ bvfs_test.o \
- -lbacsql -lbacfind -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
+ -lbacsql -lbaccats -lbacfind -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
-ing_test: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ing_test.o
+ing_test: Makefile ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) \
+ ../cats/libbacsql$(DEFAULT_ARCHIVE_TYPE) ../cats/libbaccats$(DEFAULT_ARCHIVE_TYPE) ing_test.o
$(LIBTOOL_LINK) $(CXX) -g $(LDFLAGS) -L../cats -L. -L../lib -L../findlib -o $@ ing_test.o \
- -lbacsql -lbacfind -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
+ -lbacsql -lbaccats -lbacfind -lbac -lm $(DB_LIBS) $(LIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS)
cats_test.o: cats_test.c
	@echo "Compiling $<"
#include "stored/stored.h"
#include "findlib/find.h"
#include "cats/cats.h"
+#include "cats/sql_glue.h"
/* Forward referenced functions */
static void *do_batch(void *);
btime_t start, end;
   /* To use the -r option, the catalog must already contain records */
- if ((db=db_init(NULL, NULL, db_name, db_user, db_password,
- db_host, 0, NULL, 0)) == NULL) {
+ if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password,
+ db_host, 0, NULL, false, false)) == NULL) {
Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n"));
}
if (!db_open_database(NULL, db)) {
bjcr->fileset_md5 = get_pool_memory(PM_FNAME);
pm_strcpy(bjcr->fileset_md5, "Dummy.fileset.md5");
- if ((db=db_init(NULL, NULL, db_name, db_user, db_password,
- db_host, 0, NULL, 0)) == NULL) {
+ if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password,
+ db_host, 0, NULL, false, false)) == NULL) {
Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n"));
}
if (!db_open_database(NULL, db)) {
#include "bacula.h"
#include "cats/cats.h"
+#include "cats/sql_glue.h"
#include "cats/bvfs.h"
#include "findlib/find.h"
pm_strcpy(bjcr->client_name, "Dummy.Client.Name");
bstrncpy(bjcr->Job, "bvfs_test", sizeof(bjcr->Job));
- if ((db=db_init_database(NULL, db_name, db_user, db_password,
- db_host, 0, NULL, 0)) == NULL) {
+ if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password,
+ db_host, 0, NULL, false, false)) == NULL) {
Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n"));
}
- Dmsg1(0, "db_type=%s\n", db_get_type());
+ Dmsg1(0, "db_type=%s\n", db_get_type(db));
if (!db_open_database(NULL, db)) {
Emsg0(M_ERROR_TERM, 0, db_strerror(db));
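
Note that with the get_db_type()/set_db_type() globals removed from message.c, the backend name is now obtained from a specific handle via db_get_type(db), as the Dmsg1() change above shows. A tiny sketch, assuming an already initialized handle; the message text is illustrative.

#include "bacula.h"
#include "cats/cats.h"
#include "cats/sql_glue.h"

/* Report which catalog backend a given connection uses; db_get_type()
 * now takes the B_DB handle instead of reading daemon-wide state. */
static void report_backend_sketch(B_DB *db)
{
   Dmsg1(0, "catalog backend: %s\n", db_get_type(db));
}
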
#include "bacula.h"
#include "cats/cats.h"
+#include "cats/sql_glue.h"
#include "lib/runscript.h"
#include "dird/dird_conf.h"
int ch;
const char *user, *password, *db_name, *dbhost;
int dbport = 0;
- bool test_thread=false;
bool print_catalog=false;
char *configfile = NULL;
char *catalogname = NULL;
memset(&id_list, 0, sizeof(id_list));
memset(&name_list, 0, sizeof(name_list));
- while ((ch = getopt(argc, argv, "bc:C:d:fvBt?")) != -1) {
+ while ((ch = getopt(argc, argv, "bc:C:d:fvB?")) != -1) {
switch (ch) {
case 'B':
print_catalog = true; /* get catalog information from config */
case 'v':
verbose++;
break;
- case 't':
- test_thread=true;
- break;
case '?':
default:
OSDependentInit();
- if (test_thread) {
- /* When we will load the SQL backend with ldopen, this check would be
- * moved after the database initialization. It will need a valid config
- * file.
- */
- db_check_backend_thread_safe();
- Pmsg0(0, _("OK - DB backend seems to be thread-safe.\n"));
- exit(0);
- }
-
if (configfile) {
CAT *catalog = NULL;
int found = 0;
/* Print catalog information and exit (-B) */
if (print_catalog) {
POOLMEM *buf = get_pool_memory(PM_MESSAGE);
- printf("%sdb_type=%s\nworking_dir=%s\n", catalog->display(buf),
- db_get_type(), working_directory);
+ printf("%s\nworking_dir=%s\n", catalog->display(buf),
+ working_directory);
free_pool_memory(buf);
exit(0);
}
}
/* Open database */
- db = db_init_database(NULL, db_name, user, password, dbhost, dbport, NULL, 0);
+ db = db_init_database(NULL, NULL, db_name, user, password, dbhost, dbport, NULL, false, false);
if (!db_open_database(NULL, db)) {
Emsg1(M_FATAL, 0, "%s", db_strerror(db));
return 1;
*/
#include "bacula.h"
#include "cats/cats.h"
-#include "cats/bvfs.h"
-#include "findlib/find.h"
+#include "cats/sql_glue.h"
/* Local variables */
static B_DB *db;
usage();
}
- if ((db=db_init_database(NULL, db_name, db_user, db_password,
- db_host, 0, NULL, 0)) == NULL) {
+ if ((db = db_init_database(NULL, NULL, db_name, db_user, db_password,
+ db_host, 0, NULL, false, false)) == NULL) {
Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n"));
}
- Dmsg1(0, "db_type=%s\n", db_get_type());
+ Dmsg1(0, "db_type=%s\n", db_get_type(db));
if (!db_open_database(NULL, db)) {
Emsg0(M_ERROR_TERM, 0, db_strerror(db));
/* #define LIBBACCFG_LT_RELEASE "5.1.0" */
/* #define LIBBACPY_LT_RELEASE "5.1.0" */
/* #define LIBBACSQL_LT_RELEASE "5.1.0" */
+/* #define LIBBACCATS_LT_RELEASE "5.1.0" */
/* #define LIBBACFIND_LT_RELEASE "5.1.0" */