src/win32/debug
src/win32/release32
src/win32/release64
+src/win32/dll
# src/win32
src/win32*.user
fi
fi
SQL_INCLUDE=-I$INGRES_INCDIR
- SQL_LFLAGS="-L$INGRES_LIBDIR -lingres"
+ SQL_LFLAGS="-L$INGRES_LIBDIR -lq.1 -lcompat.1 -lframe.1"
SQL_BINDIR=$INGRES_BINDIR
SQL_LIB=$INGRES_LIBDIR/libingres.a
AC_DEFINE(HAVE_INGRES, 1, [Set if have Ingres Database])
AC_MSG_RESULT(no)
AC_MSG_ERROR(Invalid PostgreSQL directory $withval - unable to find libpq-fe.h under $withval)
fi
+ AC_DEFINE(HAVE_POSTGRESQL)
+ AC_MSG_RESULT(yes)
POSTGRESQL_LFLAGS="-L$POSTGRESQL_LIBDIR -lpq"
AC_CHECK_FUNC(crypt, , AC_CHECK_LIB(crypt, crypt, [POSTGRESQL_LFLAGS="$POSTGRESQL_LFLAGS -lcrypt"]))
SQL_INCLUDE=-I$POSTGRESQL_INCDIR
SQL_BINDIR=$POSTGRESQL_BINDIR
SQL_LIB=$POSTGRESQL_LIBDIR/libpq.a
- AC_DEFINE(HAVE_POSTGRESQL)
- AC_MSG_RESULT(yes)
db_found=yes
support_postgresql=yes
db_type=PostgreSQL
#! /bin/sh
# Attempt to guess a canonical system name.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
-# Inc.
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+# Free Software Foundation, Inc.
-timestamp='2007-07-22'
+timestamp='2009-06-10'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
-Free Software Foundation, Inc.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
arm*|i386|m68k|ns32k|sh3*|sparc|vax)
eval $set_cc_for_build
if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep __ELF__ >/dev/null
+ | grep -q __ELF__
then
# Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
# Return netbsd for either. FIX?
case `/usr/bin/uname -p` in
sparc) echo sparc-icl-nx7; exit ;;
esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
sun4H:SunOS:5.*:*)
echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
- echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ eval $set_cc_for_build
+ SUN_ARCH="i386"
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH="x86_64"
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
sun4*:SunOS:6*:*)
# According to config.sub, this is the proper way to canonicalize
echo rs6000-ibm-aix3.2
fi
exit ;;
- *:AIX:*:[45])
+ *:AIX:*:[456])
IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
IBM_ARCH=rs6000
# => hppa64-hp-hpux11.23
if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
- grep __LP64__ >/dev/null
+ grep -q __LP64__
then
HP_ARCH="hppa2.0w"
else
x86)
echo i586-pc-interix${UNAME_RELEASE}
exit ;;
- EM64T | authenticamd)
+ EM64T | authenticamd | genuineintel)
echo x86_64-unknown-interix${UNAME_RELEASE}
exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
esac ;;
[345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
echo i${UNAME_MACHINE}-pc-mks
exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
# How do we know it's Interix rather than the generic POSIX subsystem?
# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
echo ${UNAME_MACHINE}-pc-minix
exit ;;
arm*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ fi
exit ;;
avr32*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
m68*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
- mips:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips
- #undef mipsel
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mipsel
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- mips64:Linux:*:*)
+ mips:Linux:*:* | mips64:Linux:*:*)
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
#undef CPU
- #undef mips64
- #undef mips64el
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mips64el
+ CPU=${UNAME_MACHINE}el
#else
#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips64
+ CPU=${UNAME_MACHINE}
#else
CPU=
#endif
EV67) UNAME_MACHINE=alphaev67 ;;
EV68*) UNAME_MACHINE=alphaev68 ;;
esac
- objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
+ objdump --private-headers /bin/sh | grep -q ld.so.1
if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-gnu
+ exit ;;
parisc:Linux:*:* | hppa:Linux:*:*)
# Look for CPU level
case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
x86_64:Linux:*:*)
echo x86_64-unknown-linux-gnu
exit ;;
- xtensa:Linux:*:*)
- echo xtensa-unknown-linux-gnu
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
i*86:Linux:*:*)
# The BFD linker knows what the default object file format is, so
elf32-i386)
TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
;;
- a.out-i386-linux)
- echo "${UNAME_MACHINE}-pc-linux-gnuaout"
- exit ;;
- coff-i386)
- echo "${UNAME_MACHINE}-pc-linux-gnucoff"
- exit ;;
- "")
- # Either a pre-BFD a.out linker (linux-gnuoldld) or
- # one that does not give us useful --help.
- echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
- exit ;;
esac
# Determine whether the default compiler is a.out or elf
eval $set_cc_for_build
i*86:syllable:*:*)
echo ${UNAME_MACHINE}-pc-syllable
exit ;;
- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
echo i386-unknown-lynxos${UNAME_RELEASE}
exit ;;
i*86:*DOS:*:*)
pc:*:*:*)
# Left here for compatibility:
# uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i386.
- echo i386-pc-msdosdjgpp
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
exit ;;
Intel:Mach:3*:*)
echo i386-pc-mach3
3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
&& { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
echo m68k-unknown-lynxos${UNAME_RELEASE}
exit ;;
rs6000:LynxOS:2.*:*)
echo rs6000-unknown-lynxos${UNAME_RELEASE}
exit ;;
- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
echo powerpc-unknown-lynxos${UNAME_RELEASE}
exit ;;
SM[BE]S:UNIX_SV:*:*)
BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
echo i586-pc-beos
exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
SX-4:SUPER-UX:*:*)
echo sx4-nec-superux${UNAME_RELEASE}
exit ;;
i*86:rdos:*:*)
echo ${UNAME_MACHINE}-pc-rdos
exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
esac
#echo '(No uname command or uname output not recognized.)' 1>&2
the operating system you are using. It is advised that you
download the most up to date version of the config scripts from
- http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.guess
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
and
- http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.sub
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
If the version you run ($0) is already up to date, please
send the following data and any information you think might be
/* Normal acl support */
#undef HAVE_ACL
+/* Defines if your system has AFS support */
+#undef HAVE_AFS
+
+/* Andrew FileSystem ACL support */
+#undef HAVE_AFS_ACL
+
+/* Define to 1 if you have the <afs/stds.h> header file. */
+#undef HAVE_AFS_STDS_H
+
/* Define to 1 if you have `alloca', as a function or macro. */
#undef HAVE_ALLOCA
/* Define to 1 if you have the `getmntent' function. */
#undef HAVE_GETMNTENT
+/* Define to 1 if you have the `getmntinfo' function. */
+#undef HAVE_GETMNTINFO
+
/* Define to 1 if you have the `getpagesize' function. */
#undef HAVE_GETPAGESIZE
#! /bin/sh
# Configuration validation subroutine script.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
-# Inc.
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+# Free Software Foundation, Inc.
-timestamp='2007-06-28'
+timestamp='2009-06-11'
# This file is (in principle) common to ALL GNU software.
# The presence of a machine in this file suggests that SOME GNU software
version="\
GNU config.sub ($timestamp)
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
-Free Software Foundation, Inc.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
case $maybe_os in
nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
+ kopensolaris*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
os=
basic_machine=$1
;;
+ -bluegene*)
+ os=-cnk
+ ;;
-sim | -cisco | -oki | -wec | -winbond)
os=
basic_machine=$1
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
| i370 | i860 | i960 | ia64 \
| ip2k | iq2000 \
+ | lm32 \
| m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep \
+ | maxq | mb | microblaze | mcore | mep | metag \
| mips | mipsbe | mipseb | mipsel | mipsle \
| mips16 \
| mips64 | mips64el \
- | mips64vr | mips64vrel \
+ | mips64octeon | mips64octeonel \
| mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
| mips64vr4100 | mips64vr4100el \
| mips64vr4300 | mips64vr4300el \
| mips64vr5000 | mips64vr5000el \
| mipsisa64sr71k | mipsisa64sr71kel \
| mipstx39 | mipstx39el \
| mn10200 | mn10300 \
+ | moxie \
| mt \
| msp430 \
| nios | nios2 \
| powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
| pyramid \
| score \
- | sh | sh[1234] | sh[24]a | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
| sh64 | sh64le \
| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
| v850 | v850e \
| we32k \
| x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
- | z8k)
+ | z8k | z80)
basic_machine=$basic_machine-unknown
;;
m6811 | m68hc11 | m6812 | m68hc12)
| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
| i*86-* | i860-* | i960-* | ia64-* \
| ip2k-* | iq2000-* \
+ | lm32-* \
| m32c-* | m32r-* | m32rle-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
| mips16-* \
| mips64-* | mips64el-* \
- | mips64vr-* | mips64vrel-* \
+ | mips64octeon-* | mips64octeonel-* \
| mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
| mips64vr4100-* | mips64vr4100el-* \
| mips64vr4300-* | mips64vr4300el-* \
| mips64vr5000-* | mips64vr5000el-* \
| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
| pyramid-* \
| romp-* | rs6000-* \
- | sh-* | sh[1234]-* | sh[24]a-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
| sparclite-* \
| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
| tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \
| tron-* \
| v850-* | v850e-* | vax-* \
| we32k-* \
| x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
- | xstormy16-* | xtensa-* \
+ | xstormy16-* | xtensa*-* \
| ymp-* \
- | z8k-*)
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
;;
# Recognize the various machine names and aliases which stand
# for a CPU type and a company and sometimes even an OS.
basic_machine=m68k-apollo
os=-bsd
;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
aux)
basic_machine=m68k-apple
os=-aux
basic_machine=ns32k-sequent
os=-dynix
;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
c90)
basic_machine=c90-cray
os=-unicos
;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
convex-c1)
basic_machine=c1-convex
os=-bsd
basic_machine=m88k-motorola
os=-sysv3
;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
djgpp)
basic_machine=i586-pc
os=-msdosdjgpp
basic_machine=m68k-isi
os=-sysv
;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
m88k-omron*)
basic_machine=m88k-omron
;;
basic_machine=i860-intel
os=-osf
;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
pbd)
basic_machine=sparc-tti
;;
basic_machine=tic6x-unknown
os=-coff
;;
+ tile*)
+ basic_machine=tile-unknown
+ os=-linux-gnu
+ ;;
tx39)
basic_machine=mipstx39-unknown
;;
basic_machine=z8k-unknown
os=-sim
;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
none)
basic_machine=none-none
os=-none
we32k)
basic_machine=we32k-att
;;
- sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele)
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
basic_machine=sh-unknown
;;
sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
# Each alternative MUST END IN A *, to match a version number.
# -sysv* is not here because it comes later, after sysvr4.
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
| -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -kopensolaris* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* \
+ | -aos* | -aros* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
| -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
| -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
| -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
| -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
| -uxpv* | -beos* | -mpeix* | -udk* \
-zvmoe)
os=-zvmoe
;;
+ -dicos*)
+ os=-dicos
+ ;;
-none)
;;
*)
-sunos*)
vendor=sun
;;
- -aix*)
+ -cnk*|-aix*)
vendor=ibm
;;
-beos*)
VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h`
DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h`
LSMDATE=`sed -n -e 's/^.*LSMDATE.*"\(.*\)"$/\1/p' ${srcdir}/src/version.h`
+BDB_VERSION=`sed -n -e 's/^.*BDB_VERSION \(.*\)$/\1/p' ${srcdir}/src/cats/cats.h`
AC_SUBST(VERSION)dnl
AC_SUBST(DATE)dnl
AC_SUBST(LSMDATE)dnl
AC_SUBST(BACULA)dnl
AC_SUBST(post_host)dnl
+AC_SUBST(BDB_VERSION)dnl
dnl src/lib
dnl can be overwritten by specific values from version.h
fi
TERM_LIB=""
-AC_CHECK_HEADER(termcap.h,
- [ AC_CHECK_LIB(termcap, tgetent,
- [ TERM_LIB="-ltermcap" ],
- [ AC_CHECK_LIB(ncurses, tgetent, [ TERM_LIB="-lncurses" ])
+AC_CHECK_HEADER(curses.h,
+ [ AC_CHECK_LIB(ncurses, tgetent,
+ [ TERM_LIB="-lncurses" ],
+ [ AC_CHECK_LIB(termcap, tgetent, [ TERM_LIB="-ltermcap" ])
])
],
[ AC_CHECK_HEADERS(curses.h)
OPENSSL_LIBS=""
OPENSSL_INC=""
fi
+AC_MSG_RESULT([$support_tls])
if test "$support_tls" = "no"; then
OPENSSL_LIBS=""
OPENSSL_INC=""
fi
-AC_MSG_RESULT([$support_tls])
AC_SUBST(OPENSSL_LIBS)
AC_SUBST(OPENSSL_INC)
AC_SUBST(uncomment_dbi)
+dnl For Ingres always enable batch inserts.
+if test x$DB_TYPE = xingres; then
+ support_batch_insert=yes
+fi
+
if test $support_batch_insert = yes ; then
AC_DEFINE(HAVE_BATCH_FILE_INSERT, 1, [Set if DB batch insert code enabled])
fi
AC_FUNC_VPRINTF
AC_FUNC_ALLOCA
AC_FUNC_GETMNTENT
+AC_CHECK_FUNCS(getmntinfo, [AC_DEFINE(HAVE_GETMNTINFO)])
AC_FUNC_CLOSEDIR_VOID
AC_FUNC_SETPGRP dnl check for BSD setpgrp.
# AC_FUNC_FNMATCH dnl use local version
AC_CHECK_LIB(sun, getpwnam)
AC_CHECK_HEADERS(zlib.h)
-AC_CHECK_LIB(z, deflate, [FDLIBS="-lz"])
+AC_CHECK_LIB(z, deflate, [ZLIBS="-lz"])
have_zlib=no
-if test x$FDLIBS = x-lz; then
+if test x$ZLIBS = x-lz; then
AC_DEFINE(HAVE_LIBZ)
have_zlib=yes
fi
+AC_SUBST(ZLIBS)
+
+dnl
+dnl Check if we have AFS on this system
+dnl
+AFS_CFLAGS=""
+AFS_LIBS=""
+support_afs=auto
+AC_ARG_ENABLE(afs,
+ AC_HELP_STRING([--disable-afs], [disable afs support @<:@default=auto@:>@]),
+ [
+ if test x$enableval = xyes; then
+ support_afs=yes
+ elif test x$enableval = xno; then
+ support_afs=no
+ fi
+ ]
+)
+
+have_afs=no
+if test x$support_afs = xyes -o x$support_afs = xauto; then
+ AC_ARG_WITH(afsdir,
+ AC_HELP_STRING([--with-afsdir@<:@=DIR@:>@], [Directory holding AFS includes/libs]),
+ with_afsdir=$withval
+ )
+
+ dnl
+ dnl Search in standard places, or --with-afsdir not specified
+ dnl
+ if test x$with_afsdir = x; then
+ for root in /usr /usr/local; do
+ if test -d ${root}/include/afs/ ; then
+ with_afsdir=${root}
+ break
+ fi
+ done
+ fi
+
+ AFS_CFLAGS="-I${with_afsdir}/include"
+
+ saved_CFLAGS="${CFLAGS}"
+ CFLAGS="${AFS_CFLAGS} ${saved_CFLAGS}"
+
+ AC_CHECK_HEADERS(afs/stds.h)
+
+ CFLAGS="${saved_CFLAGS}"
+
+ dnl
+ dnl See if we can find a libsys with the pioctl symbol in there
+ dnl
+ for dir in ${with_afsdir}/lib ${with_afsdir}/lib/afs
+ do
+ for arch_type in .a .so
+ do
+ A=`test -f ${dir}/libsys${arch_type} && nm ${dir}/libsys${arch_type} | grep pioctl`
+ pkg=$?
+ if test $pkg = 0; then
+ have_afs=yes
+ AFS_LIBS="-L${dir} -lsys -lrx -llwp ${dir}/util.a"
+ break
+ fi
+ done
+ done
+
+ if test x$support_afs = xyes -a $have_afs != yes; then
+ AC_MSG_ERROR([afs support explicitly enabled but no supported afs implementation found,
+ please either load the afs libraries or rerun configure without --enable-afs])
+ else
+ if test $have_afs = yes; then
+ AC_DEFINE([HAVE_AFS],1,[Defines if your system has AFS support])
+ AC_DEFINE([HAVE_AFS_ACL],1,[Andrew FileSystem ACL support])
+ fi
+ fi
+fi
+
+AC_SUBST(AFS_CFLAGS)
+AC_SUBST(AFS_LIBS)
dnl
dnl Check for ACL support and libraries
AC_CHECK_LIB(acl, acl_get_file,
[
have_acl=yes;
- FDLIBS="-lacl $FDLIBS"
+ if test $have_afs = yes; then
+ dnl
+ dnl Because of possible naming conflict with AFS libacl make sure we use the one in /usr/lib64 or /usr/lib !!!
+ dnl
+ if test -d /usr/lib64/; then
+ FDLIBS="-L/usr/lib64 -lacl $FDLIBS"
+ else
+ FDLIBS="-L/usr/lib -lacl $FDLIBS"
+ fi
+ else
+ FDLIBS="-lacl $FDLIBS"
+ fi
], [
AC_CHECK_LIB(pacl, acl_get_file,
[
platforms/suse/Makefile \
platforms/suse/bacula-fd \
platforms/suse/bacula-sd \
- platforms/suse/bacula-dir"
+ platforms/suse/bacula-dir \
+ platforms/suse/bacula"
;;
suse5)
DISTNAME=suse
src/cats/drop_ingres_database \
src/cats/sqlite \
src/cats/mysql \
- src/cats/create_bdb_database \
- src/cats/update_bdb_tables \
- src/cats/make_bdb_tables \
- src/cats/grant_bdb_privileges \
- src/cats/drop_bdb_tables \
- src/cats/drop_bdb_database \
src/cats/create_bacula_database \
src/cats/update_bacula_tables \
src/cats/grant_bacula_privileges \
c=updatedb
chmod 755 $c/update_mysql_tables_10_to_11 $c/update_sqlite3_tables_10_to_11
chmod 755 $c/update_postgresql_tables_10_to_11
+chmod 755 $c/update_mysql_tables_11_to_12 $c/update_sqlite3_tables_11_to_12
+chmod 755 $c/update_postgresql_tables_11_to_12
+
c=src/cats
build-dird: ${build_dird}
build-stored: ${build_stored}
Plugin support: ${have_plugins}
+ AFS support: ${have_afs}
ACL support: ${have_acl}
XATTR support: ${have_xattr}
Python support: ${support_python} ${PYTHON_LIBS}
Priority:
================
+- Add external command to lookup hostname (eg nmblookup timmy-win7)
+nmblookup gato
+querying gato on 127.255.255.255
+querying gato on 192.168.1.255
+ 192.168.1.8 gato<00>
+ 192.168.1.11 gato<00>
+ 192.168.1.8 gato<00>
+ 192.168.1.11 gato<00>
- Possibly allow SD to spool even if a tape is not mounted.
- How to sync remote offices.
- Windows Bare Metal
Source: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.tar.gz
# define the basic package description
-%define blurb Bacula - It comes by night and sucks the vital essence from your computers.
+%define blurb Bacula - The Leading Open Source Backup Solution.
%define blurb2 Bacula is a set of computer programs that permit you (or the system
%define blurb3 administrator) to manage backup, recovery, and verification of computer
%define blurb4 data across a network of computers of different kinds. In technical terms,
Source3: http://www.prdownloads.sourceforge.net/bacula/depkgs-%{depkgs_version}.tar.gz
# define the basic package description
-%define blurb Bacula - It comes by night and sucks the vital essence from your computers.
+%define blurb Bacula - The Leading Open Source Backup Solution.
%define blurb2 Bacula is a set of computer programs that permit you (or the system
%define blurb3 administrator) to manage backup, recovery, and verification of computer
%define blurb4 data across a network of computers of different kinds. In technical terms,
Projects:
Bacula Projects Roadmap
- Status updated 14 Jun 2009
+ Status updated 25 February 2010
Summary:
* => item complete
- Item 1: Ability to restart failed jobs
-*Item 2: 'restore' menu: enter a JobId, automatically select dependents
- Item 3: Scheduling syntax that permits more flexibility and options
- Item 4: Data encryption on storage daemon
-*Item 5: Deletion of disk Volumes when pruned (partial -- truncate when pruned)
-*Item 6: Implement Base jobs
- Item 7: Add ability to Verify any specified Job.
- Item 8: Improve Bacula's tape and drive usage and cleaning management
- Item 9: Allow FD to initiate a backup
-*Item 10: Restore from volumes on multiple storage daemons
- Item 11: Implement Storage daemon compression
- Item 12: Reduction of communications bandwidth for a backup
- Item 13: Ability to reconnect a disconnected comm line
- Item 14: Start spooling even when waiting on tape
-*Item 15: Enable/disable compression depending on storage device (disk/tape)
- Item 16: Include all conf files in specified directory
- Item 17: Multiple threads in file daemon for the same job
- Item 18: Possibilty to schedule Jobs on last Friday of the month
- Item 19: Include timestamp of job launch in "stat clients" output
-*Item 20: Cause daemons to use a specific IP address to source communications
- Item 21: Message mailing based on backup types
- Item 22: Ability to import/export Bacula database entities
-*Item 23: "Maximum Concurrent Jobs" for drives when used with changer device
- Item 24: Implementation of running Job speed limit.
- Item 25: Add an override in Schedule for Pools based on backup types
- Item 26: Automatic promotion of backup levels based on backup size
- Item 27: Allow inclusion/exclusion of files in a fileset by creation/mod times
- Item 28: Archival (removal) of User Files to Tape
- Item 29: An option to operate on all pools with update vol parameters
- Item 30: Automatic disabling of devices
-*Item 31: List InChanger flag when doing restore.
- Item 32: Ability to defer Batch Insert to a later time
- Item 33: Add MaxVolumeSize/MaxVolumeBytes statement to Storage resource
- Item 34: Enable persistent naming/number of SQL queries
-*Item 35: Port bat to Win32
- Item 36: Bacula Dir, FD and SD to support proxies
- Item 37: Add Minumum Spool Size directive
- Item 38: Backup and Restore of Windows Encrypted Files using Win raw encryption
- Item 39: Implement an interface between Bacula and Amazon's S3.
- Item 40: Convert Bacula existing tray monitor on Windows to a stand alone program
+Item 1: Ability to restart failed jobs
+Item 2: Scheduling syntax that permits more flexibility and options
+Item 3: Data encryption on storage daemon
+Item 4: Add ability to Verify any specified Job.
+Item 5: Improve Bacula's tape and drive usage and cleaning management
+Item 6: Allow FD to initiate a backup
+Item 7: Implement Storage daemon compression
+Item 8: Reduction of communications bandwidth for a backup
+Item 9: Ability to reconnect a disconnected comm line
+Item 10: Start spooling even when waiting on tape
+Item 11: Include all conf files in specified directory
+Item 12: Multiple threads in file daemon for the same job
+Item 13: Possibility to schedule Jobs on last Friday of the month
+Item 14: Include timestamp of job launch in "stat clients" output
+Item 15: Message mailing based on backup types
+Item 16: Ability to import/export Bacula database entities
+Item 17: Implementation of running Job speed limit.
+Item 18: Add an override in Schedule for Pools based on backup types
+Item 19: Automatic promotion of backup levels based on backup size
+Item 20: Allow FileSet inclusion/exclusion by creation/mod times
+Item 21: Archival (removal) of User Files to Tape
+Item 22: An option to operate on all pools with update vol parameters
+Item 23: Automatic disabling of devices
+Item 24: Ability to defer Batch Insert to a later time
+Item 25: Add MaxVolumeSize/MaxVolumeBytes to Storage resource
+Item 26: Enable persistent naming/number of SQL queries
+Item 27: Bacula Dir, FD and SD to support proxies
+Item 28: Add Minimum Spool Size directive
+Item 29: Handle Windows Encrypted Files using Win raw encryption
+Item 30: Implement a Storage device like Amazon's S3.
+Item 31: Convert tray monitor on Windows to a stand alone program
+Item 32: Relabel disk volume after recycling
+Item 33: Command that releases all drives in an autochanger
+Item 34: Run bscan on a remote storage daemon from within bconsole.
+Item 35: Implement a Migration job type that will create a reverse
+Item 36: Job migration between different SDs
+Item 37: Concurrent spooling and despooling within a single job.
+Item 39: Extend the verify code to make it possible to verify
+Item 40: Separate "Storage" and "Device" in the bacula-dir.conf
+Item 41: Least recently used device selection for tape drives in autochanger.
+
Item 1: Ability to restart failed jobs
Date: 26 April 2009
volume of data or files stored on Volume before enabling.
-Item 2: 'restore' menu: enter a JobId, automatically select dependents
-Origin: Graham Keeling (graham@equiinet.com)
-Date: 13 March 2009
-Status: Done in 3.0.2
-
-What: Add to the bconsole 'restore' menu the ability to select a job
- by JobId, and have bacula automatically select all the
- dependent jobs.
-
- Why: Currently, you either have to...
-
- a) laboriously type in a date that is greater than the date of the
- backup that you want and is less than the subsequent backup (bacula
- then figures out the dependent jobs), or
- b) manually figure out all the JobIds that you want and laboriously
- type them all in. It would be extremely useful (in a programmatical
- sense, as well as for humans) to be able to just give it a single JobId
- and let bacula do the hard work (work that it already knows how to do).
-
- Notes (Kern): I think this should either be modified to have Bacula
- print a list of dates that the user can choose from as is done in
- bwx-console and bat or the name of this command must be carefully
- chosen so that the user clearly understands that the JobId is being
- used to specify what Job and the date to which he wishes the restore to
- happen.
-
-
-Item 3: Scheduling syntax that permits more flexibility and options
+Item 2: Scheduling syntax that permits more flexibility and options
Date: 15 December 2006
Origin: Gregory Brauer (greg at wildbrain dot com) and
Florian Schnabel <florian.schnabel at docufy dot de>
jobs (via Schedule syntax) into this.
-Item 4: Data encryption on storage daemon
+Item 3: Data encryption on storage daemon
Origin: Tobias Barth <tobias.barth at web-arts.com>
Date: 04 February 2009
Status: new
http://www.mail-archive.com/bacula-users@lists.sourceforge.net/msg28860.html
-Item 5: Deletion of disk Volumes when pruned
- Date: Nov 25, 2005
- Origin: Ross Boylan <RossBoylan at stanfordalumni dot org> (edited
- by Kern)
- Status: Truncate operation implemented in 3.1.4
-
- What: Provide a way for Bacula to automatically remove Volumes
- from the filesystem, or optionally to truncate them.
- Obviously, the Volume must be pruned prior removal.
-
- Why: This would allow users more control over their Volumes and
- prevent disk based volumes from consuming too much space.
-
- Notes: The following two directives might do the trick:
-
- Volume Data Retention = <time period>
- Remove Volume After = <time period>
-
- The migration project should also remove a Volume that is
- migrated. This might also work for tape Volumes.
-
- Notes: (Kern). The data fields to control this have been added
- to the new 3.0.0 database table structure.
-
-
-Item 6: Implement Base jobs
- Date: 28 October 2005
- Origin: Kern
- Status:
-
- What: A base job is sort of like a Full save except that you
- will want the FileSet to contain only files that are
- unlikely to change in the future (i.e. a snapshot of
- most of your system after installing it). After the
- base job has been run, when you are doing a Full save,
- you specify one or more Base jobs to be used. All
- files that have been backed up in the Base job/jobs but
- not modified will then be excluded from the backup.
- During a restore, the Base jobs will be automatically
- pulled in where necessary.
-
- Why: This is something none of the competition does, as far as
- we know (except perhaps BackupPC, which is a Perl program that
- saves to disk only). It is big win for the user, it
- makes Bacula stand out as offering a unique
- optimization that immediately saves time and money.
- Basically, imagine that you have 100 nearly identical
- Windows or Linux machine containing the OS and user
- files. Now for the OS part, a Base job will be backed
- up once, and rather than making 100 copies of the OS,
- there will be only one. If one or more of the systems
- have some files updated, no problem, they will be
- automatically restored.
-
- Notes: Huge savings in tape usage even for a single machine.
- Will require more resources because the DIR must send
- FD a list of files/attribs, and the FD must search the
- list and compare it for each file to be saved.
-
-
-Item 7: Add ability to Verify any specified Job.
+Item 4: Add ability to Verify any specified Job.
Date: 17 January 2008
Origin: portrix.net Hamburg, Germany.
Contact: Christian Sabelmann
Jobs whose file information are still in the catalog.
-Item 8: Improve Bacula's tape and drive usage and cleaning management
+Item 5: Improve Bacula's tape and drive usage and cleaning management
Date: 8 November 2005, November 11, 2005
Origin: Adam Thornton <athornton at sinenomine dot net>,
Arno Lehmann <al at its-lehmann dot de>
volumes, and handling drive cleaning and TAPEALERTs.
-Item 9: Allow FD to initiate a backup
+Item 6: Allow FD to initiate a backup
Origin: Frank Volf (frank at deze dot org)
Date: 17 November 2005
Status:
-What: Provide some means, possibly by a restricted console that
+What: Provide some means, possibly by a restricted console that
allows a FD to initiate a backup, and that uses the connection
established by the FD to the Director for the backup so that
a Director that is firewalled can do the backup.
-Why: Makes backup of laptops much easier.
-
-
-Item 10: Restore from volumes on multiple storage daemons
-Origin: Graham Keeling (graham@equiinet.com)
-Date: 12 March 2009
-Status: Done in 3.0.2
-
-What: The ability to restore from volumes held by multiple storage daemons
- would be very useful.
-
-Why: It is useful to be able to backup to any number of different storage
- daemons. For example, your first storage daemon may run out of space,
- so you switch to your second and carry on. Bacula will currently let
- you do this. However, once you come to restore, bacula cannot cope
- when volumes on different storage daemons are required.
-
- Notes: The director knows that more than one storage daemon is needed,
- as bconsole outputs something like the following table.
-
- The job will require the following
- Volume(s) Storage(s) SD Device(s)
- =====================================================================
-
- backup-0001 Disk 1 Disk 1.0
- backup-0002 Disk 2 Disk 2.0
-
- However, the bootstrap file that it creates gets sent to the first
- storage daemon only, which then stalls for a long time, 'waiting for a
- mount request' for the volume that it doesn't have. The bootstrap file
- contains no knowledge of the storage daemon. Under the current design:
-
- The director connects to the storage daemon, and gets an sd_auth_key.
- The director then connects to the file daemon, and gives it the
- sd_auth_key with the 'jobcmd'. (restoring of files happens) The
- director does a 'wait_for_storage_daemon_termination()'. The director
- waits for the file daemon to indicate the end of the job.
-
- With my idea:
-
- The director connects to the file daemon.
- Then, for each storage daemon in the .bsr file... {
- The director connects to the storage daemon, and gets an sd_auth_key.
- The director then connects to the file daemon, and gives it the
- sd_auth_key with the 'storaddr' command.
- (restoring of files happens)
- The director does a 'wait_for_storage_daemon_termination()'.
- The director waits for the file daemon to indicate the end of the
- work on this storage.
- }
-
- The director tells the file daemon that there are no more storages to
- contact. The director waits for the file daemon to indicate the end of
- the job. As you can see, each restore between the file daemon and
- storage daemon is handled in the same way that it is currently handled,
- using the same method for authentication, except that the sd_auth_key
- is moved from the 'jobcmd' to the 'storaddr' command - where it
- logically belongs.
-
-
-Item 11: Implement Storage daemon compression
+Why: Makes backup of laptops much easier.
+Notes: - The FD already has code for the monitor interface
+ - It could be nice to have a .job command that lists authorized
+ jobs.
+ - Commands need to be restricted on the Director side
+ (for example by re-using the runscript flag)
+ - The Client resource can be used to authorize the connection
+ - In a first time, the client can't modify job parameters
+ - We need a way to run a status command to follow job progression
+
+ This project consists of the following points
+ 1. Modify the FD to have a "mini-console" interface that
+ permits it to connect to the Director and start a
+ backup job of itself.
+ 2. The list of jobs that can be started by the FD are
+ defined in the Director (possibly via a restricted
+ console).
+ 3. Modify the existing tray monitor code in the Win32 FD
+ so that it is a separate program from the FD.
+ 4. The tray monitor program should be extended to permit
+ initiating a backup.
+ 5. No new Director directives should be added without
+ prior consultation with the Bacula developers.
+ 6. The comm line used by the FD to connect to the Director
+ should be re-used by the Director to do the backup.
+ This feature is partially implemented in the Director.
+ 7. The FD may have a new directive that allows it to start
+ a backup when the FD starts.
+ 8. The console interface to the FD should be extended to
+ permit a properly authorized console to initiate a
+ backup via the FD.
+
+
+Item 7: Implement Storage daemon compression
Date: 18 December 2006
Origin: Vadim A. Umanski , e-mail umanski@ext.ru
Status:
Notes:
-Item 12: Reduction of communications bandwidth for a backup
+Item 8: Reduction of communications bandwidth for a backup
Date: 14 October 2008
Origin: Robin O'Leary (Equiinet)
Status:
backup that will speed up subsequent backups.
-Item 13: Ability to reconnect a disconnected comm line
+Item 9: Ability to reconnect a disconnected comm line
Date: 26 April 2009
Origin: Kern/Eric
Status:
Notes: *Very* complicated from a design point of view because of authenication.
-Item 14: Start spooling even when waiting on tape
+Item 10: Start spooling even when waiting on tape
Origin: Tobias Barth <tobias.barth@web-arts.com>
Date: 25 April 2008
Status:
implemented.
-Item 15: Enable/disable compression depending on storage device (disk/tape)
- Origin: Ralf Gross ralf-lists@ralfgross.de
- Date: 2008-01-11
- Status: Done
-
- What: Add a new option to the storage resource of the director. Depending
- on this option, compression will be enabled/disabled for a device.
-
- Why: If different devices (disks/tapes) are used for full/diff/incr
- backups, software compression will be enabled for all backups
- because of the FileSet compression option. For backup to tapes
- wich are able to do hardware compression this is not desired.
-
-
- Notes:
- http://news.gmane.org/gmane.comp.sysutils.backup.bacula.devel/cutoff=11124
- It must be clear to the user, that the FileSet compression option
- must still be enabled use compression for a backup job at all.
- Thus a name for the new option in the director must be
- well-defined.
-
- Notes: KES I think the Storage definition should probably override what
- is in the Job definition or vice-versa, but in any case, it must
- be well defined.
-
-
-Item 16: Include all conf files in specified directory
+Item 11: Include all conf files in specified directory
Date: 18 October 2008
Origin: Database, Lda. Maputo, Mozambique
Contact:Cameron Smith / cameron.ord@database.co.mz
/etc/bacula/clientdefs/clientname.conf
-Item 17: Multiple threads in file daemon for the same job
+Item 12: Multiple threads in file daemon for the same job
Date: 27 November 2005
Origin: Ove Risberg (Ove.Risberg at octocode dot com)
Status:
Why: Multiple concurrent backups of a large fileserver with many
disks and controllers will be much faster.
+ Notes: (KES) This is not necessary and could be accomplished
+ by having two jobs. In addition, the current VSS code
+ is single thread.
-Item 18: Possibilty to schedule Jobs on last Friday of the month
+
+Item 13: Possibility to schedule Jobs on last Friday of the month
Origin: Carsten Menke <bootsy52 at gmx dot net>
Date: 02 March 2008
Status:
Run = pool=Monthly last Day of the Month at 23:50
-Item 19: Include timestamp of job launch in "stat clients" output
+Item 14: Include timestamp of job launch in "stat clients" output
Origin: Mark Bergman <mark.bergman@uphs.upenn.edu>
Date: Tue Aug 22 17:13:39 EDT 2006
Status:
particularly when there are many active clients.
-Item 20: Cause daemons to use a specific IP address to source communications
- Origin: Bill Moran <wmoran@collaborativefusion.com>
- Date: 18 Dec 2006
- Status: Done in 3.0.2
- What: Cause Bacula daemons (dir, fd, sd) to always use the ip address
- specified in the [DIR|DF|SD]Addr directive as the source IP
- for initiating communication.
- Why: On complex networks, as well as extremely secure networks, it's
- not unusual to have multiple possible routes through the network.
- Often, each of these routes is secured by different policies
- (effectively, firewalls allow or deny different traffic depending
- on the source address)
- Unfortunately, it can sometimes be difficult or impossible to
- represent this in a system routing table, as the result is
- excessive subnetting that quickly exhausts available IP space.
- The best available workaround is to provide multiple IPs to
- a single machine that are all on the same subnet. In order
- for this to work properly, applications must support the ability
- to bind outgoing connections to a specified address, otherwise
- the operating system will always choose the first IP that
- matches the required route.
- Notes: Many other programs support this. For example, the following
- can be configured in BIND:
- query-source address 10.0.0.1;
- transfer-source 10.0.0.2;
- Which means queries from this server will always come from
- 10.0.0.1 and zone transfers will always originate from
- 10.0.0.2.
-
-
-Item 21: Message mailing based on backup types
+Item 15: Message mailing based on backup types
Origin: Evan Kaufman <evan.kaufman@gmail.com>
Date: January 6, 2006
Status:
Notes: Kern: This should be rather trivial to implement.
-Item 22: Ability to import/export Bacula database entities
+Item 16: Ability to import/export Bacula database entities
Date: 26 April 2009
Origin: Eric
Status:
other criteria.
-Item 23: "Maximum Concurrent Jobs" for drives when used with changer device
- Origin: Ralf Gross ralf-lists <at> ralfgross.de
- Date: 2008-12-12
- Status: Done in 3.0.3
-
- What: respect the "Maximum Concurrent Jobs" directive in the _drives_
- Storage section in addition to the changer section
-
- Why: I have a 3 drive changer where I want to be able to let 3 concurrent
- jobs run in parallel. But only one job per drive at the same time.
- Right now I don't see how I could limit the number of concurrent jobs
- per drive in this situation.
-
- Notes: Using different priorities for these jobs lead to problems that other
- jobs are blocked. On the user list I got the advice to use the
- "Prefer Mounted Volumes" directive, but Kern advised against using
- "Prefer Mounted Volumes" in an other thread:
- http://article.gmane.org/gmane.comp.sysutils.backup.bacula.devel/11876/
-
- In addition I'm not sure if this would be the same as respecting the
- drive's "Maximum Concurrent Jobs" setting.
-
- Example:
-
- Storage {
- Name = Neo4100
- Address = ....
- SDPort = 9103
- Password = "wiped"
- Device = Neo4100
- Media Type = LTO4
- Autochanger = yes
- Maximum Concurrent Jobs = 3
- }
-
- Storage {
- Name = Neo4100-LTO4-D1
- Address = ....
- SDPort = 9103
- Password = "wiped"
- Device = ULTRIUM-TD4-D1
- Media Type = LTO4
- Maximum Concurrent Jobs = 1
- }
-
- [2 more drives]
-
- The "Maximum Concurrent Jobs = 1" directive in the drive's section is
- ignored.
-
-
-Item 24: Implementation of running Job speed limit.
+Item 17: Implementation of running Job speed limit.
Origin: Alex F, alexxzell at yahoo dot com
Date: 29 January 2009
especially where there is little available.
-Item 25: Add an override in Schedule for Pools based on backup types
+Item 18: Add an override in Schedule for Pools based on backup types
Date: 19 Jan 2005
Origin: Chad Slater <chad.slater@clickfox.com>
Status:
has more capacity (i.e. a 8TB tape library.
-Item 26: Automatic promotion of backup levels based on backup size
+Item 19: Automatic promotion of backup levels based on backup size
Date: 19 January 2006
Origin: Adam Thornton <athornton@sinenomine.net>
Status:
of).
-Item 27: Allow inclusion/exclusion of files in a fileset by creation/mod times
+Item 20: Allow FileSet inclusion/exclusion by creation/mod times
Origin: Evan Kaufman <evan.kaufman@gmail.com>
Date: January 11, 2006
Status:
or 'since'.
-Item 28: Archival (removal) of User Files to Tape
+Item 21: Archival (removal) of User Files to Tape
Date: Nov. 24/2005
Origin: Ray Pengelly [ray at biomed dot queensu dot ca
Status:
storage pool gets full) data is migrated to Tape.
-Item 29: An option to operate on all pools with update vol parameters
+Item 22: An option to operate on all pools with update vol parameters
Origin: Dmitriy Pinchukov <absh@bossdev.kiev.ua>
Date: 16 August 2006
Status: Patch made by Nigel Stepp
Volumes from Pool -> pool #.
-Item 30: Automatic disabling of devices
+Item 23: Automatic disabling of devices
Date: 2005-11-11
Origin: Peter Eriksson <peter at ifm.liu dot se>
Status:
instead.
-Item 31: List InChanger flag when doing restore.
- Origin: Jesper Krogh<jesper@krogh.cc>
- Date: 17 Oct 2008
- Status: Done in version 3.0.2
-
- What: When doing a restore the restore selection dialog ends by telling
- stuff like this:
- The job will require the following
- Volume(s) Storage(s) SD Device(s)
- ===========================================================================
- 000741L3 LTO-4 LTO3
- 000866L3 LTO-4 LTO3
- 000765L3 LTO-4 LTO3
- 000764L3 LTO-4 LTO3
- 000756L3 LTO-4 LTO3
- 001759L3 LTO-4 LTO3
- 001763L3 LTO-4 LTO3
- 001762L3 LTO-4 LTO3
- 001767L3 LTO-4 LTO3
-
- When having an autochanger, it would be really nice with an inChanger
- column so the operator knew if this restore job would stop waiting for
- operator intervention. This is done just by selecting the inChanger flag
- from the catalog and printing it in a seperate column.
-
-
- Why: This would help getting large restores through minimizing the
- time spent waiting for operator to drop by and change tapes in the library.
-
- Notes: [Kern] I think it would also be good to have the Slot as well,
- or some indication that Bacula thinks the volume is in the autochanger
- because it depends on both the InChanger flag and the Slot being
- valid.
-
-
-Item 32: Ability to defer Batch Insert to a later time
+Item 24: Ability to defer Batch Insert to a later time
Date: 26 April 2009
Origin: Eric
Status:
format (i.e. dependent on the import/export entities project).
-Item 33: Add MaxVolumeSize/MaxVolumeBytes statement to Storage resource
+Item 25: Add MaxVolumeSize/MaxVolumeBytes to Storage resource
Origin: Bastian Friedrich <bastian.friedrich@collax.com>
Date: 2008-07-09
Status: -
quite well.
-Item 34: Enable persistent naming/number of SQL queries
+Item 26: Enable persistent naming/number of SQL queries
Date: 24 Jan, 2007
Origin: Mark Bergman
Status:
than by number.
-Item 35: Port bat to Win32
- Date: 26 April 2009
- Origin: Kern/Eric
- Status:
-
- What: Make bat run on Win32/64.
-
- Why: To have GUI on Windows
-
- Notes:
-
-
-Item 36: Bacula Dir, FD and SD to support proxies
+Item 27: Bacula Dir, FD and SD to support proxies
Origin: Karl Grindley @ MIT Lincoln Laboratory <kgrindley at ll dot mit dot edu>
Date: 25 March 2009
Status: proposed
One could also possibly use stunnel, netcat, etc.
-Item 37: Add Minumum Spool Size directive
+Item 28: Add Minimum Spool Size directive
Date: 20 March 2008
Origin: Frank Sweetser <fs@wpi.edu>
gigabytes) it can easily produce multi-megabyte report emails!
-Item 38: Backup and Restore of Windows Encrypted Files using Win raw encryption
+Item 29: Handle Windows Encrypted Files using Win raw encryption
Origin: Michael Mohr, SAG Mohr.External@infineon.com
Date: 22 February 2008
Origin: Alex Ehrlich (Alex.Ehrlich-at-mail.ee)
encrypted-file-related callback functions.
-Item 39: Implement an interface between Bacula and Storage clould like Amazon's S3.
+Item 30: Implement a Storage device like Amazon's S3.
Date: 25 August 2008
Origin: Soren Hansen <soren@ubuntu.com>
Status: Not started.
if bacula want to recycle a volume, it will start by downloading the
file to truncate it few seconds later, if we can avoid that...
-Item 40: Convert Bacula existing tray monitor on Windows to a stand alone program
+Item 31: Convert tray monitor on Windows to a stand alone program
Date: 26 April 2009
Origin: Kern/Eric
Status:
a console connection).
-
-========= End items voted on May 2009 ==================
-
-========= New items after last vote ====================
-
-Item 1: Relabel disk volume after recycling
+Item 32: Relabel disk volume after recycling
Origin: Pasi Kärkkäinen <pasik@iki.fi>
Date: 07 May 2009.
Status: Not implemented yet, no code written.
Notes: The configuration option could be "Relabel after Recycling = Yes".
-Item n: Command that releases all drives in an autochanger
+Item 33: Command that releases all drives in an autochanger
Origin: Blake Dunlap (blake@nxs.net)
Date: 10/07/2009
Status: Request
configuration quicker/easier, as all drives need to be released
before any modifications to slots.
-Item n: Run bscan on a remote storage daemon from within bconsole.
+Item 34: Run bscan on a remote storage daemon from within bconsole.
Date: 07 October 2009
Origin: Graham Keeling <graham@equiinet.com>
Status: Proposing
code is used in both the bscan program and the Storage daemon to avoid
adding a lot of new code that must be maintained by the project.
-Item n: Implement a Migration job type that will create a reverse
+Item 35: Implement a Migration job type that will create a reverse
incremental (or decremental) backup from two existing full backups.
Date: 05 October 2009
Origin: Griffith College Dublin. Some sponsorship available.
Notes: This feature was previously discussed on the bacula-devel list
here: http://www.mail-archive.com/bacula-devel@lists.sourceforge.net/msg04962.html
-Item n: Job migration between different SDs
-Origin: Mariusz Czulada <manieq AT wp DOT eu>
-Date: 07 May 2007
-Status: NEW
+Item 36: Job migration between different SDs
+Origin: Mariusz Czulada <manieq AT wp DOT eu>
+Date: 07 May 2007
+Status: NEW
What: Allow to specify in migration job devices on Storage Daemon other then
the one used for migrated jobs (possibly on different/distant host)
now, could be done the same way (i mean 'localhost') to unify the
whole process
-Item n: Concurrent spooling and despooling withini a single job.
+Item 37: Concurrent spooling and despooling within a single job.
Date: 17 nov 2009
Origin: Jesper Krogh <jesper@krogh.cc>
Status: NEW
and is harder to review for completeness. Subsequently it makes restores
more complex.
-Item 1: Extend the verify code to make it possible to verify
+Item 39: Extend the verify code to make it possible to verify
older jobs, not only the last one that has finished
Date: 10 April 2009
Origin: Ralf Gross (Ralf-Lists <at> ralfgross.de)
-Item n: Separate "Storage" and "Device" in the bacula-dir.conf
+Item 40: Separate "Storage" and "Device" in the bacula-dir.conf
Date: 29 April 2009
Origin: "James Harper" <james.harper@bendigoit.com.au>
Status: not implemented or documented
Notes:
+Item 41: Least recently used device selection for tape drives in autochanger.
+Date: 12 October 2009
+Origin: Thomas Carter <tcarter@memc.com>
+Status: Proposal
+
+What: A better tape drive selection algorithm for multi-drive
+ autochangers. The AUTOCHANGER class contains an array list of tape
+ devices. When a tape drive is needed, this list is always searched in
+ order. This causes lower number drives (specifically drive 0) to do a
+ majority of the work with higher numbered drives possibly never being
+ used. When a drive in an autochanger is reserved for use, its entry should
+ be moved to the end of the list; this would give a rough LRU drive
+ selection.
+
+Why: The current implementation places a majority of use and wear on drive
+ 0 of a multi-drive autochanger.
+
+Notes:
+
+========= New items after last vote ====================
========= Add new items above this line =================
========== Items put on hold by Kern ============================
+
+
+========== Items completed in version 5.0.0 ====================
+*Item 2: 'restore' menu: enter a JobId, automatically select dependents
+*Item 5: Deletion of disk Volumes when pruned (partial -- truncate when pruned)
+*Item 6: Implement Base jobs
+*Item 10: Restore from volumes on multiple storage daemons
+*Item 15: Enable/disable compression depending on storage device (disk/tape)
+*Item 20: Cause daemons to use a specific IP address to source communications
+*Item 23: "Maximum Concurrent Jobs" for drives when used with changer device
+*Item 31: List InChanger flag when doing restore.
+*Item 35: Port bat to Win32
# This is pretty much watered down version of the RedHat script
# that works on Solaris as well as Linux, but it won't work everywhere.
#
-# description: It comes by night and sucks the vital essence from your computers.
+# description: The Leading Open Source Backup Solution.
#
PSCMD="@PSCMD@"
# This is pretty much watered down version of the RedHat script
# that works on Solaris as well as Linux, but it won't work everywhere.
#
-# description: It comes by night and sucks the vital essence from your computers.
+# description: The Leading Open Source Backup Solution.
#
PSCMD="@PSCMD@"
# This is pretty much watered down version of the RedHat script
# that works on Solaris as well as Linux, but it won't work everywhere.
#
-# description: It comes by night and sucks the vital essence from your computers.
+# description: The Leading Open Source Backup Solution.
#
PSCMD="@PSCMD@"
# This is pretty much watered down version of the RedHat script
# that works on Solaris as well as Linux, but it won't work everywhere.
#
-# description: It comes by night and sucks the vital essence from your computers.
+# description: The Leading Open Source Backup Solution.
#
# All these are not *really* needed but it makes it
# btraceback.dbx
-echo "******** RUNNING LWPS/THREADS:"
+dbxenv language_mode c++
+
+echo "exename ==> \c"; print -l (char *)exename
+echo "exepath ==> \c"; print -l (char *)exepath
+echo "catalog_db ==> \c"; print -l (char *)catalog_db
+echo "version ==> \c"; print -l (char *)version
+echo "host_os ==> \c"; print -l (char *)host_os
+echo "distname ==> \c"; print -l (char *)distname
+echo "distver ==> \c"; print -l (char *)distver
+echo "dist_name ==> \c"; print -l (char *)dist_name
+echo "beef ==> \c"; print -l (int)beef
+
+echo "******** RUNNING THREADS/LWPS:"
echo
lwps
echo
echo
-echo "******** STACK TRACE OF CURRENT LWP:"
+echo "******** STACK TRACE OF CURRENT THREAD/LWP:"
echo
where
echo
echo
-echo "******** VARIABLES DUMP OF CURRENT LWP:"
+echo "******** VARIABLES DUMP OF CURRENT THREAD/LWP:"
echo
dump
-for LWP in 1 2 3 4 5 6 7 8; do
- (
- if lwp l@$LWP; then
- echo
- echo
- echo "******** STACK TRACE OF LWP ${LWP}:"
- echo
- where
+for LWP in $(lwps | sed -e 's/.*@//' -e 's/ .*//'); do
+(
+ if lwp l@$LWP; then
+ echo "******************************************"
+ echo
+ echo "******** STACK TRACE OF THREAD/LWP ${LWP}:"
+ echo
+ where
- echo
- echo
- echo "******** VARIABLES DUMP OF LWP ${LWP}:"
- echo
- dump
-
- fi
- )
+ echo
+ echo "******** VARIABLES DUMP OF THREAD/LWP ${LWP}:"
+ echo
+ dump
+ echo "******************************************"
+ fi
+)
done
quit
print distname
print distver
print host_name
+print dist_name
+print beef
show env TestName
bt
thread apply all bt
# $3 = working directory
#
PNAME=`basename $1`
-PNAME="${PNAME} on `hostname`"
WD="$3"
-if test `uname -s` = SunOS ; then
- gcore -o ${WD}/${PNAME} $2
- dbx $1 $2 <@scriptdir@/btraceback.dbx >${WD}/bacula.$2.traceback 2>&1
- cat ${WD}/bacula.$2.traceback \
- | @sbindir@/bsmtp -h @smtp_host@ -f @dump_email@ -s "Bacula DBX traceback of ${PNAME}" @dump_email@
-else
- gdb -quiet -batch -x @scriptdir@/btraceback.gdb $1 $2 >${WD}/bacula.$2.traceback 2>&1
- cat ${WD}/bacula.$2.traceback \
- | @sbindir@/bsmtp -h @smtp_host@ -f @dump_email@ -s "Bacula GDB traceback of ${PNAME}" @dump_email@
-fi
+case `uname -s` in
+SunOS)
+ #
+ # See what debuggers are available on this platform.
+ # We need to to some tricks to find out as a which on
+ # a non existing binary gives:
+ #
+ # no <debugger> in <PATH>
+ #
+ # So we use the return code which is 0 when it finds
+ # somethings and 1 if not.
+ #
+ which gdb > /dev/null 2>&1 && GDB=`which gdb` || GDB=''
+ which dbx > /dev/null 2>&1 && DBX=`which dbx` || DBX=''
+ which mdb > /dev/null 2>&1 && MDB=`which mdb` || MDB=''
+ gcore -o ${WD}/${PNAME} $2
+ if [ ! -z "${DBX}" ]; then
+ ${DBX} $1 $2 < @scriptdir@/btraceback.dbx > ${WD}/bacula.$2.traceback 2>&1
+ elif [ ! -z "${GDB}" ]; then
+ ${GDB} -quiet -batch -x @scriptdir@/btraceback.gdb $1 $2 > ${WD}/bacula.$2.traceback 2>&1
+ elif [ ! -z "${MDB}" ]; then
+ ${MDB} -u -p $2 < @scriptdir@/btraceback.mdb > ${WD}/bacula.$2.traceback 2>&1
+ fi
+ PNAME="${PNAME} on `hostname`"
+ cat ${WD}/bacula.$2.traceback \
+ | @sbindir@/bsmtp -h @smtp_host@ -f @dump_email@ -s "Bacula DBX traceback of ${PNAME}" @dump_email@
+ ;;
+*)
+ gdb -quiet -batch -x @scriptdir@/btraceback.gdb $1 $2 >${WD}/bacula.$2.traceback 2>&1
+ PNAME="${PNAME} on `hostname`"
+ cat ${WD}/bacula.$2.traceback \
+ | @sbindir@/bsmtp -h @smtp_host@ -f @dump_email@ -s "Bacula GDB traceback of ${PNAME}" @dump_email@
+ ;;
+esac
--- /dev/null
+# btraceback.mdb
+
+$G
+::echo "******** RUNNING LWPS/THREADS:"
+::echo
+::walk thread
+
+::echo
+::echo
+::echo "******** STACK TRACE OF CURRENT LWP:"
+::echo
+$C
+
+::echo
+::echo
+::echo "******** VARIABLES DUMP OF CURRENT LWP:"
+::echo
+
+::echo "******** STACK TRACE OF LWPS:"
+::walk thread | ::findstack
+
+::echo "******** VARIABLES DUMP OF LWPS:"
+
+::quit
# This is pretty much watered down version of the RedHat script
# that works on Solaris as well as Linux, but it won't work everywhere.
#
-# description: It comes by night and sucks the vital essence from your computers.
+# description: The Leading Open Source Backup Solution.
#
PSCMD="@PSCMD@"
#
# Written by Kern Sibbald
#
-# Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+# Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
#
# The main author of Bacula is Kern Sibbald, with contributions from
# many others, a complete list can be found in the file AUTHORS.
# Switzerland, email:ftf@fsfeurope.org.
#
#
-# $Id$
-#
# If you set in your Device resource
#
# Changer Command = "path-to-this-script/disk-changer %c %o %S %a %d"
# you will have the following input to this script:
#
# So Bacula will always call with all the following arguments, even though
-# in come cases, not all are used.
+# in some cases, not all are used. Note, the Volume name is not always
+# included.
#
-# disk-changer "changer-device" "command" "slot" "archive-device" "drive-index"
-# $1 $2 $3 $4 $5
+# disk-changer "changer-device" "command" "slot" "archive-device" "drive-index" "volume"
+# $1 $2 $3 $4 $5 $6
#
# By default the autochanger has 10 Volumes and 1 Drive.
#
#
# changer-device is the name of a file that overrides the default
# volumes and drives. It may have:
-# maxslot=n where n is one based (default 10)
-# maxdrive=m where m is zero based (default 1 -- i.e. 2 drives)
+# maxslot=n where n is one based (default 10)
+# maxdrive=m where m is zero based (default 1 -- i.e. 2 drives)
#
# This code can also simulate barcodes. You simply put
# a list of the slots and barcodes in the "base" directory/barcodes.
# any other part of the directory name. These restrictions could be
# easily removed by any clever script jockey.
#
-# Full example: disk-changer /var/bacula/conf load 1 /var/bacula/drive0 0
+# Full example: disk-changer /var/bacula/conf load 1 /var/bacula/drive0 0 TestVol001
#
# The Volumes will be created with names slot1, slot2, slot3, ... maxslot in the
# base directory. In the above example the base directory is /var/bacula.
# /var/bacula/slot3, ...) this script will create a /var/bacula/loadedn
# file to keep track of what Slot is loaded. You should not change this file.
#
+# Modified 8 June 2010 to accept Volume names from the calling program as arg 6.
+# In this case, rather than storing the data in slotn, it is stored in the
+# Volume name. Note: for this to work, Volume names may not include spaces.
#
wd=@working_dir@
dbgfile="$wd/disk-changer.log"
debug() {
if test -f $dbgfile; then
- echo "`date +\"%Y%m%d-%H:%M:%S\"` $*" >> $dbgfile
+ echo "`date +\"%Y%m%d-%H:%M:%S\"` $*" >> $dbgfile
fi
}
if test x${TMPFILE} = x; then
TMPFILE="$wd/disk-changer.$$"
if test -f ${TMPFILE}; then
- echo "Temp file security problem on: ${TMPFILE}"
- exit 1
+ echo "Temp file security problem on: ${TMPFILE}"
+ exit 1
fi
fi
}
pCount=$1
pCountNeed=$2
if test $pCount -lt $pCountNeed; then
- echo "usage: disk-changer ctl-device command [slot archive-device drive-index]"
- echo " Insufficient number of arguments arguments given."
- if test $pCount -lt 2; then
- echo " Mimimum usage is first two arguments ..."
- else
- echo " Command expected $pCountNeed arguments"
- fi
- exit 1
+ echo "usage: disk-changer ctl-device command [slot archive-device drive-index]"
+ echo " Insufficient number of arguments arguments given."
+ if test $pCount -lt 2; then
+ echo " Mimimum usage is first two arguments ..."
+ else
+ echo " Command expected $pCountNeed arguments"
+ fi
+ exit 1
fi
}
dir=`echo "$device" | sed -e s%/$bn%%g`
if [ ! -d $dir ]; then
echo "ERROR: Autochanger directory \"$dir\" does not exist."
- echo " You must create it."
+ echo " You must create it."
exit 1
fi
}
+#
+# Get the Volume name from the call line, or directly from
+# the volslotn information.
+#
+get_vol() {
+ havevol=0
+ debug "vol=$volume"
+ if test "x$volume" != x && test "x$volume" != "x*NONE*" ; then
+ debug "touching $dir/$volume"
+ touch $dir/$volume
+ echo "$volume" >$dir/volslot${slot}
+ havevol=1
+ elif [ -f $dir/volslot${slot} ]; then
+ volume=`cat $dir/volslot${slot}`
+ havevol=1
+ fi
+}
+
# Setup arguments
ctl=$1
slot=$3
device=$4
drive=$5
+volume=$6
# set defaults
maxdrive=1
#
case $2 in
list|listall)
- check_parm_count $# 2
- ;;
+ check_parm_count $# 2
+ ;;
slots)
- check_parm_count $# 2
- ;;
+ check_parm_count $# 2
+ ;;
transfer)
- check_parm_count $# 4
- if [ $slot -gt $maxslot ]; then
- echo "Slot ($slot) out of range (1-$maxslot)"
- exit 1
- fi
- ;;
+ check_parm_count $# 4
+ if [ $slot -gt $maxslot ]; then
+ echo "Slot ($slot) out of range (1-$maxslot)"
+ exit 1
+ fi
+ ;;
*)
- check_parm_count $# 5
- if [ $drive -gt $maxdrive ]; then
- echo "Drive ($drive) out of range (0-$maxdrive)"
- exit 1
- fi
- if [ $slot -gt $maxslot ]; then
- echo "Slot ($slot) out of range (1-$maxslot)"
- exit 1
- fi
- ;;
+ check_parm_count $# 5
+ if [ $drive -gt $maxdrive ]; then
+ echo "Drive ($drive) out of range (0-$maxdrive)"
+ exit 1
+ fi
+ if [ $slot -gt $maxslot ]; then
+ echo "Slot ($slot) out of range (1-$maxslot)"
+ exit 1
+ fi
+ ;;
esac
-
-debug "Parms: $ctl $cmd $slot $device $drive"
+debug "Parms: $ctl $cmd $slot $device $drive $volume $havevol"
case $cmd in
unload)
- debug "Doing disk -f $ctl unload $slot $device $drive"
+ debug "Doing disk -f $ctl unload $slot $device $drive $volume"
get_dir
if [ -f $dir/loaded${drive} ]; then
- ld=`cat $dir/loaded${drive}`
+ ld=`cat $dir/loaded${drive}`
else
- echo "Storage Element $slot is Already Full"
- exit 1
+ echo "Storage Element $slot is Already Full"
+ exit 1
fi
if [ $slot -eq $ld ]; then
- echo "0" >$dir/loaded${drive}
- unlink $device 2>/dev/null >/dev/null
- rm -f $device
+ echo "0" >$dir/loaded${drive}
+ unlink $device 2>/dev/null >/dev/null
+ rm -f $device
else
- echo "Storage Element $slot is Already Full"
- exit 1
+ echo "Storage Element $slot is Already Full"
+ exit 1
fi
;;
load)
- debug "Doing disk $ctl load $slot $device $drive"
+ debug "Doing disk $ctl load $slot $device $drive $volume"
get_dir
i=0
while [ $i -le $maxdrive ]; do
- if [ -f $dir/loaded${i} ]; then
- ld=`cat $dir/loaded${i}`
- else
- ld=0
- fi
- if [ $ld -eq $slot ]; then
- echo "Drive ${i} Full (Storage element ${ld} loaded)"
- exit 1
- fi
- i=`expr $i + 1`
+ if [ -f $dir/loaded${i} ]; then
+ ld=`cat $dir/loaded${i}`
+ else
+ ld=0
+ fi
+ if [ $ld -eq $slot ]; then
+ echo "Drive ${i} Full (Storage element ${ld} loaded)"
+ exit 1
+ fi
+ i=`expr $i + 1`
done
- # check if slot exists
- if [ ! -f $dir/slot${slot} ] ; then
- echo "source Element Address $slot is Empty"
- exit 1
+ # Check if we have a Volume name
+ get_vol
+ if [ $havevol -eq 0 ]; then
+ # check if slot exists
+ if [ ! -f $dir/slot${slot} ] ; then
+ echo "source Element Address $slot is Empty"
+ exit 1
+ fi
fi
if [ -f $dir/loaded${drive} ]; then
- ld=`cat $dir/loaded${drive}`
+ ld=`cat $dir/loaded${drive}`
else
- ld=0
+ ld=0
fi
if [ $ld -ne 0 ]; then
- echo "Drive ${drive} Full (Storage element ${ld} loaded)"
- exit 1
+ echo "Drive ${drive} Full (Storage element ${ld} loaded)"
+ exit 1
fi
echo "0" >$dir/loaded${drive}
unlink $device 2>/dev/null >/dev/null
rm -f $device
- ln -s $dir/slot${slot} $device
- rtn=$?
+ if [ $havevol -ne 0 ]; then
+ ln -s $dir/$volume $device
+ rtn=$?
+ else
+ ln -s $dir/slot${slot} $device
+ rtn=$?
+ fi
if [ $rtn -eq 0 ]; then
- echo $slot >$dir/loaded${drive}
+ echo $slot >$dir/loaded${drive}
fi
exit $rtn
;;
debug "Doing disk -f $ctl -- to list volumes"
get_dir
if [ -f $dir/barcodes ]; then
- cat $dir/barcodes
+ cat $dir/barcodes
else
- i=1
- while [ $i -le $maxslot ]; do
- echo "$i:"
- i=`expr $i + 1`
- done
+ i=1
+ while [ $i -le $maxslot ]; do
+ slot=$i
+ volume=
+ get_vol
+ if [ $havevol -eq 0 ]; then
+ echo "$i:"
+ else
+ echo "$i:$volume"
+ fi
+ i=`expr $i + 1`
+ done
fi
exit 0
;;
listall)
+ # ***FIXME*** must add new Volume stuff
make_temp_file
debug "Doing disk -f $ctl -- to list volumes"
get_dir
if [ ! -f $dir/barcodes ]; then
- exit 0
+ exit 0
fi
# we print drive content seen by autochanger
# and we also remove loaded media from the barcode list
i=0
while [ $i -le $maxdrive ]; do
- if [ -f $dir/loaded${i} ]; then
- ld=`cat $dir/loaded${i}`
- v=`awk -F: "/^$ld:/"' { print $2 }' $dir/barcodes`
- echo "D:$i:F:$ld:$v"
- echo "^$ld:" >> $TMPFILE
- fi
- i=`expr $i + 1`
+ if [ -f $dir/loaded${i} ]; then
+ ld=`cat $dir/loaded${i}`
+ v=`awk -F: "/^$ld:/"' { print $2 }' $dir/barcodes`
+ echo "D:$i:F:$ld:$v"
+ echo "^$ld:" >> $TMPFILE
+ fi
+ i=`expr $i + 1`
done
# Empty slots are not in barcodes file
grep -v -f $TMPFILE $dir/barcodes | sort -n | \
perl -ne 'BEGIN { $cur=1 }
if (/(\d+):(.+)?/) {
- if ($cur == $1) {
- print "S:$1:F:$2\n"
- } else {
- while ($cur < $1) {
- print "S:$cur:E\n";
- $cur++;
- }
- }
- $cur++;
+ if ($cur == $1) {
+ print "S:$1:F:$2\n"
+ } else {
+ while ($cur < $1) {
+ print "S:$cur:E\n";
+ $cur++;
+ }
+ }
+ $cur++;
}
END { while ($cur < '"$maxslot"') { print "S:$cur:E\n"; $cur++; } } '
exit 0
;;
transfer)
+ # ***FIXME*** must add new Volume stuff
get_dir
make_temp_file
slotdest=$device
if [ -f $dir/slot{$slotdest} ]; then
- echo "destination Element Address $slot is Full"
- exit 1
+ echo "destination Element Address $slot is Full"
+ exit 1
fi
if [ ! -f $dir/slot${slot} ] ; then
- echo "source Element Address $slot is Empty"
- exit 1
+ echo "source Element Address $slot is Empty"
+ exit 1
fi
echo "Transfering $slot to $slotdest"
mv $dir/slot${slot} $dir/slot{$slotdest}
if [ -f $dir/barcodes ]; then
- sed "s/^$slot:/$slotdest:/" > $TMPFILE
- sort -n $TMPFILE > $dir/barcodes
+ sed "s/^$slot:/$slotdest:/" > $TMPFILE
+ sort -n $TMPFILE > $dir/barcodes
fi
exit 0
;;
debug "Doing disk -f $ctl $drive -- to find what is loaded"
get_dir
if [ -f $dir/loaded${drive} ]; then
- cat $dir/loaded${drive}
+ cat $dir/loaded${drive}
else
- echo "0"
+ echo "0"
fi
exit
;;
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
*/
-/*
+/**
* General header file configurations that apply to
* all daemons. System dependent stuff goes here.
*
- * Version $Id$
*/
#define WIN32_REPARSE_POINT 1
#define WIN32_MOUNT_POINT 2
+/* Reduce compiler warnings from Windows vss code */
+#define uuid(x)
+
void InitWinAPIWrapper();
#define OSDependentInit() InitWinAPIWrapper()
#define B_DEV_BSIZE 512
#endif
-/*
+/**
* Set to time limit for other end to respond to
* authentication. Normally 10 minutes is *way*
* more than enough. The idea is to keep the Director
*/
#define DEFAULT_NETWORK_BUFFER_SIZE (64 * 1024)
-/*
+/**
* Stream definitions. Once defined these must NEVER
* change as they go on the storage media.
* Note, the following streams are passed from the SD to the DIR
#define STREAM_PLUGIN_DATA 27 /* Plugin specific data */
#define STREAM_RESTORE_OBJECT 28 /* Plugin restore object */
-/*
+/**
* Additional Stream definitions. Once defined these must NEVER
* change as they go on the storage media.
*
#define STREAM_ACL_SOLARIS_ACE 1013 /* Solaris specific ace_t string representation from
* from acl_totext (NFSv4 or ZFS acl)
*/
+#define STREAM_ACL_AFS_TEXT 1014 /* AFS specific string representation from pioctl */
#define STREAM_XATTR_OPENBSD 1993 /* OpenBSD specific extended attributes */
#define STREAM_XATTR_SOLARIS_SYS 1994 /* Solaris specific extensible attributes or
* otherwise named extended system attributes.
#define STREAM_XATTR_LINUX 1998 /* Linux specific extended attributes */
#define STREAM_XATTR_NETBSD 1999 /* NetBSD specific extended attributes */
-/*
+/**
* File type (Bacula defined).
* NOTE!!! These are saved in the Attributes record on the tape, so
* do not change them. If need be, add to them.
#define FT_NOOPEN 15 /* Could not open directory */
#define FT_RAW 16 /* Raw block device */
#define FT_FIFO 17 /* Raw fifo device */
-/* The DIRBEGIN packet is sent to the FD file processing routine so
+/**
+ * The DIRBEGIN packet is sent to the FD file processing routine so
* that it can filter packets, but otherwise, it is not used
* or saved */
#define FT_DIRBEGIN 18 /* Directory at beginning (not saved) */
#define FT_PLUGIN 22 /* Plugin generated filename */
#define FT_DELETED 23 /* Deleted file entry */
#define FT_BASE 24 /* Duplicate base file entry */
+#define FT_RESTORE_FIRST 25 /* Restore this "object" first */
/* Definitions for upper part of type word (see above). */
#define AR_DATA_STREAM (1<<16) /* Data stream id present */
-/*
+/**
* Tape label types -- stored in catalog
*/
#define B_BACULA_LABEL 0
#define B_ANSI_LABEL 1
#define B_IBM_LABEL 2
-/*
+/**
* Actions on purge (bit mask)
*/
-#define AOP_TRUNCATE 1
+#define ON_PURGE_TRUNCATE 1
+#define AOP_TRUNCATE 1
/* Size of File Address stored in STREAM_SPARSE_DATA. Do NOT change! */
#define SPARSE_FADDR_SIZE (sizeof(uint64_t))
#define CRYPTO_LEN_SIZE ((int)sizeof(uint32_t))
-/* This is for dumb compilers/libraries like Solaris. Linux GCC
+/**
+ * This is for dumb compilers/libraries like Solaris. Linux GCC
* does it correctly, so it might be worthwhile
* to remove the isascii(c) with ifdefs on such
* "smart" systems.
#define B_ISUPPER(c) (isascii((int)(c)) && isupper((int)(c)))
#define B_ISDIGIT(c) (isascii((int)(c)) && isdigit((int)(c)))
-/* For multiplying by 10 with shift and addition */
+/** For multiplying by 10 with shift and addition */
#define B_TIMES10(d) ((d<<3)+(d<<1))
#define S_ISLNK(m) (((m) & S_IFM) == S_IFLNK)
#endif
-/* Added by KES to deal with Win32 systems */
+/** Added by KES to deal with Win32 systems */
#ifndef S_ISWIN32
#define S_ISWIN32 020000
#endif
#endif
-/*
+/**
* The digit following Dmsg and Emsg indicates the number of substitutions in
* the message string. We need to do this kludge because non-GNU compilers
* do not handle varargs #defines.
*/
-/* Debug Messages that are printed */
+/** Debug Messages that are printed */
#ifdef DEBUG
#define Dmsg0(lvl, msg) if ((lvl)<=debug_level) d_msg(__FILE__, __LINE__, lvl, msg)
#define Dmsg1(lvl, msg, a1) if ((lvl)<=debug_level) d_msg(__FILE__, __LINE__, lvl, msg, a1)
-/* Messages that are printed (uses d_msg) */
+/** Messages that are printed (uses d_msg) */
#define Pmsg0(lvl, msg) p_msg(__FILE__, __LINE__, lvl, msg)
#define Pmsg1(lvl, msg, a1) p_msg(__FILE__, __LINE__, lvl, msg, a1)
#define Pmsg2(lvl, msg, a1, a2) p_msg(__FILE__, __LINE__, lvl, msg, a1, a2)
#define Pmsg14(lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14) p_msg(__FILE__,__LINE__,lvl,msg,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14)
-/* Daemon Error Messages that are delivered according to the message resource */
+/** Daemon Error Messages that are delivered according to the message resource */
#define Emsg0(typ, lvl, msg) e_msg(__FILE__, __LINE__, typ, lvl, msg)
#define Emsg1(typ, lvl, msg, a1) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1)
#define Emsg2(typ, lvl, msg, a1, a2) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2)
#define Emsg5(typ, lvl, msg, a1, a2, a3, a4, a5) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3, a4, a5)
#define Emsg6(typ, lvl, msg, a1, a2, a3, a4, a5, a6) e_msg(__FILE__, __LINE__, typ, lvl, msg, a1, a2, a3, a4, a5, a6)
-/* Job Error Messages that are delivered according to the message resource */
+/** Job Error Messages that are delivered according to the message resource */
#define Jmsg0(jcr, typ, lvl, msg) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg)
#define Jmsg1(jcr, typ, lvl, msg, a1) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1)
#define Jmsg2(jcr, typ, lvl, msg, a1, a2) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2)
#define Jmsg5(jcr, typ, lvl, msg, a1, a2, a3, a4, a5) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5)
#define Jmsg6(jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6) j_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6)
-/* Queued Job Error Messages that are delivered according to the message resource */
+/** Queued Job Error Messages that are delivered according to the message resource */
#define Qmsg0(jcr, typ, lvl, msg) q_msg(__FILE__, __LINE__, jcr, typ, lvl, msg)
#define Qmsg1(jcr, typ, lvl, msg, a1) q_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1)
#define Qmsg2(jcr, typ, lvl, msg, a1, a2) q_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2)
#define Qmsg6(jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6) q_msg(__FILE__, __LINE__, jcr, typ, lvl, msg, a1, a2, a3, a4, a5, a6)
-/* Memory Messages that are edited into a Pool Memory buffer */
+/** Memory Messages that are edited into a Pool Memory buffer */
#define Mmsg0(buf, msg) m_msg(__FILE__, __LINE__, buf, msg)
#define Mmsg1(buf, msg, a1) m_msg(__FILE__, __LINE__, buf, msg, a1)
#define Mmsg2(buf, msg, a1, a2) m_msg(__FILE__, __LINE__, buf, msg, a1, a2)
int m_msg(const char *file, int line, POOLMEM *&pool_buf, const char *fmt, ...);
-/* Use our strdup with smartalloc */
+/** Use our strdup with smartalloc */
#ifndef HAVE_WXCONSOLE
#undef strdup
#define strdup(buf) bad_call_on_strdup_use_bstrdup(buf)
#endif
#endif
-/* Use our fgets which handles interrupts */
+/** Use our fgets which handles interrupts */
#undef fgets
#define fgets(x,y,z) bfgets((x), (y), (z))
-/* Use our sscanf, which is safer and works with known sizes */
+/** Use our sscanf, which is safer and works with known sizes */
#define sscanf bsscanf
#ifdef DEBUG
#define bmalloc(size) b_malloc(__FILE__, __LINE__, (size))
#endif
-/* Macro to simplify free/reset pointers */
+/** Macro to simplify free/reset pointers */
#define bfree_and_null(a) do{if(a){free(a); (a)=NULL;}} while(0)
-/*
+/**
* Replace codes needed in both file routines and non-file routines
* Job replace codes -- in "replace"
*/
#define REPLACE_NEVER 'n'
#define REPLACE_IFOLDER 'o'
-/* This probably should be done on a machine by machine basis, but it works */
-/* This is critical for the smartalloc routines to properly align memory */
+/** This probably should be done on a machine by machine basis, but it works */
+/** This is critical for the smartalloc routines to properly align memory */
#define ALIGN_SIZE (sizeof(double))
#define BALIGN(x) (((x) + ALIGN_SIZE - 1) & ~(ALIGN_SIZE -1))
#ifdef HAVE_SUN_OS
- /*
+ /**
* On Solaris 2.5, threads are not timesliced by default, so we need to
* explictly increase the conncurrency level.
*/
#else
-/* Not needed on most systems */
+/** Not needed on most systems */
#define set_thread_concurrency(x)
#endif
#endif
-/* HP-UX 11 specific workarounds */
+/** HP-UX 11 specific workarounds */
#ifdef HAVE_HPUX_OS
# undef h_errno
extern int h_errno;
-/* the {get,set}domainname() functions exist in HPUX's libc.
+/** the {get,set}domainname() functions exist in HPUX's libc.
* the configure script detects that correctly.
* the problem is no system headers declares the prototypes for these functions
* this is done below
#endif
-/* Disabled because it breaks internationalisation...
+/** Disabled because it breaks internationalisation...
#undef HAVE_SETLOCALE
#ifdef HAVE_SETLOCALE
#include <locale.h>
#endif
*/
-/* Determine endiannes */
+/** Determine endianness */
static inline bool bigendian() { return htonl(1) == 1L; }
#endif /* _BACONFIG_H */
LIBBACSQL_SRCS = mysql.c dbi.c \
sql.c sql_cmds.c sql_create.c sql_delete.c sql_find.c \
sql_get.c sql_list.c sql_update.c sqlite.c \
- postgresql.c ingres.c myingres.c \
+ postgresql.c \
bvfs.c
LIBBACSQL_OBJS = $(LIBBACSQL_SRCS:.c=.o)
LIBBACSQL_LOBJS = $(LIBBACSQL_SRCS:.c=.lo)
esql:
@echo "Generating myingres.c from myingres.sc"
- $(NO_ECHO)$(II_SYSTEM)/ingres/bin/esqlcc -extension=c myingres.sc
+ $(NO_ECHO)$(II_SYSTEM)/ingres/bin/esqlcc -multi -extension=c myingres.sc
@echo "Generating myingres.h from myingres.sh"
$(NO_ECHO)$(II_SYSTEM)/ingres/bin/esqlcc -extension=h myingres.sh
+++ /dev/null
-/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2000-2008 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version three of the GNU Affero General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
-*/
-/*
- * Bacula Catalog Database routines written specifically
- * for Bacula. Note, these routines are VERY dumb and
- * do not provide all the functionality of an SQL database.
- * The purpose of these routines is to ensure that Bacula
- * can limp along if no real database is loaded on the
- * system.
- *
- * Kern Sibbald, January MMI
- *
- * Version $Id$
- *
- */
-
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
-#include "bacula.h"
-#include "cats.h"
-
-#ifdef HAVE_BACULA_DB
-
-uint32_t bacula_db_version = 0;
-
-int db_type = 0;
-
-/* List of open databases */
-static BQUEUE db_list = {&db_list, &db_list};
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* -----------------------------------------------------------------------
- *
- * Bacula specific defines and subroutines
- *
- * -----------------------------------------------------------------------
- */
-
-
-#define DB_CONTROL_FILENAME "control.db"
-#define DB_JOBS_FILENAME "jobs.db"
-#define DB_POOLS_FILENAME "pools.db"
-#define DB_MEDIA_FILENAME "media.db"
-#define DB_JOBMEDIA_FILENAME "jobmedia.db"
-#define DB_CLIENT_FILENAME "client.db"
-#define DB_FILESET_FILENAME "fileset.db"
-
-
-B_DB *db_init(JCR *jcr, const char *db_driver, const char *db_name, const char *db_user,
- const char *db_password, const char *db_address, int db_port,
- const char *db_socket, int mult_db_connections)
-{
- return db_init_database(jcr, db_name, db_user, db_password, db_address,
- db_port, db_socket, mult_db_connections);
-}
-
-
-dbid_list::dbid_list()
-{
- memset(this, 0, sizeof(dbid_list));
- max_ids = 1000;
- DBId = (DBId_t *)malloc(max_ids * sizeof(DBId_t));
- num_ids = num_seen = tot_ids = 0;
- PurgedFiles = NULL;
-}
-
-dbid_list::~dbid_list()
-{
- free(DBId);
-}
-
-static POOLMEM *make_filename(B_DB *mdb, const char *name)
-{
- char sep;
- POOLMEM *dbf;
-
- dbf = get_pool_memory(PM_FNAME);
- if (IsPathSeparator(working_directory[strlen(working_directory)-1])) {
- sep = 0;
- } else {
- sep = '/';
- }
- Mmsg(dbf, "%s%c%s-%s", working_directory, sep, mdb->db_name, name);
- return dbf;
-}
-
-int bdb_write_control_file(B_DB *mdb)
-{
- mdb->control.time = time(NULL);
- lseek(mdb->cfd, 0, SEEK_SET);
- if (write(mdb->cfd, &mdb->control, sizeof(mdb->control)) != sizeof(mdb->control)) {
- Mmsg1(&mdb->errmsg, "Error writing control file. ERR=%s\n", strerror(errno));
- Emsg0(M_FATAL, 0, mdb->errmsg);
- return 0;
- }
- return 1;
-}
-
-/*
- * Retrieve database type
- */
-const char *
-db_get_type(void)
-{
- return "Internal";
-}
-
-/*
- * Initialize database data structure. In principal this should
- * never have errors, or it is really fatal.
- */
-B_DB *
-db_init_database(JCR *jcr, char const *db_name, char const *db_user, char const *db_password,
- char const *db_address, int db_port, char const *db_socket,
- int mult_db_connections)
-{
- B_DB *mdb;
- P(mutex); /* lock DB queue */
- /* Look to see if DB already open */
- for (mdb=NULL; (mdb=(B_DB *)qnext(&db_list, &mdb->bq)); ) {
- if (strcmp(mdb->db_name, db_name) == 0) {
- Dmsg2(200, "DB REopen %d %s\n", mdb->ref_count, db_name);
- mdb->ref_count++;
- V(mutex);
- return mdb; /* already open */
- }
- }
-
- Dmsg0(200, "db_open first time\n");
- mdb = (B_DB *)malloc(sizeof(B_DB));
- memset(mdb, 0, sizeof(B_DB));
- Dmsg0(200, "DB struct init\n");
- mdb->db_name = bstrdup(db_name);
- mdb->errmsg = get_pool_memory(PM_EMSG);
- *mdb->errmsg = 0;
- mdb->cmd = get_pool_memory(PM_EMSG); /* command buffer */
- mdb->ref_count = 1;
- mdb->cached_path = get_pool_memory(PM_FNAME);
- mdb->cached_path_id = 0;
- qinsert(&db_list, &mdb->bq); /* put db in list */
- Dmsg0(200, "Done db_open_database()\n");
- mdb->cfd = -1;
- V(mutex);
- Jmsg(jcr, M_WARNING, 0, _("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"));
- Jmsg(jcr, M_WARNING, 0, _("WARNING!!!! The Internal Database is NOT OPERATIONAL!\n"));
- Jmsg(jcr, M_WARNING, 0, _("You should use SQLite, PostgreSQL, or MySQL\n"));
-
- return mdb;
-}
-
-/*
- * Now actually open the database. This can generate errors,
- * which are returned in the errmsg
- */
-int
-db_open_database(JCR *jcr, B_DB *mdb)
-{
- char *dbf;
- int fd, badctl;
- off_t filend;
- int errstat;
-
- Dmsg1(200, "db_open_database() %s\n", mdb->db_name);
-
- P(mutex);
-
- if ((errstat=rwl_init(&mdb->lock)) != 0) {
- Mmsg1(&mdb->errmsg, _("Unable to initialize DB lock. ERR=%s\n"), strerror(errstat));
- V(mutex);
- return 0;
- }
-
- Dmsg0(200, "make_filename\n");
- dbf = make_filename(mdb, DB_CONTROL_FILENAME);
- mdb->cfd = open(dbf, O_CREAT|O_RDWR, 0600);
- free_memory(dbf);
- if (mdb->cfd < 0) {
- Mmsg2(&mdb->errmsg, _("Unable to open Catalog DB control file %s: ERR=%s\n"),
- dbf, strerror(errno));
- V(mutex);
- return 0;
- }
- Dmsg0(200, "DB open\n");
- /* See if the file was previously written */
- filend = lseek(mdb->cfd, 0, SEEK_END);
- if (filend == 0) { /* No, initialize everything */
- Dmsg0(200, "Init DB files\n");
- memset(&mdb->control, 0, sizeof(mdb->control));
- mdb->control.bdb_version = BDB_VERSION;
- bdb_write_control_file(mdb);
-
- /* Create Jobs File */
- dbf = make_filename(mdb, DB_JOBS_FILENAME);
- fd = open(dbf, O_CREAT|O_RDWR, 0600);
- free_memory(dbf);
- close(fd);
-
- /* Create Pools File */
- dbf = make_filename(mdb, DB_POOLS_FILENAME);
- fd = open(dbf, O_CREAT|O_RDWR, 0600);
- free_memory(dbf);
- close(fd);
-
- /* Create Media File */
- dbf = make_filename(mdb, DB_MEDIA_FILENAME);
- fd = open(dbf, O_CREAT|O_RDWR, 0600);
- free_memory(dbf);
- close(fd);
-
- /* Create JobMedia File */
- dbf = make_filename(mdb, DB_JOBMEDIA_FILENAME);
- fd = open(dbf, O_CREAT|O_RDWR, 0600);
- free_memory(dbf);
- close(fd);
-
- /* Create Client File */
- dbf = make_filename(mdb, DB_CLIENT_FILENAME);
- fd = open(dbf, O_CREAT|O_RDWR, 0600);
- free_memory(dbf);
- close(fd);
-
- /* Create FileSet File */
- dbf = make_filename(mdb, DB_FILESET_FILENAME);
- fd = open(dbf, O_CREAT|O_RDWR, 0600);
- free_memory(dbf);
- close(fd);
- }
-
- Dmsg0(200, "Read control file\n");
- badctl = 0;
- lseek(mdb->cfd, 0, SEEK_SET); /* seek to begining of control file */
- if (read(mdb->cfd, &mdb->control, sizeof(mdb->control)) != sizeof(mdb->control)) {
- Mmsg1(&mdb->errmsg, _("Error reading catalog DB control file. ERR=%s\n"), strerror(errno));
- badctl = 1;
- } else if (mdb->control.bdb_version != BDB_VERSION) {
- Mmsg2(&mdb->errmsg, _("Error, catalog DB control file wrong version. "
-"Wanted %d, got %d\n"
-"Please reinitialize the working directory.\n"),
- BDB_VERSION, mdb->control.bdb_version);
- badctl = 1;
- }
- bacula_db_version = mdb->control.bdb_version;
- if (badctl) {
- V(mutex);
- return 0;
- }
- V(mutex);
- return 1;
-}
-
-void db_close_database(JCR *jcr, B_DB *mdb)
-{
- P(mutex);
- mdb->ref_count--;
- if (mdb->ref_count == 0) {
- qdchain(&mdb->bq);
- /* close file descriptors */
- if (mdb->cfd >= 0) {
- close(mdb->cfd);
- }
- free(mdb->db_name);
- if (mdb->jobfd) {
- fclose(mdb->jobfd);
- }
- if (mdb->poolfd) {
- fclose(mdb->poolfd);
- }
- if (mdb->mediafd) {
- fclose(mdb->mediafd);
- }
- if (mdb->jobmediafd) {
- fclose(mdb->jobmediafd);
- }
- if (mdb->clientfd) {
- fclose(mdb->clientfd);
- }
- if (mdb->filesetfd) {
- fclose(mdb->filesetfd);
- }
- rwl_destroy(&mdb->lock);
- free_pool_memory(mdb->errmsg);
- free_pool_memory(mdb->cmd);
- free_pool_memory(mdb->cached_path);
- free(mdb);
- }
- V(mutex);
-}
-
-void db_thread_cleanup()
-{ }
-
-
-void db_escape_string(JCR *jcr, B_DB *db, char *snew, char *old, int len)
-{
- memset(snew, 0, len);
- bstrncpy(snew, old, len);
-}
-
-char *db_strerror(B_DB *mdb)
-{
- return mdb->errmsg;
-}
-
-bool db_sql_query(B_DB *mdb, char const *query, DB_RESULT_HANDLER *result_handler, void *ctx)
-{
- return true;
-}
-
-/*
- * Open the Jobs file for reading/writing
- */
-int bdb_open_jobs_file(B_DB *mdb)
-{
- char *dbf;
-
- if (!mdb->jobfd) {
- dbf = make_filename(mdb, DB_JOBS_FILENAME);
- mdb->jobfd = fopen(dbf, "r+b");
- if (!mdb->jobfd) {
- Mmsg2(&mdb->errmsg, "Error opening DB Jobs file %s: ERR=%s\n",
- dbf, strerror(errno));
- Emsg0(M_FATAL, 0, mdb->errmsg);
- free_memory(dbf);
- return 0;
- }
- free_memory(dbf);
- }
- return 1;
-}
-
-/*
- * Open the JobMedia file for reading/writing
- */
-int bdb_open_jobmedia_file(B_DB *mdb)
-{
- char *dbf;
-
- if (!mdb->jobmediafd) {
- dbf = make_filename(mdb, DB_JOBMEDIA_FILENAME);
- mdb->jobmediafd = fopen(dbf, "r+b");
- if (!mdb->jobmediafd) {
- Mmsg2(&mdb->errmsg, "Error opening DB JobMedia file %s: ERR=%s\n",
- dbf, strerror(errno));
- Emsg0(M_FATAL, 0, mdb->errmsg);
- free_memory(dbf);
- return 0;
- }
- free_memory(dbf);
- }
- return 1;
-}
-
-
-/*
- * Open the Pools file for reading/writing
- */
-int bdb_open_pools_file(B_DB *mdb)
-{
- char *dbf;
-
- if (!mdb->poolfd) {
- dbf = make_filename(mdb, DB_POOLS_FILENAME);
- mdb->poolfd = fopen(dbf, "r+b");
- if (!mdb->poolfd) {
- Mmsg2(&mdb->errmsg, "Error opening DB Pools file %s: ERR=%s\n",
- dbf, strerror(errno));
- Emsg0(M_FATAL, 0, mdb->errmsg);
- free_memory(dbf);
- return 0;
- }
- Dmsg1(200, "Opened pool file %s\n", dbf);
- free_memory(dbf);
- }
- return 1;
-}
-
-/*
- * Open the Client file for reading/writing
- */
-int bdb_open_client_file(B_DB *mdb)
-{
- char *dbf;
-
- if (!mdb->clientfd) {
- dbf = make_filename(mdb, DB_CLIENT_FILENAME);
- mdb->clientfd = fopen(dbf, "r+b");
- if (!mdb->clientfd) {
- Mmsg2(&mdb->errmsg, "Error opening DB Clients file %s: ERR=%s\n",
- dbf, strerror(errno));
- Emsg0(M_FATAL, 0, mdb->errmsg);
- free_memory(dbf);
- return 0;
- }
- free_memory(dbf);
- }
- return 1;
-}
-
-/*
- * Open the FileSet file for reading/writing
- */
-int bdb_open_fileset_file(B_DB *mdb)
-{
- char *dbf;
-
- if (!mdb->filesetfd) {
- dbf = make_filename(mdb, DB_CLIENT_FILENAME);
- mdb->filesetfd = fopen(dbf, "r+b");
- if (!mdb->filesetfd) {
- Mmsg2(&mdb->errmsg, "Error opening DB FileSet file %s: ERR=%s\n",
- dbf, strerror(errno));
- Emsg0(M_FATAL, 0, mdb->errmsg);
- free_memory(dbf);
- return 0;
- }
- free_memory(dbf);
- }
- return 1;
-}
-
-
-
-/*
- * Open the Media file for reading/writing
- */
-int bdb_open_media_file(B_DB *mdb)
-{
- char *dbf;
-
- if (!mdb->mediafd) {
- dbf = make_filename(mdb, DB_MEDIA_FILENAME);
- mdb->mediafd = fopen(dbf, "r+b");
- if (!mdb->mediafd) {
- Mmsg2(&mdb->errmsg, "Error opening DB Media file %s: ERR=%s\n",
- dbf, strerror(errno));
- free_memory(dbf);
- return 0;
- }
- free_memory(dbf);
- }
- return 1;
-}
-
-
-void _db_lock(const char *file, int line, B_DB *mdb)
-{
- int errstat;
- if ((errstat=rwl_writelock(&mdb->lock)) != 0) {
- e_msg(file, line, M_ABORT, 0, "rwl_writelock failure. ERR=%s\n",
- strerror(errstat));
- }
-}
-
-void _db_unlock(const char *file, int line, B_DB *mdb)
-{
- int errstat;
- if ((errstat=rwl_writeunlock(&mdb->lock)) != 0) {
- e_msg(file, line, M_ABORT, 0, "rwl_writeunlock failure. ERR=%s\n",
- strerror(errstat));
- }
-}
-
-/*
- * Start a transaction. This groups inserts and makes things
- * much more efficient. Usually started when inserting
- * file attributes.
- */
-void db_start_transaction(JCR *jcr, B_DB *mdb)
-{
-}
-
-void db_end_transaction(JCR *jcr, B_DB *mdb)
-{
-}
-
-bool db_update_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *sr)
-{ return true; }
-
-void
-db_list_pool_records(JCR *jcr, B_DB *mdb, POOL_DBR *pdbr,
- DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-int db_int64_handler(void *ctx, int num_fields, char **row)
-{ return 0; }
-
-bool db_create_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
-{
- return true;
-}
-
-int db_create_file_item(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
-{
- return 1;
-}
-
-
-/*
- * Create a new record for the Job
- * This record is created at the start of the Job,
- * it is updated in bdb_update.c when the Job terminates.
- *
- * Returns: 0 on failure
- * 1 on success
- */
-bool db_create_job_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
-{
- return 0;
-}
-
-/* Create a JobMedia record for Volume used this job
- * Returns: 0 on failure
- * record-id on success
- */
-bool db_create_jobmedia_record(JCR *jcr, B_DB *mdb, JOBMEDIA_DBR *jm)
-{
- return 0;
-}
-
-
-/*
- * Create a unique Pool record
- * Returns: 0 on failure
- * 1 on success
- */
-bool db_create_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
-{
- return 0;
-}
-
-bool db_create_device_record(JCR *jcr, B_DB *mdb, DEVICE_DBR *dr)
-{ return false; }
-
-bool db_create_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *dr)
-{ return false; }
-
-bool db_create_mediatype_record(JCR *jcr, B_DB *mdb, MEDIATYPE_DBR *dr)
-{ return false; }
-
-
-/*
- * Create Unique Media record. This record
- * contains all the data pertaining to a specific
- * Volume.
- *
- * Returns: 0 on failure
- * 1 on success
- */
-int db_create_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return 0;
-}
-
-int db_create_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr)
-{
- return 0;
-}
-
-bool db_create_fileset_record(JCR *jcr, B_DB *mdb, FILESET_DBR *fsr)
-{
- return false;
-}
-
-int db_create_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
-{ return 0; }
-
-bool db_write_batch_file_records(JCR *jcr) { return false; }
-bool my_batch_start(JCR *jcr, B_DB *mdb) { return false; }
-bool my_batch_end(JCR *jcr, B_DB *mdb, const char *error) { return false; }
-bool my_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar) { return false; }
-
-int db_delete_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
-{
- return 0;
-}
-
-int db_delete_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return 0;
-}
-
-bool db_find_job_start_time(JCR *jcr, B_DB *mdb, JOB_DBR *jr, POOLMEM **stime)
-{
- return 0;
-}
-
-int
-db_find_next_volume(JCR *jcr, B_DB *mdb, int item, bool InChanger, MEDIA_DBR *mr)
-{
- return 0;
-}
-
-bool
-db_find_last_jobid(JCR *jcr, B_DB *mdb, const char *Name, JOB_DBR *jr)
-{ return false; }
-
-bool
-db_find_failed_job_since(JCR *jcr, B_DB *mdb, JOB_DBR *jr, POOLMEM *stime, int &JobLevel)
-{ return false; }
-
-bool db_get_job_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
-{
- return 0;
-}
-
-int db_get_num_pool_records(JCR *jcr, B_DB *mdb)
-{
- return -1;
-}
-
-int db_get_pool_ids(JCR *jcr, B_DB *mdb, int *num_ids, uint32_t *ids[])
-{ return 0; }
-
-bool db_get_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
-{ return 0; }
-
-int db_get_num_media_records(JCR *jcr, B_DB *mdb)
-{ return -1; }
-
-bool db_get_media_ids(JCR *jcr, B_DB *mdb, uint32_t PoolId, int *num_ids, uint32_t *ids[])
-{ return false; }
-
-bool db_get_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{ return false; }
-
-int db_get_job_volume_names(JCR *jcr, B_DB *mdb, uint32_t JobId, POOLMEM **VolumeNames)
-{ return 0; }
-
-int db_get_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr)
-{ return 0; }
-
-int db_get_fileset_record(JCR *jcr, B_DB *mdb, FILESET_DBR *fsr)
-{ return 0; }
-
-bool db_get_query_dbids(JCR *jcr, B_DB *mdb, POOL_MEM &query, dbid_list &ids)
-{ return false; }
-
-int db_get_file_attributes_record(JCR *jcr, B_DB *mdb, char *fname, JOB_DBR *jr, FILE_DBR *fdbr)
-{ return 0; }
-
-int db_get_job_volume_parameters(JCR *jcr, B_DB *mdb, uint32_t JobId, VOL_PARAMS **VolParams)
-{ return 0; }
-
-int db_get_client_ids(JCR *jcr, B_DB *mdb, int *num_ids, uint32_t *ids[])
-{ return 0; }
-
-int db_get_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
-{ return 0; }
-
-int db_list_sql_query(JCR *jcr, B_DB *mdb, const char *query, DB_LIST_HANDLER *sendit,
- void *ctx, int verbose)
-{ return 0; }
-
-void db_list_pool_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-void db_list_media_records(JCR *jcr, B_DB *mdb, MEDIA_DBR *mdbr,
- DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-void db_list_jobmedia_records(JCR *jcr, B_DB *mdb, uint32_t JobId,
- DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-void db_list_job_records(JCR *jcr, B_DB *mdb, JOB_DBR *jr,
- DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-void db_list_job_totals(JCR *jcr, B_DB *mdb, JOB_DBR *jr,
- DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-void db_list_files_for_job(JCR *jcr, B_DB *mdb, uint32_t jobid, DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-void db_list_client_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-int db_list_sql_query(JCR *jcr, B_DB *mdb, const char *query, DB_LIST_HANDLER *sendit,
- void *ctx, int verbose, e_list_type type)
-{
- return 0;
-}
-
-void
-db_list_pool_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-void
-db_list_media_records(JCR *jcr, B_DB *mdb, MEDIA_DBR *mdbr,
- DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-void db_list_jobmedia_records(JCR *jcr, B_DB *mdb, uint32_t JobId,
- DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-void
-db_list_job_records(JCR *jcr, B_DB *mdb, JOB_DBR *jr, DB_LIST_HANDLER *sendit,
- void *ctx, e_list_type type)
-{ }
-
-void
-db_list_client_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-bool db_update_job_start_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
-{
- return false;
-}
-
-/*
- * This is called at Job termination time to add all the
- * other fields to the job record.
- */
-int db_update_job_end_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr, bool stats_enabled)
-{
- return 0;
-}
-
-
-int db_update_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return 0;
-}
-
-int db_update_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
-{
- return 0;
-}
-
-int db_add_digest_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *digest, int type)
-{
- return 1;
-}
-
-int db_mark_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, JobId_t JobId)
-{
- return 1;
-}
-
-int db_update_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr)
-{
- return 1;
-}
-
-int db_update_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
-{
- return 0;
-}
-
-int db_update_media_defaults(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return 1;
-}
-
-void db_make_inchanger_unique(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return;
-}
-
-
-#endif /* HAVE_BACULA_DB */
+++ /dev/null
-/*
- * Definitions common to the Bacula Database Routines (bdb).
- */
-
-/* bdb.c */
-extern int bdb_open_jobs_file(B_DB *mdb);
-extern int bdb_write_control_file(B_DB *mdb);
-extern int bdb_open_jobmedia_file(B_DB *mdb);
-extern int bdb_open_pools_file(B_DB *mdb);
-extern int bdb_open_media_file(B_DB *mdb);
-extern int bdb_open_client_file(B_DB *mdb);
-extern int bdb_open_fileset_file(B_DB *mdb);
-extern int db_get_media_record(B_DB *mdb, MEDIA_DBR *mr);
+++ /dev/null
-/*
- * Bacula Catalog Database Create record routines
- *
- * Bacula Catalog Database routines written specifically
- * for Bacula. Note, these routines are VERY dumb and
- * do not provide all the functionality of an SQL database.
- * The purpose of these routines is to ensure that Bacula
- * can limp along if no real database is loaded on the
- * system.
- *
- * Kern Sibbald, January MMI
- *
- * Version $Id$
- */
-/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2001-2006 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version three of the GNU Affero General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
-*/
-
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
-#include "bacula.h"
-#include "cats.h"
-#include "bdb.h"
-
-#ifdef HAVE_BACULA_DB
-
-/* Forward referenced functions */
-bool db_create_pool_record(B_DB *mdb, POOL_DBR *pr);
-
-/* -----------------------------------------------------------------------
- *
- * Bacula specific defines and subroutines
- *
- * -----------------------------------------------------------------------
- */
-
-bool db_create_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
-{
- return true;
-}
-
-int db_create_file_item(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
-{
- return 1;
-}
-
-
-/*
- * Create a new record for the Job
- * This record is created at the start of the Job,
- * it is updated in bdb_update.c when the Job terminates.
- *
- * Returns: 0 on failure
- * 1 on success
- */
-bool db_create_job_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
-{
- return 0;
-}
-
-/* Create a JobMedia record for Volume used this job
- * Returns: 0 on failure
- * record-id on success
- */
-bool db_create_jobmedia_record(JCR *jcr, B_DB *mdb, JOBMEDIA_DBR *jm)
-{
- return 0;
-}
-
-
-/*
- * Create a unique Pool record
- * Returns: 0 on failure
- * 1 on success
- */
-bool db_create_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
-{
- return 0;
-}
-
-bool db_create_device_record(JCR *jcr, B_DB *mdb, DEVICE_DBR *dr)
-{ return false; }
-
-bool db_create_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *dr)
-{ return false; }
-
-bool db_create_mediatype_record(JCR *jcr, B_DB *mdb, MEDIATYPE_DBR *dr)
-{ return false; }
-
-
-/*
- * Create Unique Media record. This record
- * contains all the data pertaining to a specific
- * Volume.
- *
- * Returns: 0 on failure
- * 1 on success
- */
-int db_create_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return 0;
-}
-
-
-/*
- * Create a unique Client record or return existing record
- * Returns: 0 on failure
- * 1 on success
- */
-int db_create_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr)
-{
- return 0;
-}
-
-/*
- * Create a unique FileSet record or return existing record
- *
- * Note, here we write the FILESET_DBR structure
- *
- * Returns: 0 on failure
- * 1 on success
- */
-bool db_create_fileset_record(JCR *jcr, B_DB *mdb, FILESET_DBR *fsr)
-{
- return false;
-}
-
-int db_create_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
-{ return 0; }
-
-bool db_write_batch_file_records(JCR *jcr) { return false; }
-bool my_batch_start(JCR *jcr, B_DB *mdb) { return false; }
-bool my_batch_end(JCR *jcr, B_DB *mdb, const char *error) { return false; }
-bool my_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar) { return false; }
-
-
-#endif /* HAVE_BACULA_DB */
+++ /dev/null
-/*
- * Bacula Catalog Database Delete record interface routines
- *
- * Bacula Catalog Database routines written specifically
- * for Bacula. Note, these routines are VERY dumb and
- * do not provide all the functionality of an SQL database.
- * The purpose of these routines is to ensure that Bacula
- * can limp along if no real database is loaded on the
- * system.
- *
- * Kern Sibbald, January MMI
- *
- * Version $Id$
- */
-/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2000-2006 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version three of the GNU Affero General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
-*/
-
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
-#include "bacula.h"
-#include "cats.h"
-#include "bdb.h"
-
-#ifdef HAVE_BACULA_DB
-
-/* Forward referenced functions */
-
-/* -----------------------------------------------------------------------
- *
- * Bacula specific defines and subroutines
- *
- * -----------------------------------------------------------------------
- */
-
-
-/*
- * Delete a Pool record given the Name
- *
- * Returns: 0 on error
- * the number of records deleted on success
- */
-int db_delete_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
-{
- return 0;
-}
-
-int db_delete_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return 0;
-}
-
-#endif /* HAVE_BACULA_DB */
+++ /dev/null
-/*
- * Bacula Catalog Database Find record interface routines
- *
- * Note, generally, these routines are more complicated
- * that a simple search by name or id. Such simple
- * request are in get.c
- *
- * Bacula Catalog Database routines written specifically
- * for Bacula. Note, these routines are VERY dumb and
- * do not provide all the functionality of an SQL database.
- * The purpose of these routines is to ensure that Bacula
- * can limp along if no real database is loaded on the
- * system.
- *
- * Kern Sibbald, January MMI
- *
- * Version $Id$
- */
-/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2001-2006 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version three of the GNU Affero General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
-*/
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
-#include "bacula.h"
-#include "cats.h"
-#include "bdb.h"
-
-#ifdef HAVE_BACULA_DB
-
-/* Forward referenced functions */
-
-/* -----------------------------------------------------------------------
- *
- * Bacula specific defines and subroutines
- *
- * -----------------------------------------------------------------------
- */
-
-
-/*
- * Find job start time. Used to find last full save that terminated normally
- * so we can do Incremental and Differential saves.
- *
- * Returns: 0 on failure
- * 1 on success, jr unchanged, but stime set
- */
-bool db_find_job_start_time(JCR *jcr, B_DB *mdb, JOB_DBR *jr, POOLMEM **stime)
-{
- return 0;
-}
-
-
-/*
- * Find Available Media (Volume) for Pool
- *
- * Find a Volume for a given PoolId, MediaType, and VolStatus
- *
- * Note! this does not correctly implement InChanger.
- *
- * Returns: 0 on failure
- * numrows on success
- */
-int
-db_find_next_volume(JCR *jcr, B_DB *mdb, int item, bool InChanger, MEDIA_DBR *mr)
-{
- return 0;
-}
-
-bool
-db_find_last_jobid(JCR *jcr, B_DB *mdb, const char *Name, JOB_DBR *jr)
-{ return false; }
-
-bool
-db_find_failed_job_since(JCR *jcr, B_DB *mdb, JOB_DBR *jr, POOLMEM *stime, int &JobLevel)
-{ return false; }
-
-
-#endif /* HAVE_BACULA_DB */
+++ /dev/null
-/*
- * Bacula Catalog Database Get record interface routines
- * Note, these routines generally get a record by id or
- * by name. If more logic is involved, the routine
- * should be in find.c
- *
- * Bacula Catalog Database routines written specifically
- * for Bacula. Note, these routines are VERY dumb and
- * do not provide all the functionality of an SQL database.
- * The purpose of these routines is to ensure that Bacula
- * can limp along if no real database is loaded on the
- * system.
- *
- * Kern Sibbald, January MMI
- *
- * Version $Id$
- */
-/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2001-2006 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version three of the GNU Affero General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
-*/
-
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
-#include "bacula.h"
-#include "cats.h"
-#include "bdb.h"
-
-#ifdef HAVE_BACULA_DB
-
-/* Forward referenced functions */
-
-
-/* -----------------------------------------------------------------------
- *
- * Bacula specific defines and subroutines
- *
- * -----------------------------------------------------------------------
- */
-
-
-/*
- * Get Job record for given JobId
- * Returns: 0 on failure
- * 1 on success
- */
-
-bool db_get_job_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
-{
- return 0;
-}
-
-
-/*
- * Get the number of pool records
- *
- * Returns: -1 on failure
- * number on success
- */
-int db_get_num_pool_records(JCR *jcr, B_DB *mdb)
-{
- return -1;
-}
-
-/*
- * This function returns a list of all the Pool record ids.
- * The caller must free ids if non-NULL.
- *
- * Returns 0: on failure
- * 1: on success
- */
-int db_get_pool_ids(JCR *jcr, B_DB *mdb, int *num_ids, uint32_t *ids[])
-{
- return 0;
-}
-
-
-/*
- * Get Pool Record
- * If the PoolId is non-zero, we get its record,
- * otherwise, we search on the PoolName
- *
- * Returns: false on failure
- * true on success
- */
-bool db_get_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
-{
- return 0;
-}
-
-/*
- * Get the number of Media records
- *
- * Returns: -1 on failure
- * number on success
- */
-int db_get_num_media_records(JCR *jcr, B_DB *mdb)
-{
- return -1;
-}
-
-/*
- * This function returns a list of all the Media record ids
- * for a specified PoolId
- * The caller must free ids if non-NULL.
- *
- * Returns false: on failure
- * true: on success
- */
-bool db_get_media_ids(JCR *jcr, B_DB *mdb, uint32_t PoolId, int *num_ids, uint32_t *ids[])
-{
- return false;
-}
-
-/*
- * Get Media Record
- * If the MediaId is non-zero, we get its record,
- * otherwise, we search on the MediaName
- *
- * Returns: false on failure
- * true on success
- */
-bool db_get_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return false;
-}
-
-/*
- * Find VolumeNames for a give JobId
- * Returns: 0 on error or no Volumes found
- * number of volumes on success
- * Volumes are concatenated in VolumeNames
- * separated by a vertical bar (|).
- */
-int db_get_job_volume_names(JCR *jcr, B_DB *mdb, uint32_t JobId, POOLMEM **VolumeNames)
-{
- return 0;
-}
-
-/*
- * Get Client Record
- * If the ClientId is non-zero, we get its record,
- * otherwise, we search on the Name
- *
- * Returns: 0 on failure
- * id on success
- */
-int db_get_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr)
-{
- return 0;
-}
-
-/*
- * Get FileSet Record (We read the FILESET_DBR structure)
- * If the FileSetId is non-zero, we get its record,
- * otherwise, we search on the FileSet (its name).
- *
- * Returns: 0 on failure
- * id on success
- */
-int db_get_fileset_record(JCR *jcr, B_DB *mdb, FILESET_DBR *fsr)
-{
- return 0;
-}
-
-bool db_get_query_dbids(JCR *jcr, B_DB *mdb, POOL_MEM &query, dbid_list &ids)
-{ return false; }
-
-int db_get_file_attributes_record(JCR *jcr, B_DB *mdb, char *fname, JOB_DBR *jr, FILE_DBR *fdbr)
-{ return 0; }
-
-int db_get_job_volume_parameters(JCR *jcr, B_DB *mdb, uint32_t JobId, VOL_PARAMS **VolParams)
-{ return 0; }
-
-int db_get_client_ids(JCR *jcr, B_DB *mdb, int *num_ids, uint32_t *ids[])
-{ return 0; }
-
-int db_get_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
-{ return 0; }
-
-
-#endif /* HAVE_BACULA_DB */
+++ /dev/null
-/*
- * Bacula Catalog Database List records interface routines
- *
- * Bacula Catalog Database routines written specifically
- * for Bacula. Note, these routines are VERY dumb and
- * do not provide all the functionality of an SQL database.
- * The purpose of these routines is to ensure that Bacula
- * can limp along if no real database is loaded on the
- * system.
- *
- * Kern Sibbald, January MMI
- *
- * Version $Id$
- */
-/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2001-2006 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version three of the GNU Affero General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
-*/
-
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
-#include "bacula.h"
-#include "cats.h"
-#include "bdb.h"
-
-#ifdef HAVE_BACULA_DB
-
-/* Forward referenced functions */
-
-/* -----------------------------------------------------------------------
- *
- * Bacula specific defines and subroutines
- *
- * -----------------------------------------------------------------------
- */
-
-/*
- * Submit general SQL query
- */
-int db_list_sql_query(JCR *jcr, B_DB *mdb, const char *query, DB_LIST_HANDLER *sendit,
- void *ctx, int verbose)
-{
- return 0;
-}
-
-
-/*
- * List all the pool records
- */
-void db_list_pool_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx)
-{
- return;
-}
-
-
-/*
- * List Media records
- */
-void db_list_media_records(JCR *jcr, B_DB *mdb, MEDIA_DBR *mdbr,
- DB_LIST_HANDLER *sendit, void *ctx)
-{
- return;
-}
-
-void db_list_jobmedia_records(JCR *jcr, B_DB *mdb, uint32_t JobId,
- DB_LIST_HANDLER *sendit, void *ctx)
-{
- return;
-}
-
-
-/*
- * List Job records
- */
-void db_list_job_records(JCR *jcr, B_DB *mdb, JOB_DBR *jr,
- DB_LIST_HANDLER *sendit, void *ctx)
-{
- return;
-}
-
-
-/*
- * List Job Totals
- */
-void db_list_job_totals(JCR *jcr, B_DB *mdb, JOB_DBR *jr,
- DB_LIST_HANDLER *sendit, void *ctx)
-{
- return;
-}
-
-
-
-void db_list_files_for_job(JCR *jcr, B_DB *mdb, uint32_t jobid, DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-void db_list_client_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx)
-{ }
-
-int db_list_sql_query(JCR *jcr, B_DB *mdb, const char *query, DB_LIST_HANDLER *sendit,
- void *ctx, int verbose, e_list_type type)
-{
- return 0;
-}
-
-void
-db_list_pool_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-void
-db_list_media_records(JCR *jcr, B_DB *mdb, MEDIA_DBR *mdbr,
- DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-void db_list_jobmedia_records(JCR *jcr, B_DB *mdb, uint32_t JobId,
- DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-void
-db_list_job_records(JCR *jcr, B_DB *mdb, JOB_DBR *jr, DB_LIST_HANDLER *sendit,
- void *ctx, e_list_type type)
-{ }
-
-void
-db_list_client_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type)
-{ }
-
-
-
-
-#endif /* HAVE_BACULA_DB */
+++ /dev/null
-/*
- * Bacula Catalog Database Update record interface routines
- *
- * Bacula Catalog Database routines written specifically
- * for Bacula. Note, these routines are VERY dumb and
- * do not provide all the functionality of an SQL database.
- * The purpose of these routines is to ensure that Bacula
- * can limp along if no real database is loaded on the
- * system.
- *
- * Kern Sibbald, January MMI
- *
- *
- * Version $Id$
- */
-/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2001-2008 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version three of the GNU Affero General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
-*/
-
-
-/* The following is necessary so that we do not include
- * the dummy external definition of DB.
- */
-#define __SQL_C /* indicate that this is sql.c */
-
-#include "bacula.h"
-#include "cats.h"
-#include "bdb.h"
-
-#ifdef HAVE_BACULA_DB
-
-/* -----------------------------------------------------------------------
- *
- * Bacula specific defines and subroutines
- *
- * -----------------------------------------------------------------------
- */
-
-
-/*
- * This is called at Job start time to add the
- * most current start fields to the job record.
- * It is assumed that you did a db_create_job_record() already.
- */
-bool db_update_job_start_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
-{
- return false;
-}
-
-/*
- * This is called at Job termination time to add all the
- * other fields to the job record.
- */
-int db_update_job_end_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr, bool stats_enabled)
-{
- return 0;
-}
-
-
-int db_update_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return 0;
-}
-
-int db_update_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
-{
- return 0;
-}
-
-int db_add_digest_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *digest, int type)
-{
- return 1;
-}
-
-int db_mark_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, JobId_t JobId)
-{
- return 1;
-}
-
-int db_update_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr)
-{
- return 1;
-}
-
-int db_update_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr)
-{
- return 0;
-}
-
-int db_update_media_defaults(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return 1;
-}
-
-void db_make_inchanger_unique(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr)
-{
- return;
-}
-
-#endif /* HAVE_BACULA_DB */
if (*pattern) {
Mmsg(filter, " AND Filename.Name %s '%s' ", SQL_MATCH, pattern);
}
-
+ /* TODO: Use JobTDate instead of FileId to determine the latest version */
POOL_MEM query;
Mmsg(query, // 1 2 3 4
"SELECT 'F', File.PathId, File.FilenameId, listfiles.Name, File.JobId, "
echo "Creating MySQL database"
@scriptdir@/create_mysql_database $*
elif test xingres = x@DB_TYPE@ ; then
- echo "Creating Ingres database"
+ echo "Creating Ingres database with $*"
@scriptdir@/create_ingres_database $*
else
echo "Creating PostgreSQL database"
+++ /dev/null
-#!/bin/sh
-#
-# shell script to create Bacula database(s)
-#
-# Nothing to do
#
bindir=@SQL_BINDIR@
-db_name=@db_name@
+PATH="$bindir:$PATH"
+db_name=${db_name:-@db_name@}
+db_user=${db_user:-@db_user@}
# use SQL_ASCII to be able to put any filename into
# the database even those created with unusual character sets
#
#ENCODING="ENCODING 'UTF8'"
-if createdb $* ${db_name}
+if createdb -u${db_user} $* ${db_name}
then
echo "Creation of ${db_name} database succeeded."
else
echo "Creation of ${db_name} database failed."
fi
+
exit 0
+++ /dev/null
-#!/bin/sh
-#
-# shell script to drop Bacula database(s)
-#
-# Nothing to do
+++ /dev/null
-#!/bin/sh
-#
-# shell script to Delete the Bacula database (same as deleting
-# the tables)
-#
-
-rm -f @working_dir@/control.db
-rm -f @working_dir@/jobs.db
-rm -f @working_dir@/pools.db
-rm -f @working_dir@/media.db
-rm -f @working_dir@/jobmedia.db
-rm -f @working_dir@/client.db
-rm -f @working_dir@/fileset.db
#
bindir=@SQL_BINDIR@
-db_name=@db_name@
+PATH="$bindir:$PATH"
+db_name=${db_name:-@db_name@}
+db_user=${db_user:-@db_user@}
-if destroydb ${db_name}
+if destroydb -u${db_user} ${db_name}
then
echo "Drop of ${db_name} database succeeded."
else
# shell script to delete Bacula tables for PostgreSQL
bindir=@SQL_BINDIR@
-db_name=@db_name@
+PATH="$bindir:$PATH"
+db_name=${db_name:-@db_name@}
+db_user=${db_user:-@db_user@}
sql -u${db_user} ${db_name} $* <<END-OF-DATA
-DROP TABLE filename\g
-DROP TABLE path\g
-DROP TABLE file\g
-DROP TABLE Job\g
-DROP TABLE JobHisto\g
-DROP TABLE Location\g
-DROP TABLE fileset\g
-DROP TABLE jobmedia\g
-DROP TABLE media\g
-DROP TABLE MediaType\g
-DROP TABLE Storage\g
-DROP TABLE Device\g
-DROP TABLE pool\g
-DROP TABLE client\g
-DROP TABLE Log\g
-DROP TABLE LocationLog\g
-DROP TABLE counters\g
-DROP TABLE basefiles\g
-DROP TABLE unsavedfiles\g
-DROP TABLE CDImages \g
-DROP TABLE PathHierarchy\g
-DROP TABLE PathVisibility\g
-DROP TABLE version\g
-DROP TABLE Status\g
-DROP SEQUENCE filename_seq\g
-DROP SEQUENCE path_seq\g
-DROP SEQUENCE file_seq\g
-DROP SEQUENCE Job_seq\g
-DROP SEQUENCE JobHisto_seq\g
-DROP SEQUENCE Location_seq\g
-DROP SEQUENCE fileset_seq\g
-DROP SEQUENCE jobmedia_seq\g
-DROP SEQUENCE media_seq\g
-DROP SEQUENCE MediaType_seq\g
-DROP SEQUENCE Storage_seq\g
-DROP SEQUENCE Device_seq\g
-DROP SEQUENCE pool_seq\g
-DROP SEQUENCE client_seq\g
-DROP SEQUENCE Log_seq\g
-DROP SEQUENCE LocationLog_seq\g
-DROP SEQUENCE basefiles_seq\g
+DROP TABLE Filename;
+DROP TABLE Path;
+DROP TABLE File;
+DROP TABLE RestoreObject;
+DROP TABLE Job;
+DROP TABLE JobHisto;
+DROP TABLE Location;
+DROP TABLE Fileset;
+DROP TABLE JobMedia;
+DROP TABLE Media;
+DROP TABLE MediaType;
+DROP TABLE Storage;
+DROP TABLE Device;
+DROP TABLE Pool;
+DROP TABLE Client;
+DROP TABLE Log;
+DROP TABLE LocationLog;
+DROP TABLE Counters;
+DROP TABLE BaseFiles;
+DROP TABLE UnsavedFiles;
+DROP TABLE CDImages ;
+DROP TABLE PathHierarchy;
+DROP TABLE PathVisibility;
+DROP TABLE Version;
+DROP TABLE Status;
+DROP SEQUENCE Filename_Seq;
+DROP SEQUENCE Path_Seq;
+DROP SEQUENCE File_Seq;
+DROP SEQUENCE RestoreObject_Seq;
+DROP SEQUENCE Job_Seq;
+DROP SEQUENCE JobHisto_Seq;
+DROP SEQUENCE Location_Seq;
+DROP SEQUENCE Fileset_Seq;
+DROP SEQUENCE JobMedia_Seq;
+DROP SEQUENCE Media_Seq;
+DROP SEQUENCE MediaType_Seq;
+DROP SEQUENCE Storage_Seq;
+DROP SEQUENCE Device_Seq;
+DROP SEQUENCE Pool_Seq;
+DROP SEQUENCE Client_Seq;
+DROP SEQUENCE Log_Seq;
+DROP SEQUENCE LocationLog_Seq;
+DROP SEQUENCE BaseFiles_Seq;
+\g
END-OF-DATA
pstat=$?
if test $pstat = 0;
DROP TABLE IF EXISTS LocationLog;
DROP TABLE IF EXISTS PathVisibility;
DROP TABLE IF EXISTS PathHierarchy;
+DROP TABLE IF EXISTS RestoreObject;
END-OF-DATA
then
echo "Deletion of ${db_name} MySQL tables succeeded."
drop table locationlog;
drop table PathVisibility;
drop table PathHierarchy;
+drop table RestoreObject;
END-OF-DATA
pstat=$?
if test $pstat = 0;
+++ /dev/null
-#!/bin/sh
-#
-# shell script to grant privileges to the bdb database
-#
-bindir=@SQL_BINDIR@
-
-# nothing to do here
\ No newline at end of file
#!/bin/sh
#
-# shell script to grant privileges to the bacula database
+# shell script TO GRANT privileges to the bacula database
#
-db_user=${db_user:-@db_user@}
bindir=@SQL_BINDIR@
+PATH="$bindir:$PATH"
db_name=${db_name:-@db_name@}
+db_user=${db_user:-@db_user@}
-if sql ${db_name} $* <<END-OF-DATA
+sql iidbdb $* <<END-OF-DATA
+CREATE USER ${db_user}
+\g
+END-OF-DATA
-create user ${db_user};
+if sql -u${db_user} ${db_name} $* <<END-OF-DATA
-- for tables
-grant all on unsavedfiles to ${db_user};
-grant all on basefiles to ${db_user};
-grant all on jobmedia to ${db_user};
-grant all on file to ${db_user};
-grant all on job to ${db_user};
-grant all on media to ${db_user};
-grant all on client to ${db_user};
-grant all on pool to ${db_user};
-grant all on fileset to ${db_user};
-grant all on path to ${db_user};
-grant all on filename to ${db_user};
-grant all on counters to ${db_user};
-grant all on version to ${db_user};
-grant all on cdimages to ${db_user};
-grant all on mediatype to ${db_user};
-grant all on storage to ${db_user};
-grant all on device to ${db_user};
-grant all on status to ${db_user};
-grant all on location to ${db_user};
-grant all on locationlog to ${db_user};
-grant all on log to ${db_user};
-grant all on jobhisto to ${db_user};
-
--- for sequences on those tables
+GRANT ALL ON TABLE Filename TO ${db_user};
+GRANT ALL ON TABLE Path TO ${db_user};
+GRANT ALL ON TABLE File TO ${db_user};
+GRANT ALL ON TABLE RestoreObject TO ${db_user};
+GRANT ALL ON TABLE Job TO ${db_user};
+GRANT ALL ON TABLE JobHisto TO ${db_user};
+GRANT ALL ON TABLE Location TO ${db_user};
+GRANT ALL ON TABLE Fileset TO ${db_user};
+GRANT ALL ON TABLE JobMedia TO ${db_user};
+GRANT ALL ON TABLE Media TO ${db_user};
+GRANT ALL ON TABLE MediaType TO ${db_user};
+GRANT ALL ON TABLE Storage TO ${db_user};
+GRANT ALL ON TABLE Device TO ${db_user};
+GRANT ALL ON TABLE Pool TO ${db_user};
+GRANT ALL ON TABLE Client TO ${db_user};
+GRANT ALL ON TABLE Log TO ${db_user};
+GRANT ALL ON TABLE LocationLog TO ${db_user};
+GRANT ALL ON TABLE Counters TO ${db_user};
+GRANT ALL ON TABLE BaseFiles TO ${db_user};
+GRANT ALL ON TABLE UnsavedFiles TO ${db_user};
+GRANT ALL ON TABLE CDImages TO ${db_user};
+GRANT ALL ON TABLE PathHierarchy TO ${db_user};
+GRANT ALL ON TABLE PathVisibility TO ${db_user};
+GRANT ALL ON TABLE Version TO ${db_user};
+GRANT ALL ON TABLE Status TO ${db_user};
-grant select, update on filename_filenameid_seq to ${db_user};
-grant select, update on path_pathid_seq to ${db_user};
-grant select, update on fileset_filesetid_seq to ${db_user};
-grant select, update on pool_poolid_seq to ${db_user};
-grant select, update on client_clientid_seq to ${db_user};
-grant select, update on media_mediaid_seq to ${db_user};
-grant select, update on job_jobid_seq to ${db_user};
-grant select, update on file_fileid_seq to ${db_user};
-grant select, update on jobmedia_jobmediaid_seq to ${db_user};
-grant select, update on basefiles_baseid_seq to ${db_user};
-grant select, update on storage_storageid_seq to ${db_user};
-grant select, update on mediatype_mediatypeid_seq to ${db_user};
-grant select, update on device_deviceid_seq to ${db_user};
-grant select, update on location_locationid_seq to ${db_user};
-grant select, update on locationlog_loclogid_seq to ${db_user};
-grant select, update on log_logid_seq to ${db_user};
+-- for sequences on those tables
+GRANT NEXT ON SEQUENCE Filename_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Path_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE File_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE RestoreObject_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Job_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE JobHisto_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Location_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Fileset_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Jobmedia_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Media_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE MediaType_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Storage_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Device_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Pool_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Client_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE Log_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE LocationLog_Seq TO ${db_user};
+GRANT NEXT ON SEQUENCE BaseFiles_Seq TO ${db_user};
+\g
END-OF-DATA
then
+++ /dev/null
-#!/bin/sh
-#
-# shell script to create Bacula PostgreSQL tables
-#
-bindir=@SQL_BINDIR@
-db_name=@db_name@
-
-sql $* ${db_name}
+++ /dev/null
-#!/bin/sh
-#
-# shell script to create Bacula tables
-#
-# Nothing to do -- created by Bacula
-#
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
if ($args{db_address}) {
$ENV{PGHOST}=$args{db_address};
}
+ if ($args{db_socket}) {
+ $ENV{PGHOST}=$args{db_socket};
+ }
if ($args{db_port}) {
$ENV{PGPORT}=$args{db_port};
}
unlink("$wd/.my.cnf");
open(MY, ">$wd/.my.cnf")
or die "Can't open $wd/.my.cnf for writing $@";
+
$args{db_address} = $args{db_address} || "localhost";
+ my $addr = "host=$args{db_address}";
+ if ($args{db_socket}) { # a unix socket is faster than a network socket
+ $addr = "socket=$args{db_socket}";
+ }
+
print MY "[client]
-host=$args{db_address}
+$addr
user=$args{db_user}
password=$args{db_password}
";
+++ /dev/null
-#include "bacula.h"
-/* # line 3 "myingres.sc" */
-#ifdef HAVE_INGRES
-#include <eqpname.h>
-#include <eqdefcc.h>
-#include <eqsqlca.h>
-extern IISQLCA sqlca; /* SQL Communications Area */
-#include <eqsqlda.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include "myingres.h"
-/*
- * ---Implementations---
- */
-int INGcheck()
-{
- return (sqlca.sqlcode < 0) ? sqlca.sqlcode : 0;
-}
-short INGgetCols(const char *stmt)
-{
-/* # line 23 "myingres.sc" */
-
- char *stmtd;
-/* # line 25 "myingres.sc" */
-
- short number = 1;
- IISQLDA *sqlda;
- sqlda = (IISQLDA *)malloc(IISQDA_HEAD_SIZE + (number * IISQDA_VAR_SIZE));
- memset(sqlda, 0, (IISQDA_HEAD_SIZE + (number * IISQDA_VAR_SIZE)));
- sqlda->sqln = number;
- stmtd = (char*)malloc(strlen(stmt)+1);
- bstrncpy(stmtd,stmt,strlen(stmt)+1);
-/* # line 38 "myingres.sc" */ /* prepare */
- {
- IIsqInit(&sqlca);
- IIsqPrepare(0,(char *)"s1",(char *)0,0,stmtd);
- }
-/* # line 39 "myingres.sc" */ /* host code */
- if (INGcheck() < 0) {
- free(stmtd);
- free(sqlda);
- return -1;
- }
-/* # line 44 "myingres.sc" */ /* describe */
- {
- IIsqInit(&sqlca);
- IIsqDescribe(0,(char *)"s1",sqlda,0);
- }
-/* # line 45 "myingres.sc" */ /* host code */
- if (INGcheck() < 0) {
- free(stmtd);
- free(sqlda);
- return -1;
- }
- number = sqlda->sqld;
- free(stmtd);
- free(sqlda);
- return number;
-}
-IISQLDA *INGgetDescriptor(short numCols, const char *stmt)
-{
-/* # line 59 "myingres.sc" */
-
- char *stmtd;
-/* # line 61 "myingres.sc" */
-
- int i;
- IISQLDA *sqlda;
- sqlda = (IISQLDA *)malloc(IISQDA_HEAD_SIZE + (numCols * IISQDA_VAR_SIZE));
- memset(sqlda, 0, (IISQDA_HEAD_SIZE + (numCols * IISQDA_VAR_SIZE)));
- sqlda->sqln = numCols;
- stmtd = (char *)malloc(strlen(stmt)+1);
- bstrncpy(stmtd,stmt,strlen(stmt)+1);
-/* # line 74 "myingres.sc" */ /* prepare */
- {
- IIsqInit(&sqlca);
- IIsqPrepare(0,(char *)"s2",sqlda,0,stmtd);
- }
-/* # line 76 "myingres.sc" */ /* host code */
- free(stmtd);
- for (i = 0; i < sqlda->sqld; ++i) {
- /*
- * Alloc space for variable like indicated in sqllen
- * for date types sqllen is always 0 -> allocate by type
- */
- switch (abs(sqlda->sqlvar[i].sqltype)) {
- case IISQ_TSW_TYPE:
- sqlda->sqlvar[i].sqldata = (char *)malloc(IISQ_TSW_LEN);
- break;
- case IISQ_TSWO_TYPE:
- sqlda->sqlvar[i].sqldata = (char *)malloc(IISQ_TSWO_LEN);
- break;
- case IISQ_TSTMP_TYPE:
- sqlda->sqlvar[i].sqldata = (char *)malloc(IISQ_TSTMP_LEN);
- break;
- default:
- sqlda->sqlvar[i].sqldata = (char *)malloc(sqlda->sqlvar[i].sqllen);
- break;
- }
- }
- return sqlda;
-}
-void INGfreeDescriptor(IISQLDA *sqlda)
-{
- if (!sqlda) {
- return;
- }
- int i;
- for (i = 0; i < sqlda->sqld; ++i) {
- if (sqlda->sqlvar[i].sqldata) {
- free(sqlda->sqlvar[i].sqldata);
- }
- if (sqlda->sqlvar[i].sqlind) {
- free(sqlda->sqlvar[i].sqlind);
- }
- }
- free(sqlda);
- sqlda = NULL;
-}
-int INGgetTypeSize(IISQLVAR *ingvar)
-{
- int inglength = 0;
- /*
- * TODO: add date types (at least TSTMP,TSW TSWO)
- */
- switch (ingvar->sqltype) {
- case IISQ_DTE_TYPE:
- inglength = 25;
- break;
- case IISQ_MNY_TYPE:
- inglength = 8;
- break;
- default:
- inglength = ingvar->sqllen;
- break;
- }
- return inglength;
-}
-INGresult *INGgetINGresult(IISQLDA *sqlda)
-{
- if (!sqlda) {
- return NULL;
- }
- int i;
- INGresult *result = NULL;
- result = (INGresult *)malloc(sizeof(INGresult));
- memset(result, 0, sizeof(INGresult));
- result->sqlda = sqlda;
- result->num_fields = sqlda->sqld;
- result->num_rows = 0;
- result->first_row = NULL;
- result->status = ING_EMPTY_RESULT;
- result->act_row = NULL;
- memset(result->numrowstring, 0, sizeof(result->numrowstring));
- if (result->num_fields) {
- result->fields = (INGRES_FIELD *)malloc(sizeof(INGRES_FIELD) * result->num_fields);
- memset(result->fields, 0, sizeof(INGRES_FIELD) * result->num_fields);
- for (i = 0; i < result->num_fields; ++i) {
- memset(result->fields[i].name, 0, 34);
- bstrncpy(result->fields[i].name, sqlda->sqlvar[i].sqlname.sqlnamec, sqlda->sqlvar[i].sqlname.sqlnamel);
- result->fields[i].max_length = INGgetTypeSize(&sqlda->sqlvar[i]);
- result->fields[i].type = abs(sqlda->sqlvar[i].sqltype);
- result->fields[i].flags = (abs(sqlda->sqlvar[i].sqltype)<0) ? 1 : 0;
- }
- }
- return result;
-}
-void INGfreeINGresult(INGresult *ing_res)
-{
- if (!ing_res) {
- return;
- }
- int rows;
- ING_ROW *rowtemp;
- /*
- * Free all rows and fields, then res, not descriptor!
- */
- if (ing_res != NULL) {
- /*
- * Use of rows is a nasty workaround til I find the reason,
- * why aggregates like max() don't work
- */
- rows = ing_res->num_rows;
- ing_res->act_row = ing_res->first_row;
- while (ing_res->act_row != NULL && rows > 0) {
- rowtemp = ing_res->act_row->next;
- INGfreeRowSpace(ing_res->act_row, ing_res->sqlda);
- ing_res->act_row = rowtemp;
- --rows;
- }
- if (ing_res->fields) {
- free(ing_res->fields);
- }
- }
- free(ing_res);
- ing_res = NULL;
-}
-ING_ROW *INGgetRowSpace(INGresult *ing_res)
-{
- int i;
- unsigned short len; /* used for VARCHAR type length */
- IISQLDA *sqlda = ing_res->sqlda;
- ING_ROW *row = NULL;
- IISQLVAR *vars = NULL;
- row = (ING_ROW *)malloc(sizeof(ING_ROW));
- memset(row, 0, sizeof(ING_ROW));
- vars = (IISQLVAR *)malloc(sizeof(IISQLVAR) * sqlda->sqld);
- memset(vars, 0, sizeof(IISQLVAR) * sqlda->sqld);
- row->sqlvar = vars;
- row->next = NULL;
- for (i = 0; i < sqlda->sqld; ++i) {
- /*
- * Make strings out of the data, then the space and assign
- * (why string? at least it seems that way, looking into the sources)
- */
- switch (ing_res->fields[i].type) {
- case IISQ_VCH_TYPE:
- len = ((ING_VARCHAR *)sqlda->sqlvar[i].sqldata)->len;
- vars[i].sqldata = (char *)malloc(len+1);
- memcpy(vars[i].sqldata,sqlda->sqlvar[i].sqldata+2,len);
- vars[i].sqldata[len] = '\0';
- break;
- case IISQ_CHA_TYPE:
- vars[i].sqldata = (char *)malloc(ing_res->fields[i].max_length+1);
- memcpy(vars[i].sqldata,sqlda->sqlvar[i].sqldata,sqlda->sqlvar[i].sqllen);
- vars[i].sqldata[ing_res->fields[i].max_length] = '\0';
- break;
- case IISQ_INT_TYPE:
- vars[i].sqldata = (char *)malloc(20);
- memset(vars[i].sqldata, 0, 20);
- switch (sqlda->sqlvar[i].sqllen) {
- case 2:
- bsnprintf(vars[i].sqldata, 20, "%d",*(short*)sqlda->sqlvar[i].sqldata);
- break;
- case 4:
- bsnprintf(vars[i].sqldata, 20, "%ld",*(int*)sqlda->sqlvar[i].sqldata);
- break;
- case 8:
- bsnprintf(vars[i].sqldata, 20, "%lld",*(long*)sqlda->sqlvar[i].sqldata);
- break;
- }
- break;
- case IISQ_TSTMP_TYPE:
- vars[i].sqldata = (char *)malloc(IISQ_TSTMP_LEN+1);
- vars[i].sqldata[IISQ_TSTMP_LEN] = '\0';
- break;
- case IISQ_TSWO_TYPE:
- vars[i].sqldata = (char *)malloc(IISQ_TSWO_LEN+1);
- vars[i].sqldata[IISQ_TSWO_LEN] = '\0';
- break;
- case IISQ_TSW_TYPE:
- vars[i].sqldata = (char *)malloc(IISQ_TSW_LEN+1);
- vars[i].sqldata[IISQ_TSW_LEN] = '\0';
- break;
- }
- vars[i].sqlind = (short *)malloc(sizeof(short));
- if (sqlda->sqlvar[i].sqlind) {
- memcpy(vars[i].sqlind,sqlda->sqlvar[i].sqlind,sizeof(short));
- } else {
- *vars[i].sqlind = 0;
- }
- }
- return row;
-}
-void INGfreeRowSpace(ING_ROW *row, IISQLDA *sqlda)
-{
- int i;
- if (row == NULL || sqlda == NULL) {
- return;
- }
- for (i = 0; i < sqlda->sqld; ++i) {
- if (row->sqlvar[i].sqldata) {
- free(row->sqlvar[i].sqldata);
- }
- if (row->sqlvar[i].sqlind) {
- free(row->sqlvar[i].sqlind);
- }
- }
- free(row->sqlvar);
- free(row);
-}
-int INGfetchAll(const char *stmt, INGresult *ing_res)
-{
- int linecount = 0;
- ING_ROW *row;
- IISQLDA *desc;
- int check = -1;
- desc = ing_res->sqlda;
-/* # line 317 "myingres.sc" */ /* host code */
- if ((check = INGcheck()) < 0) {
- return check;
- }
-/* # line 321 "myingres.sc" */ /* open */
- {
- IIsqInit(&sqlca);
- IIcsOpen((char *)"c2",9341,8444);
- IIwritio(0,(short *)0,1,32,0,(char *)"s2");
- IIcsQuery((char *)"c2",9341,8444);
- }
-/* # line 322 "myingres.sc" */ /* host code */
- if ((check = INGcheck()) < 0) {
- return check;
- }
- /* for (linecount = 0; sqlca.sqlcode == 0; ++linecount) */
- do {
-/* # line 328 "myingres.sc" */ /* fetch */
- {
- IIsqInit(&sqlca);
- if (IIcsRetScroll((char *)"c2",9341,8444,-1,-1) != 0) {
- IIcsDaGet(0,desc);
- IIcsERetrieve();
- } /* IIcsRetrieve */
- }
-/* # line 330 "myingres.sc" */ /* host code */
- if ( (sqlca.sqlcode == 0) || (sqlca.sqlcode == -40202) ) {
- row = INGgetRowSpace(ing_res); /* alloc space for fetched row */
- /*
- * Initialize list when encountered first time
- */
- if (ing_res->first_row == 0) {
- ing_res->first_row = row; /* head of the list */
- ing_res->first_row->next = NULL;
- ing_res->act_row = ing_res->first_row;
- }
- ing_res->act_row->next = row; /* append row to old act_row */
- ing_res->act_row = row; /* set row as act_row */
- ++linecount;
- row->row_number = linecount;
- }
- } while ( (sqlca.sqlcode == 0) || (sqlca.sqlcode == -40202) );
-/* # line 348 "myingres.sc" */ /* close */
- {
- IIsqInit(&sqlca);
- IIcsClose((char *)"c2",9341,8444);
- }
-/* # line 350 "myingres.sc" */ /* host code */
- ing_res->status = ING_COMMAND_OK;
- ing_res->num_rows = linecount;
- return linecount;
-}
-ING_STATUS INGresultStatus(INGresult *res)
-{
- if (res == NULL) {return ING_NO_RESULT;}
- return res->status;
-}
-void INGrowSeek(INGresult *res, int row_number)
-{
- ING_ROW *trow = NULL;
- if (res->act_row->row_number == row_number) {
- return;
- }
- /*
- * TODO: real error handling
- */
- if (row_number<0 || row_number>res->num_rows) {
- return;
- }
- for (trow = res->first_row ; trow->row_number != row_number ; trow = trow->next );
- res->act_row = trow;
- /*
- * Note - can be null - if row_number not found, right?
- */
-}
-char *INGgetvalue(INGresult *res, int row_number, int column_number)
-{
- if (row_number != res->act_row->row_number) {
- INGrowSeek(res, row_number);
- }
- return res->act_row->sqlvar[column_number].sqldata;
-}
-int INGgetisnull(INGresult *res, int row_number, int column_number)
-{
- if (row_number != res->act_row->row_number) {
- INGrowSeek(res, row_number);
- }
- return (short)*res->act_row->sqlvar[column_number].sqlind;
-}
-int INGntuples(const INGresult *res)
-{
- return res->num_rows;
-}
-int INGnfields(const INGresult *res)
-{
- return res->num_fields;
-}
-char *INGfname(const INGresult *res, int column_number)
-{
- if ((column_number > res->num_fields) || (column_number < 0)) {
- return NULL;
- } else {
- return res->fields[column_number].name;
- }
-}
-short INGftype(const INGresult *res, int column_number)
-{
- return res->fields[column_number].type;
-}
-int INGexec(INGconn *conn, const char *query)
-{
- int check;
-/* # line 425 "myingres.sc" */
-
- int rowcount;
- char *stmt;
-/* # line 428 "myingres.sc" */
-
- stmt = (char *)malloc(strlen(query)+1);
- bstrncpy(stmt,query,strlen(query)+1);
- rowcount = -1;
-/* # line 434 "myingres.sc" */ /* execute */
- {
- IIsqInit(&sqlca);
- IIsqExImmed(stmt);
- IIsyncup((char *)0,0);
- }
-/* # line 435 "myingres.sc" */ /* host code */
- free(stmt);
- if ((check = INGcheck()) < 0) {
- return check;
- }
-/* # line 440 "myingres.sc" */ /* inquire_ingres */
- {
- IILQisInqSqlio((short *)0,1,30,sizeof(rowcount),&rowcount,8);
- }
-/* # line 441 "myingres.sc" */ /* host code */
- if ((check = INGcheck()) < 0) {
- return check;
- }
- return rowcount;
-}
-INGresult *INGquery(INGconn *conn, const char *query)
-{
- /*
- * TODO: error handling
- */
- IISQLDA *desc = NULL;
- INGresult *res = NULL;
- int rows = -1;
- int cols = INGgetCols(query);
- desc = INGgetDescriptor(cols, query);
- if (!desc) {
- return NULL;
- }
- res = INGgetINGresult(desc);
- if (!res) {
- return NULL;
- }
- rows = INGfetchAll(query, res);
- if (rows < 0) {
- INGfreeINGresult(res);
- INGfreeDescriptor(desc);
- return NULL;
- }
- return res;
-}
-void INGclear(INGresult *res)
-{
- if (res == NULL) {
- return;
- }
- IISQLDA *desc = res->sqlda;
- INGfreeINGresult(res);
- INGfreeDescriptor(desc);
-}
-INGconn *INGconnectDB(char *dbname, char *user, char *passwd)
-{
- if (dbname == NULL || strlen(dbname) == 0) {
- return NULL;
- }
- INGconn *dbconn = (INGconn *)malloc(sizeof(INGconn));
- memset(dbconn, 0, sizeof(INGconn));
-/* # line 495 "myingres.sc" */
-
- char ingdbname[24];
- char ingdbuser[32];
- char ingdbpasw[32];
- char conn_name[32];
- int sess_id;
-/* # line 501 "myingres.sc" */
-
- bstrncpy(ingdbname, dbname, sizeof(ingdbname));
- if (user != NULL) {
- bstrncpy(ingdbuser, user, sizeof(ingdbuser));
- if (passwd != NULL) {
- bstrncpy(ingdbpasw, passwd, sizeof(ingdbpasw));
- } else {
- memset(ingdbpasw, 0, sizeof(ingdbpasw));
- }
-/* # line 512 "myingres.sc" */ /* connect */
- {
- IIsqInit(&sqlca);
- IIsqUser(ingdbuser);
- IIsqConnect(0,ingdbname,(char *)"-dbms_password",ingdbpasw,(char *)0,
- (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0,
- (char *)0, (char *)0, (char *)0, (char *)0);
- }
-/* # line 516 "myingres.sc" */ /* host code */
- } else {
-/* # line 517 "myingres.sc" */ /* connect */
- {
- IIsqInit(&sqlca);
- IIsqConnect(0,ingdbname,(char *)0, (char *)0, (char *)0, (char *)0,
- (char *)0, (char *)0, (char *)0, (char *)0, (char *)0, (char *)0,
- (char *)0, (char *)0, (char *)0);
- }
-/* # line 518 "myingres.sc" */ /* host code */
- }
-/* # line 520 "myingres.sc" */ /* inquire_sql */
- {
- IILQisInqSqlio((short *)0,1,32,31,conn_name,13);
- }
-/* # line 521 "myingres.sc" */ /* inquire_sql */
- {
- IILQisInqSqlio((short *)0,1,30,sizeof(sess_id),&sess_id,11);
- }
-/* # line 523 "myingres.sc" */ /* host code */
- bstrncpy(dbconn->dbname, ingdbname, sizeof(dbconn->dbname));
- bstrncpy(dbconn->user, ingdbuser, sizeof(dbconn->user));
- bstrncpy(dbconn->password, ingdbpasw, sizeof(dbconn->password));
- bstrncpy(dbconn->connection_name, conn_name, sizeof(dbconn->connection_name));
- dbconn->session_id = sess_id;
- dbconn->msg = (char*)malloc(257);
- memset(dbconn->msg, 0, 257);
- return dbconn;
-}
-void INGdisconnectDB(INGconn *dbconn)
-{
- /*
- * TODO: check for any real use of dbconn: maybe whenn multithreaded?
- */
-/* # line 539 "myingres.sc" */ /* disconnect */
- {
- IIsqInit(&sqlca);
- IIsqDisconnect();
- }
-/* # line 540 "myingres.sc" */ /* host code */
- if (dbconn != NULL) {
- free(dbconn->msg);
- free(dbconn);
- }
-}
-char *INGerrorMessage(const INGconn *conn)
-{
-/* # line 548 "myingres.sc" */
-
- char errbuf[256];
-/* # line 550 "myingres.sc" */
-
-/* # line 552 "myingres.sc" */ /* inquire_ingres */
- {
- IILQisInqSqlio((short *)0,1,32,255,errbuf,63);
- }
-/* # line 553 "myingres.sc" */ /* host code */
- memcpy(conn->msg,&errbuf,256);
- return conn->msg;
-}
-char *INGcmdTuples(INGresult *res)
-{
- return res->numrowstring;
-}
-/* TODO?
-int INGputCopyEnd(INGconn *conn, const char *errormsg);
-int INGputCopyData(INGconn *conn, const char *buffer, int nbytes);
-*/
-/* # line 567 "myingres.sc" */
-#endif
+++ /dev/null
-#ifndef _MYINGRES_SH
-#define _MYINGRES_SH
-#include <eqpname.h>
-#include <eqdefcc.h>
-#include <eqsqlda.h>
-/* # line 6 "myingres.sh" */ /* host code */
-/* ---typedefs--- */
-typedef struct ing_field {
- char name[34];
- int max_length;
- unsigned int type;
- unsigned int flags; // 1 == not null
-} INGRES_FIELD;
-typedef struct ing_row {
- IISQLVAR *sqlvar; /* ptr to sqlvar[sqld] for one row */
- struct ing_row *next;
- int row_number;
-} ING_ROW;
-typedef enum ing_status {
- ING_COMMAND_OK,
- ING_TUPLES_OK,
- ING_NO_RESULT,
- ING_NO_ROWS_PROCESSED,
- ING_EMPTY_RESULT,
- ING_ERROR
-} ING_STATUS;
-typedef struct ing_varchar {
- short len;
- char* value;
-} ING_VARCHAR;
-/* It seems, Bacula needs the complete query result stored in one data structure */
-typedef struct ing_result {
- IISQLDA *sqlda; /* descriptor */
- INGRES_FIELD *fields;
- int num_rows;
- int num_fields;
- ING_STATUS status;
- ING_ROW *first_row;
- ING_ROW *act_row; /* just for iterating */
- char numrowstring[10];
-} INGresult;
-typedef struct ing_conn {
- char dbname[24];
- char user[32];
- char password[32];
- char connection_name[32];
- int session_id;
- char *msg;
-} INGconn;
-/* ---Prototypes--- */
-int INGcheck();
-ING_STATUS INGresultStatus(INGresult *res);
-short INGgetCols(const char *stmt);
-IISQLDA *INGgetDescriptor(short numCols, const char *stmt);
-void INGfreeDescriptor(IISQLDA *sqlda);
-int INGgetTypeSize(IISQLVAR *ingvar);
-INGresult *INGgetINGresult(IISQLDA *sqlda);
-void INGfreeINGresult(INGresult *ing_res);
-ING_ROW *INGgetRowSpace(INGresult *ing_res);
-void INGfreeRowSpace(ING_ROW *row, IISQLDA *sqlda);
-int INGfetchAll(const char *stmt, INGresult *ing_res);
-void INGrowSeek(INGresult *res, int row_number);
-char *INGgetvalue(INGresult *res, int row_number, int column_number);
-int INGgetisnull(INGresult *res, int row_number, int column_number);
-int INGntuples(const INGresult *res);
-int INGnfields(const INGresult *res);
-char *INGfname(const INGresult *res, int column_number);
-short INGftype(const INGresult *res, int column_number);
-INGresult *INGexec(INGconn *db, const char *query);
-void INGclear(INGresult *res);
-INGconn *INGconnectDB(char *dbname, char *user, char *passwd);
-void INGdisconnectDB(INGconn *dbconn);
-char *INGerrorMessage(const INGconn *conn);
-char *INGcmdTuples(INGresult *res);
-/* # line 85 "myingres.sh" */
-#endif /* _MYINGRES_SH */
+++ /dev/null
-#include "bacula.h"
-
-#ifdef HAVE_INGRES
-EXEC SQL INCLUDE SQLCA;
-EXEC SQL INCLUDE SQLDA;
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "myingres.h"
-
-/*
- * ---Implementations---
- */
-int INGcheck()
-{
- return (sqlca.sqlcode < 0) ? sqlca.sqlcode : 0;
-}
-
-short INGgetCols(const char *stmt)
-{
- EXEC SQL BEGIN DECLARE SECTION;
- char *stmtd;
- EXEC SQL END DECLARE SECTION;
-
- short number = 1;
- IISQLDA *sqlda;
-
- sqlda = (IISQLDA *)malloc(IISQDA_HEAD_SIZE + (number * IISQDA_VAR_SIZE));
- memset(sqlda, 0, (IISQDA_HEAD_SIZE + (number * IISQDA_VAR_SIZE)));
-
- sqlda->sqln = number;
-
- stmtd = (char*)malloc(strlen(stmt)+1);
- bstrncpy(stmtd,stmt,strlen(stmt)+1);
-
- EXEC SQL PREPARE s1 from :stmtd;
- if (INGcheck() < 0) {
- free(stmtd);
- free(sqlda);
- return -1;
- }
- EXEC SQL DESCRIBE s1 into :sqlda;
- if (INGcheck() < 0) {
- free(stmtd);
- free(sqlda);
- return -1;
- }
-
- number = sqlda->sqld;
- free(stmtd);
- free(sqlda);
- return number;
-}
-
-IISQLDA *INGgetDescriptor(short numCols, const char *stmt)
-{
- EXEC SQL BEGIN DECLARE SECTION;
- char *stmtd;
- EXEC SQL END DECLARE SECTION;
-
- int i;
- IISQLDA *sqlda;
-
- sqlda = (IISQLDA *)malloc(IISQDA_HEAD_SIZE + (numCols * IISQDA_VAR_SIZE));
- memset(sqlda, 0, (IISQDA_HEAD_SIZE + (numCols * IISQDA_VAR_SIZE)));
-
- sqlda->sqln = numCols;
-
- stmtd = (char *)malloc(strlen(stmt)+1);
- bstrncpy(stmtd,stmt,strlen(stmt)+1);
-
- EXEC SQL PREPARE s2 INTO :sqlda FROM :stmtd;
-
- free(stmtd);
-
- for (i = 0; i < sqlda->sqld; ++i) {
- /*
- * Alloc space for variable like indicated in sqllen
- * for date types sqllen is always 0 -> allocate by type
- */
- switch (abs(sqlda->sqlvar[i].sqltype)) {
- case IISQ_TSW_TYPE:
- sqlda->sqlvar[i].sqldata = (char *)malloc(IISQ_TSW_LEN);
- break;
- case IISQ_TSWO_TYPE:
- sqlda->sqlvar[i].sqldata = (char *)malloc(IISQ_TSWO_LEN);
- break;
- case IISQ_TSTMP_TYPE:
- sqlda->sqlvar[i].sqldata = (char *)malloc(IISQ_TSTMP_LEN);
- break;
- default:
- sqlda->sqlvar[i].sqldata = (char *)malloc(sqlda->sqlvar[i].sqllen);
- break;
- }
- }
-
- return sqlda;
-}
-
-void INGfreeDescriptor(IISQLDA *sqlda)
-{
- if (!sqlda) {
- return;
- }
-
- int i;
-
- for (i = 0; i < sqlda->sqld; ++i) {
- if (sqlda->sqlvar[i].sqldata) {
- free(sqlda->sqlvar[i].sqldata);
- }
- if (sqlda->sqlvar[i].sqlind) {
- free(sqlda->sqlvar[i].sqlind);
- }
- }
- free(sqlda);
- sqlda = NULL;
-}
-
-int INGgetTypeSize(IISQLVAR *ingvar)
-{
- int inglength = 0;
-
- /*
- * TODO: add date types (at least TSTMP,TSW TSWO)
- */
- switch (ingvar->sqltype) {
- case IISQ_DTE_TYPE:
- inglength = 25;
- break;
- case IISQ_MNY_TYPE:
- inglength = 8;
- break;
- default:
- inglength = ingvar->sqllen;
- break;
- }
-
- return inglength;
-}
-
-INGresult *INGgetINGresult(IISQLDA *sqlda)
-{
- if (!sqlda) {
- return NULL;
- }
-
- int i;
- INGresult *result = NULL;
-
- result = (INGresult *)malloc(sizeof(INGresult));
- memset(result, 0, sizeof(INGresult));
-
- result->sqlda = sqlda;
- result->num_fields = sqlda->sqld;
- result->num_rows = 0;
- result->first_row = NULL;
- result->status = ING_EMPTY_RESULT;
- result->act_row = NULL;
- memset(result->numrowstring, 0, sizeof(result->numrowstring));
-
- if (result->num_fields) {
- result->fields = (INGRES_FIELD *)malloc(sizeof(INGRES_FIELD) * result->num_fields);
- memset(result->fields, 0, sizeof(INGRES_FIELD) * result->num_fields);
-
- for (i = 0; i < result->num_fields; ++i) {
- memset(result->fields[i].name, 0, 34);
- bstrncpy(result->fields[i].name, sqlda->sqlvar[i].sqlname.sqlnamec, sqlda->sqlvar[i].sqlname.sqlnamel);
- result->fields[i].max_length = INGgetTypeSize(&sqlda->sqlvar[i]);
- result->fields[i].type = abs(sqlda->sqlvar[i].sqltype);
- result->fields[i].flags = (abs(sqlda->sqlvar[i].sqltype)<0) ? 1 : 0;
- }
- }
-
- return result;
-}
-
-void INGfreeINGresult(INGresult *ing_res)
-{
- if (!ing_res) {
- return;
- }
-
- int rows;
- ING_ROW *rowtemp;
-
- /*
- * Free all rows and fields, then res, not descriptor!
- */
- if (ing_res != NULL) {
- /*
- * Use of rows is a nasty workaround til I find the reason,
- * why aggregates like max() don't work
- */
- rows = ing_res->num_rows;
- ing_res->act_row = ing_res->first_row;
- while (ing_res->act_row != NULL && rows > 0) {
- rowtemp = ing_res->act_row->next;
- INGfreeRowSpace(ing_res->act_row, ing_res->sqlda);
- ing_res->act_row = rowtemp;
- --rows;
- }
- if (ing_res->fields) {
- free(ing_res->fields);
- }
- }
- free(ing_res);
- ing_res = NULL;
-}
-
-ING_ROW *INGgetRowSpace(INGresult *ing_res)
-{
- int i;
- unsigned short len; /* used for VARCHAR type length */
- IISQLDA *sqlda = ing_res->sqlda;
- ING_ROW *row = NULL;
- IISQLVAR *vars = NULL;
-
- row = (ING_ROW *)malloc(sizeof(ING_ROW));
- memset(row, 0, sizeof(ING_ROW));
-
- vars = (IISQLVAR *)malloc(sizeof(IISQLVAR) * sqlda->sqld);
- memset(vars, 0, sizeof(IISQLVAR) * sqlda->sqld);
-
- row->sqlvar = vars;
- row->next = NULL;
-
- for (i = 0; i < sqlda->sqld; ++i) {
- /*
- * Make strings out of the data, then the space and assign
- * (why string? at least it seems that way, looking into the sources)
- */
- switch (ing_res->fields[i].type) {
- case IISQ_VCH_TYPE:
- len = ((ING_VARCHAR *)sqlda->sqlvar[i].sqldata)->len;
- vars[i].sqldata = (char *)malloc(len+1);
- memcpy(vars[i].sqldata,sqlda->sqlvar[i].sqldata+2,len);
- vars[i].sqldata[len] = '\0';
- break;
- case IISQ_CHA_TYPE:
- vars[i].sqldata = (char *)malloc(ing_res->fields[i].max_length+1);
- memcpy(vars[i].sqldata,sqlda->sqlvar[i].sqldata,sqlda->sqlvar[i].sqllen);
- vars[i].sqldata[ing_res->fields[i].max_length] = '\0';
- break;
- case IISQ_INT_TYPE:
- vars[i].sqldata = (char *)malloc(20);
- memset(vars[i].sqldata, 0, 20);
- switch (sqlda->sqlvar[i].sqllen) {
- case 2:
- bsnprintf(vars[i].sqldata, 20, "%d",*(short*)sqlda->sqlvar[i].sqldata);
- break;
- case 4:
- bsnprintf(vars[i].sqldata, 20, "%ld",*(int*)sqlda->sqlvar[i].sqldata);
- break;
- case 8:
- bsnprintf(vars[i].sqldata, 20, "%lld",*(long*)sqlda->sqlvar[i].sqldata);
- break;
- }
- break;
- case IISQ_TSTMP_TYPE:
- vars[i].sqldata = (char *)malloc(IISQ_TSTMP_LEN+1);
- vars[i].sqldata[IISQ_TSTMP_LEN] = '\0';
- break;
- case IISQ_TSWO_TYPE:
- vars[i].sqldata = (char *)malloc(IISQ_TSWO_LEN+1);
- vars[i].sqldata[IISQ_TSWO_LEN] = '\0';
- break;
- case IISQ_TSW_TYPE:
- vars[i].sqldata = (char *)malloc(IISQ_TSW_LEN+1);
- vars[i].sqldata[IISQ_TSW_LEN] = '\0';
- break;
- }
- vars[i].sqlind = (short *)malloc(sizeof(short));
- if (sqlda->sqlvar[i].sqlind) {
- memcpy(vars[i].sqlind,sqlda->sqlvar[i].sqlind,sizeof(short));
- } else {
- *vars[i].sqlind = 0;
- }
- }
-
- return row;
-}
-
-
-void INGfreeRowSpace(ING_ROW *row, IISQLDA *sqlda)
-{
- int i;
-
- if (row == NULL || sqlda == NULL) {
- return;
- }
-
- for (i = 0; i < sqlda->sqld; ++i) {
- if (row->sqlvar[i].sqldata) {
- free(row->sqlvar[i].sqldata);
- }
- if (row->sqlvar[i].sqlind) {
- free(row->sqlvar[i].sqlind);
- }
- }
- free(row->sqlvar);
- free(row);
-}
-
-int INGfetchAll(const char *stmt, INGresult *ing_res)
-{
- int linecount = 0;
- ING_ROW *row;
- IISQLDA *desc;
- int check = -1;
-
- desc = ing_res->sqlda;
-
- EXEC SQL DECLARE c2 CURSOR FOR s2;
- if ((check = INGcheck()) < 0) {
- return check;
- }
-
- EXEC SQL OPEN c2;
- if ((check = INGcheck()) < 0) {
- return check;
- }
-
- /* for (linecount = 0; sqlca.sqlcode == 0; ++linecount) */
- do {
- EXEC SQL FETCH c2 USING DESCRIPTOR :desc;
-
- if ( (sqlca.sqlcode == 0) || (sqlca.sqlcode == -40202) ) {
- row = INGgetRowSpace(ing_res); /* alloc space for fetched row */
-
- /*
- * Initialize list when encountered first time
- */
- if (ing_res->first_row == 0) {
- ing_res->first_row = row; /* head of the list */
- ing_res->first_row->next = NULL;
- ing_res->act_row = ing_res->first_row;
- }
- ing_res->act_row->next = row; /* append row to old act_row */
- ing_res->act_row = row; /* set row as act_row */
- ++linecount;
- row->row_number = linecount;
- }
- } while ( (sqlca.sqlcode == 0) || (sqlca.sqlcode == -40202) );
-
- EXEC SQL CLOSE c2;
-
- ing_res->status = ING_COMMAND_OK;
- ing_res->num_rows = linecount;
- return linecount;
-}
-
-ING_STATUS INGresultStatus(INGresult *res)
-{
- if (res == NULL) {return ING_NO_RESULT;}
- return res->status;
-}
-
-void INGrowSeek(INGresult *res, int row_number)
-{
- ING_ROW *trow = NULL;
- if (res->act_row->row_number == row_number) {
- return;
- }
-
- /*
- * TODO: real error handling
- */
- if (row_number<0 || row_number>res->num_rows) {
- return;
- }
-
- for (trow = res->first_row ; trow->row_number != row_number ; trow = trow->next );
- res->act_row = trow;
- /*
- * Note - can be null - if row_number not found, right?
- */
-}
-
-char *INGgetvalue(INGresult *res, int row_number, int column_number)
-{
- if (row_number != res->act_row->row_number) {
- INGrowSeek(res, row_number);
- }
- return res->act_row->sqlvar[column_number].sqldata;
-}
-
-int INGgetisnull(INGresult *res, int row_number, int column_number)
-{
- if (row_number != res->act_row->row_number) {
- INGrowSeek(res, row_number);
- }
- return (short)*res->act_row->sqlvar[column_number].sqlind;
-}
-
-int INGntuples(const INGresult *res)
-{
- return res->num_rows;
-}
-
-int INGnfields(const INGresult *res)
-{
- return res->num_fields;
-}
-
-char *INGfname(const INGresult *res, int column_number)
-{
- if ((column_number > res->num_fields) || (column_number < 0)) {
- return NULL;
- } else {
- return res->fields[column_number].name;
- }
-}
-
-short INGftype(const INGresult *res, int column_number)
-{
- return res->fields[column_number].type;
-}
-
-int INGexec(INGconn *conn, const char *query)
-{
- int check;
- EXEC SQL BEGIN DECLARE SECTION;
- int rowcount;
- char *stmt;
- EXEC SQL END DECLARE SECTION;
-
- stmt = (char *)malloc(strlen(query)+1);
- bstrncpy(stmt,query,strlen(query)+1);
- rowcount = -1;
-
- EXEC SQL EXECUTE IMMEDIATE :stmt;
- free(stmt);
- if ((check = INGcheck()) < 0) {
- return check;
- }
-
- EXEC SQL INQUIRE_INGRES(:rowcount = ROWCOUNT);
- if ((check = INGcheck()) < 0) {
- return check;
- }
-
- return rowcount;
-}
-
-INGresult *INGquery(INGconn *conn, const char *query)
-{
- /*
- * TODO: error handling
- */
- IISQLDA *desc = NULL;
- INGresult *res = NULL;
- int rows = -1;
- int cols = INGgetCols(query);
-
- desc = INGgetDescriptor(cols, query);
- if (!desc) {
- return NULL;
- }
- res = INGgetINGresult(desc);
- if (!res) {
- return NULL;
- }
- rows = INGfetchAll(query, res);
-
- if (rows < 0) {
- INGfreeINGresult(res);
- INGfreeDescriptor(desc);
- return NULL;
- }
- return res;
-}
-
-void INGclear(INGresult *res)
-{
- if (res == NULL) {
- return;
- }
- IISQLDA *desc = res->sqlda;
- INGfreeINGresult(res);
- INGfreeDescriptor(desc);
-}
-
-INGconn *INGconnectDB(char *dbname, char *user, char *passwd)
-{
- if (dbname == NULL || strlen(dbname) == 0) {
- return NULL;
- }
-
- INGconn *dbconn = (INGconn *)malloc(sizeof(INGconn));
- memset(dbconn, 0, sizeof(INGconn));
-
- EXEC SQL BEGIN DECLARE SECTION;
- char ingdbname[24];
- char ingdbuser[32];
- char ingdbpasw[32];
- char conn_name[32];
- int sess_id;
- EXEC SQL END DECLARE SECTION;
-
- bstrncpy(ingdbname, dbname, sizeof(ingdbname));
-
- if (user != NULL) {
- bstrncpy(ingdbuser, user, sizeof(ingdbuser));
- if (passwd != NULL) {
- bstrncpy(ingdbpasw, passwd, sizeof(ingdbpasw));
- } else {
- memset(ingdbpasw, 0, sizeof(ingdbpasw));
- }
- EXEC SQL CONNECT
- :ingdbname
- identified by :ingdbuser
- dbms_password = :ingdbpasw;
- } else {
- EXEC SQL CONNECT :ingdbname;
- }
-
- EXEC SQL INQUIRE_SQL(:conn_name = connection_name);
- EXEC SQL INQUIRE_SQL(:sess_id = session);
-
- bstrncpy(dbconn->dbname, ingdbname, sizeof(dbconn->dbname));
- bstrncpy(dbconn->user, ingdbuser, sizeof(dbconn->user));
- bstrncpy(dbconn->password, ingdbpasw, sizeof(dbconn->password));
- bstrncpy(dbconn->connection_name, conn_name, sizeof(dbconn->connection_name));
- dbconn->session_id = sess_id;
- dbconn->msg = (char*)malloc(257);
- memset(dbconn->msg, 0, 257);
-
- return dbconn;
-}
-
-void INGdisconnectDB(INGconn *dbconn)
-{
- /*
- * TODO: check for any real use of dbconn: maybe whenn multithreaded?
- */
- EXEC SQL DISCONNECT;
- if (dbconn != NULL) {
- free(dbconn->msg);
- free(dbconn);
- }
-}
-
-char *INGerrorMessage(const INGconn *conn)
-{
- EXEC SQL BEGIN DECLARE SECTION;
- char errbuf[256];
- EXEC SQL END DECLARE SECTION;
-
- EXEC SQL INQUIRE_INGRES(:errbuf = ERRORTEXT);
- memcpy(conn->msg,&errbuf,256);
- return conn->msg;
-}
-
-char *INGcmdTuples(INGresult *res)
-{
- return res->numrowstring;
-}
-
-/* TODO?
-int INGputCopyEnd(INGconn *conn, const char *errormsg);
-int INGputCopyData(INGconn *conn, const char *buffer, int nbytes);
-*/
-
-#endif
+++ /dev/null
-#ifndef _MYINGRES_SH
-#define _MYINGRES_SH
-
-EXEC SQL INCLUDE SQLDA;
-
-/* ---typedefs--- */
-
-typedef struct ing_field {
- char name[34];
- int max_length;
- unsigned int type;
- unsigned int flags; // 1 == not null
-} INGRES_FIELD;
-
-typedef struct ing_row {
- IISQLVAR *sqlvar; /* ptr to sqlvar[sqld] for one row */
- struct ing_row *next;
- int row_number;
-} ING_ROW;
-
-typedef enum ing_status {
- ING_COMMAND_OK,
- ING_TUPLES_OK,
- ING_NO_RESULT,
- ING_NO_ROWS_PROCESSED,
- ING_EMPTY_RESULT,
- ING_ERROR
-} ING_STATUS;
-
-typedef struct ing_varchar {
- short len;
- char* value;
-} ING_VARCHAR;
-
-/* It seems, Bacula needs the complete query result stored in one data structure */
-typedef struct ing_result {
- IISQLDA *sqlda; /* descriptor */
- INGRES_FIELD *fields;
- int num_rows;
- int num_fields;
- ING_STATUS status;
- ING_ROW *first_row;
- ING_ROW *act_row; /* just for iterating */
- char numrowstring[10];
-
-} INGresult;
-
-typedef struct ing_conn {
- char dbname[24];
- char user[32];
- char password[32];
- char connection_name[32];
- int session_id;
- char *msg;
-} INGconn;
-
-
-/* ---Prototypes--- */
-int INGcheck();
-ING_STATUS INGresultStatus(INGresult *res);
-short INGgetCols(const char *stmt);
-IISQLDA *INGgetDescriptor(short numCols, const char *stmt);
-void INGfreeDescriptor(IISQLDA *sqlda);
-int INGgetTypeSize(IISQLVAR *ingvar);
-INGresult *INGgetINGresult(IISQLDA *sqlda);
-void INGfreeINGresult(INGresult *ing_res);
-ING_ROW *INGgetRowSpace(INGresult *ing_res);
-void INGfreeRowSpace(ING_ROW *row, IISQLDA *sqlda);
-int INGfetchAll(const char *stmt, INGresult *ing_res);
-void INGrowSeek(INGresult *res, int row_number);
-char *INGgetvalue(INGresult *res, int row_number, int column_number);
-int INGgetisnull(INGresult *res, int row_number, int column_number);
-int INGntuples(const INGresult *res);
-int INGnfields(const INGresult *res);
-char *INGfname(const INGresult *res, int column_number);
-short INGftype(const INGresult *res, int column_number);
-INGresult *INGexec(INGconn *db, const char *query);
-void INGclear(INGresult *res);
-INGconn *INGconnectDB(char *dbname, char *user, char *passwd);
-void INGdisconnectDB(INGconn *dbconn);
-char *INGerrorMessage(const INGconn *conn);
-char *INGcmdTuples(INGresult *res);
-
-#endif /* _MYINGRES_SH */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
db_check_backend_thread_safe();
/* Display a message if the db max_connections is too low */
- if (!db_check_max_connections(NULL, db, director->MaxConcurrentJobs+1)) {
+ if (!db_check_max_connections(NULL, db, director->MaxConcurrentJobs)) {
Pmsg1(000, "Warning, settings problem for Catalog=%s\n", catalog->name());
Pmsg1(000, "%s", db_strerror(db));
}
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, January MM
*
- * Version $Id$
*/
uint32_t *destination = (uint32_t*)item->value;
lex_get_token(lc, T_NAME);
if (strcasecmp(lc->str, "truncate") == 0) {
- *destination = (*destination) | AOP_TRUNCATE;
+ *destination = (*destination) | ON_PURGE_TRUNCATE;
} else {
scan_err2(lc, _("Expected one of: %s, got: %s"), "Truncate", lc->str);
return;
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
* Requests are any message that does not begin with a digit.
* In affect, they are commands.
*
- * Version $Id$
*/
#include "bacula.h"
static char OK_msg[] = "1000 OK\n";
-void set_jcr_sd_job_status(JCR *jcr, int SDJobStatus)
+static void set_jcr_sd_job_status(JCR *jcr, int SDJobStatus)
{
bool set_waittime=false;
Dmsg2(800, "set_jcr_sd_job_status(%s, %c)\n", jcr->Job, SDJobStatus);
bool has_volume_expired(JCR *jcr, MEDIA_DBR *mr)
{
bool expired = false;
+ char ed1[50];
/*
* Check limits and expirations if "Append" and it has been used
* i.e. mr->VolJobs > 0
if (strcmp(mr->VolStatus, "Append") == 0 && mr->VolJobs > 0) {
/* First handle Max Volume Bytes */
if ((mr->MaxVolBytes > 0 && mr->VolBytes >= mr->MaxVolBytes)) {
- Jmsg(jcr, M_INFO, 0, _("Max Volume bytes exceeded. "
- "Marking Volume \"%s\" as Full.\n"), mr->VolumeName);
+ Jmsg(jcr, M_INFO, 0, _("Max Volume bytes=%s exceeded. "
+ "Marking Volume \"%s\" as Full.\n"),
+ edit_uint64_with_commas(mr->MaxVolBytes, ed1), mr->VolumeName);
bstrncpy(mr->VolStatus, "Full", sizeof(mr->VolStatus));
expired = true;
/* Now see if Max Jobs written to volume */
} else if (mr->MaxVolJobs > 0 && mr->MaxVolJobs <= mr->VolJobs) {
- Jmsg(jcr, M_INFO, 0, _("Max Volume jobs exceeded. "
- "Marking Volume \"%s\" as Used.\n"), mr->VolumeName);
+ Jmsg(jcr, M_INFO, 0, _("Max Volume jobs=%s exceeded. "
+ "Marking Volume \"%s\" as Used.\n"),
+ edit_uint64_with_commas(mr->MaxVolJobs, ed1), mr->VolumeName);
Dmsg3(100, "MaxVolJobs=%d JobId=%d Vol=%s\n", mr->MaxVolJobs,
(uint32_t)jcr->JobId, mr->VolumeName);
bstrncpy(mr->VolStatus, "Used", sizeof(mr->VolStatus));
/* Now see if Max Files written to volume */
} else if (mr->MaxVolFiles > 0 && mr->MaxVolFiles <= mr->VolFiles) {
- Jmsg(jcr, M_INFO, 0, _("Max Volume files exceeded. "
- "Marking Volume \"%s\" as Used.\n"), mr->VolumeName);
+ Jmsg(jcr, M_INFO, 0, _("Max Volume files=%s exceeded. "
+ "Marking Volume \"%s\" as Used.\n"),
+ edit_uint64_with_commas(mr->MaxVolFiles, ed1), mr->VolumeName);
bstrncpy(mr->VolStatus, "Used", sizeof(mr->VolStatus));
expired = true;
utime_t now = time(NULL);
/* See if Vol Use has expired */
if (mr->VolUseDuration <= (now - mr->FirstWritten)) {
- Jmsg(jcr, M_INFO, 0, _("Max configured use duration exceeded. "
- "Marking Volume \"%s\" as Used.\n"), mr->VolumeName);
+ Jmsg(jcr, M_INFO, 0, _("Max configured use duration=%s sec. exceeded. "
+ "Marking Volume \"%s\" as Used.\n"),
+ edit_uint64_with_commas(mr->VolUseDuration, ed1), mr->VolumeName);
bstrncpy(mr->VolStatus, "Used", sizeof(mr->VolStatus));
expired = true;
}
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2007 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*/
/*
* Director external function prototypes
- *
- * Version $Id$
*/
/* admin.c */
char *link, char *attr, int stream);
extern void get_level_since_time(JCR *jcr, char *since, int since_len);
extern int send_runscripts_commands(JCR *jcr);
+extern bool send_restore_objects(JCR *jcr);
/* getmsg.c */
enum e_prtmsg {
bool do_a_command(UAContext *ua);
bool do_a_dot_command(UAContext *ua);
int qmessagescmd(UAContext *ua, const char *cmd);
+bool open_new_client_db(UAContext *ua);
bool open_client_db(UAContext *ua);
bool open_db(UAContext *ua);
void close_db(UAContext *ua);
bool is_yesno(char *val, int *ret);
int get_enabled(UAContext *ua, const char *val);
void parse_ua_args(UAContext *ua);
+bool is_comment_legal(UAContext *ua, const char *name);
/* ua_label.c */
bool is_volume_name_legal(UAContext *ua, const char *name);
# See the file <bacula-source>/examples/sample-query.sql
# for some sample queries.
#
+# 1
+:The default file is empty, see <bacula-source>/examples/sample-query.sql for samples
+SELECT 'See <bacula-source>/examples/sample-query.sql for samples' AS Info;
int max_prompts; /* max size of list */
int num_prompts; /* current number in list */
int api; /* For programs want an API */
+ bool force_mult_db_connections; /* overwrite cat.mult_db_connections */
bool auto_display_messages; /* if set, display messages */
bool user_notified_msg_pending; /* set when user notified */
bool automount; /* if set, mount after label */
POOL *pool;
int restore_jobs;
uint32_t selected_files;
+ char *comment;
char *where;
char *RegexWhere;
RBSR *bsr;
}
#endif
+/*
+ * This call uses open_client_db() and force a
+ * new dedicated connection to the catalog
+ */
+bool open_new_client_db(UAContext *ua)
+{
+ bool ret;
+
+ /* Force a new dedicated connection */
+ close_db(ua);
+ ua->force_mult_db_connections = true;
+ ret = open_client_db(ua);
+ ua->force_mult_db_connections = false;
+ return ret;
+}
+
/*
* This call explicitly checks for a catalog=xxx and
* if given, opens that catalog. It also checks for
*/
bool open_db(UAContext *ua)
{
+ bool mult_db_conn;
+
if (ua->db) {
return true;
}
}
}
+ /* Some modules like bvfs need their own catalog connection */
+ mult_db_conn = ua->catalog->mult_db_connections;
+ if (ua->force_mult_db_connections) {
+ mult_db_conn = true;
+ }
+
ua->jcr->catalog = ua->catalog;
Dmsg0(100, "UA Open database\n");
ua->catalog->db_user,
ua->catalog->db_password, ua->catalog->db_address,
ua->catalog->db_port, ua->catalog->db_socket,
- ua->catalog->mult_db_connections);
+ mult_db_conn);
if (!ua->db || !db_open_database(ua->jcr, ua->db)) {
ua->error_msg(_("Could not open catalog database \"%s\".\n"),
ua->catalog->db_name);
static bool dot_bvfs_update(UAContext *ua, const char *cmd)
{
-
- if (!open_client_db(ua)) {
+ if (!open_new_client_db(ua)) {
return 1;
}
/* update cache for all jobids */
bvfs_update_cache(ua->jcr, ua->db);
}
+
+ close_db(ua);
return true;
}
}
Dmsg0(120, _("Connected to storage daemon\n"));
sd = jcr->store_bsock;
- sd->fsend(".die");
+ sd->fsend("%s", cmd);
if (sd->recv() >= 0) {
ua->send_msg("%s", sd->msg);
}
}
Dmsg0(120, "Connected to file daemon\n");
fd = ua->jcr->file_bsock;
- fd->fsend(".die");
+ fd->fsend("%s", cmd);
if (fd->recv() >= 0) {
ua->send_msg("%s", fd->msg);
}
client = NULL;
if (ua->argv[i]) {
client = (CLIENT *)GetResWithName(R_CLIENT, ua->argv[i]);
- if (client) {
- do_client_die(ua, client);
- return 1;
- }
}
- client = select_client_resource(ua);
- if (client) {
- do_client_die(ua, client);
- return 1;
+ if (!client) {
+ client = select_client_resource(ua);
}
}
-
+
if (strcasecmp(ua->argk[i], NT_("store")) == 0 ||
strcasecmp(ua->argk[i], NT_("storage")) == 0 ||
strcasecmp(ua->argk[i], NT_("sd")) == 0) {
store = NULL;
if (ua->argv[i]) {
store = (STORE *)GetResWithName(R_STORAGE, ua->argv[i]);
- if (store) {
- do_storage_die(ua, store);
- return 1;
- }
}
- store = get_storage_resource(ua, false/*no default*/);
- if (store) {
- do_storage_die(ua, store);
- return 1;
+ if (!store) {
+ store = get_storage_resource(ua, false/*no default*/);
}
}
}
+ if (!dir && !store && !client) {
+ /*
+ * We didn't find an appropriate keyword above, so
+ * prompt the user.
+ */
+ start_prompt(ua, _("Available daemons are: \n"));
+ add_prompt(ua, _("Director"));
+ add_prompt(ua, _("Storage"));
+ add_prompt(ua, _("Client"));
+ switch(do_prompt(ua, "", _("Select daemon type to make die"), NULL, 0)) {
+ case 0: /* Director */
+ dir=true;
+ break;
+ case 1:
+ store = get_storage_resource(ua, false/*no default*/);
+ break;
+ case 2:
+ client = select_client_resource(ua);
+ break;
+ default:
+ break;
+ }
+ }
+
if (store) {
do_storage_cmd(ua, store, remote_cmd);
}
dot_quit_cmd(ua, cmd);
}
}
+
return true;
}
ua->send_msg("Admin\n");
ua->send_msg("Verify\n");
ua->send_msg("Migrate\n");
+ ua->send_msg("Copy\n");
return true;
}
{
parse_args(ua->cmd, &ua->args, &ua->argc, ua->argk, ua->argv, MAX_CMD_ARGS);
}
+
+/*
+ * Check if the comment has legal characters
+ * If ua is non-NULL send the message
+ */
+bool is_comment_legal(UAContext *ua, const char *name)
+{
+ int len;
+ const char *p;
+ const char *forbid = "'<>&\\\"";
+
+ /* Restrict the characters permitted in the comment */
+ for (p=name; *p; p++) {
+ if (!strchr(forbid, (int)(*p))) {
+ continue;
+ }
+ if (ua) {
+ ua->error_msg(_("Illegal character \"%c\" in a comment.\n"), *p);
+ }
+ return 0;
+ }
+ len = strlen(name);
+ if (len >= MAX_NAME_LENGTH) {
+ if (ua) {
+ ua->error_msg(_("Comment too long.\n"));
+ }
+ return 0;
+ }
+ if (len == 0) {
+ if (ua) {
+ ua->error_msg(_("Comment must be at least one character long.\n"));
+ }
+ return 0;
+ }
+ return 1;
+}
}
Dmsg0(100, "complete_jcr open db\n");
- jcr->db = jcr->db=db_init(jcr, jcr->catalog->db_driver, jcr->catalog->db_name,
- jcr->catalog->db_user,
- jcr->catalog->db_password, jcr->catalog->db_address,
- jcr->catalog->db_port, jcr->catalog->db_socket,
- jcr->catalog->mult_db_connections);
+ jcr->db = db_init(jcr, jcr->catalog->db_driver, jcr->catalog->db_name,
+ jcr->catalog->db_user,
+ jcr->catalog->db_password, jcr->catalog->db_address,
+ jcr->catalog->db_port, jcr->catalog->db_socket,
+ jcr->catalog->mult_db_connections);
if (!jcr->db || !db_open_database(jcr, jcr->db)) {
Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"),
jcr->catalog->db_name);
PYTHON_LIBS = @PYTHON_LIBS@
PYTHON_INC = @PYTHON_INCDIR@
+AFS_CFLAGS = @AFS_CFLAGS@
+AFS_LIBS = @AFS_LIBS@
first_rule: all
dummy:
CAP_LIBS = @CAP_LIBS@
FDLIBS = @FDLIBS@ # extra libs for File daemon
+ZLIBS = @ZLIBS@
# extra items for linking on Win32
WIN32OBJS = win32/winmain.o win32/winlib.a win32/winres.res
@echo "==== Make of filed is good ===="
@echo " "
+acl.o: acl.c
+ @echo "Compiling $<"
+ $(NO_ECHO)$(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(PYTHON_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $(AFS_CFLAGS) $<
+
win32/winlib.a:
@if test -f win32/Makefile -a "${GMAKE}" != "none"; then \
(cd win32; $(GMAKE) DESTDIR=$(DESTDIR)); \
bacula-fd: Makefile $(SVROBJS) ../findlib/libbacfind$(DEFAULT_ARCHIVE_TYPE) ../lib/libbacpy$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @WIN32@
@echo "Linking $@ ..."
$(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -L../lib -L../findlib -o $@ $(SVROBJS) \
- $(WIN32LIBS) $(FDLIBS) -lbacfind -lbacpy -lbaccfg -lbac -lm $(PYTHON_LIBS) $(LIBS) \
- $(DLIB) $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS)
+ $(WIN32LIBS) $(FDLIBS) $(ZLIBS) -lbacfind -lbacpy -lbaccfg -lbac -lm $(PYTHON_LIBS) $(LIBS) \
+ $(DLIB) $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) $(AFS_LIBS)
static-bacula-fd: Makefile $(SVROBJS) ../findlib/libbacfind.a ../lib/libbacpy$(DEFAULT_ARCHIVE_TYPE) ../lib/libbaccfg$(DEFAULT_ARCHIVE_TYPE) ../lib/libbac$(DEFAULT_ARCHIVE_TYPE) @WIN32@
$(LIBTOOL_LINK) $(CXX) $(WLDFLAGS) $(LDFLAGS) -static -L../lib -L../findlib -o $@ $(SVROBJS) \
- $(WIN32LIBS) $(FDLIBS) -lbacfind -lbacpy -lbaccfg -lbac -lm $(PYTHON_LIBS) $(LIBS) \
- $(DLIB) $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS)
+ $(WIN32LIBS) $(FDLIBS) $(ZLIBS) -lbacfind -lbacpy -lbaccfg -lbac -lm $(PYTHON_LIBS) $(LIBS) \
+ $(DLIB) $(WRAPLIBS) $(GETTEXT_LIBS) $(OPENSSL_LIBS) $(CAP_LIBS) $(AFS_LIBS)
strip $@
Makefile: $(srcdir)/Makefile.in $(topdir)/config.status
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2001-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2001-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, August MMI
*
- * Version $Id$
- *
*/
#include "bacula.h"
my_name, VERSION, BDATE, VSS, HOST_OS, DISTNAME, DISTVER);
sendit(msg.c_str(), len, sp);
bstrftime_nc(dt, sizeof(dt), daemon_start_time);
- len = Mmsg(msg, _("Daemon started %s, %d Job%s run since started.\n"),
- dt, num_jobs_run, num_jobs_run == 1 ? "" : "s");
+ len = Mmsg(msg, _("Daemon started %s. Jobs: run=%d running=%d.\n"),
+ dt, num_jobs_run, job_count());
sendit(msg.c_str(), len, sp);
#if defined(HAVE_WIN32)
if (debug_level > 0) {
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, October MM
*
- * Version $Id$
- *
*/
#include "bacula.h"
case FT_NOFSCHG:
Jmsg(jcr, M_SKIPPED, 1, _(" File system change prohibited. Directory skipped: %s\n"), ff_pkt->fname);
return 1;
+ case FT_RESTORE_FIRST:
+ return 1; /* silently skip */
case FT_NOOPEN: {
berrno be;
be.set_errno(ff_pkt->ff_errno);
-/*
- * Bacula File Daemon verify-vol.c Verify files on a Volume
- * versus attributes in Catalog
- *
- * Kern Sibbald, July MMII
- *
- * Version $Id$
- *
- */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2002-2006 Free Software Foundation Europe e.V.
+ Copyright (C) 2002-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
*/
+/*
+ * Bacula File Daemon verify-vol.c Verify files on a Volume
+ * versus attributes in Catalog
+ *
+ * Kern Sibbald, July MMII
+ *
+ */
#include "bacula.h"
#include "filed.h"
#
# include files installed when using libtool
#
-INCLUDE_FILES = bfile.h find.h protos.h savecwd.h
+INCLUDE_FILES = bfile.h find.h protos.h
#
LIBBACFIND_SRCS = find.c match.c find_one.c attribs.c create_file.c \
}
}
-
+/**
+ * Convert a 64 bit little endian to a big endian
+ */
void int64_LE2BE(int64_t* pBE, const int64_t v)
{
/* convert little endian to big endian */
}
}
-
+/**
+ * Convert a 32 bit little endian to a big endian
+ */
void int32_LE2BE(int32_t* pBE, const int32_t v)
{
/* convert little endian to big endian */
}
+/**
+ * Read a BackupRead block and pull out the file data
+ */
bool processWin32BackupAPIBlock (BFILE *bfd, void *pBuffer, ssize_t dwSize)
{
/* pByte contains the buffer
#include "bacula.h"
#include "find.h"
+static const int dbglvl = 450;
+
int32_t name_max; /* filename max length */
int32_t path_max; /* path name max length */
path_max++; /* add for EOS */
name_max++; /* add for EOS */
- Dmsg1(100, "init_find_files ff=%p\n", ff);
+ Dmsg1(dbglvl, "init_find_files ff=%p\n", ff);
return ff;
}
void
set_find_options(FF_PKT *ff, int incremental, time_t save_time)
{
- Dmsg0(100, "Enter set_find_options()\n");
+ Dmsg0(dbglvl, "Enter set_find_options()\n");
ff->incremental = incremental;
ff->save_time = save_time;
- Dmsg0(100, "Leave set_find_options()\n");
+ Dmsg0(dbglvl, "Leave set_find_options()\n");
}
void
set_find_changed_function(FF_PKT *ff, bool check_fct(JCR *jcr, FF_PKT *ff))
{
- Dmsg0(100, "Enter set_find_changed_function()\n");
+ Dmsg0(dbglvl, "Enter set_find_changed_function()\n");
ff->check_fct = check_fct;
}
findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i);
fileset->incexe = incexe;
/*
- * By setting all options, we in effect or the global options
+ * By setting all options, we in effect OR the global options
* which is what we want.
*/
for (j=0; j<incexe->opts_list.size(); j++) {
dlistString *node;
foreach_dlist(node, &incexe->name_list) {
char *fname = node->c_str();
- Dmsg1(100, "F %s\n", fname);
+ Dmsg1(dbglvl, "F %s\n", fname);
ff->top_fname = fname;
if (find_one_file(jcr, ff, our_callback, ff->top_fname, (dev_t)-1, true) == 0) {
return 0; /* error return */
Jmsg(jcr, M_FATAL, 0, _("Plugin: \"%s\" not found.\n"), fname);
return 0;
}
- Dmsg1(100, "PluginCommand: %s\n", fname);
+ Dmsg1(dbglvl, "PluginCommand: %s\n", fname);
ff->top_fname = fname;
ff->cmd_plugin = true;
plugin_save(jcr, ff, true);
incexe = (findINCEXE *)fileset->include_list.get(i);
foreach_dlist(node, &incexe->name_list) {
fname = node->c_str();
- Dmsg2(100, "Inc fname=%s ff->fname=%s\n", fname, ff->fname);
+ Dmsg2(dbglvl, "Inc fname=%s ff->fname=%s\n", fname, ff->fname);
if (strcmp(fname, ff->fname) == 0) {
return true;
}
incexe = (findINCEXE *)fileset->exclude_list.get(i);
foreach_dlist(node, &incexe->name_list) {
fname = node->c_str();
- Dmsg2(100, "Exc fname=%s ff->fname=%s\n", fname, ff->fname);
+ Dmsg2(dbglvl, "Exc fname=%s ff->fname=%s\n", fname, ff->fname);
if (strcmp(fname, ff->fname) == 0) {
return true;
}
const char *basename;
int (*match_func)(const char *pattern, const char *string, int flags);
+ Dmsg1(dbglvl, "enter accept_file: fname=%s\n", ff->fname);
if (ff->flags & FO_ENHANCEDWILD) {
// match_func = enh_fnmatch;
match_func = fnmatch;
for (k=0; k<fo->wilddir.size(); k++) {
if (match_func((char *)fo->wilddir.get(k), ff->fname, fnmode|fnm_flags) == 0) {
if (ff->flags & FO_EXCLUDE) {
- Dmsg2(100, "Exclude wilddir: %s file=%s\n", (char *)fo->wilddir.get(k),
+ Dmsg2(dbglvl, "Exclude wilddir: %s file=%s\n", (char *)fo->wilddir.get(k),
ff->fname);
return false; /* reject dir */
}
for (k=0; k<fo->wildfile.size(); k++) {
if (match_func((char *)fo->wildfile.get(k), ff->fname, fnmode|fnm_flags) == 0) {
if (ff->flags & FO_EXCLUDE) {
- Dmsg2(100, "Exclude wildfile: %s file=%s\n", (char *)fo->wildfile.get(k),
+ Dmsg2(dbglvl, "Exclude wildfile: %s file=%s\n", (char *)fo->wildfile.get(k),
ff->fname);
return false; /* reject file */
}
for (k=0; k<fo->wildbase.size(); k++) {
if (match_func((char *)fo->wildbase.get(k), basename, fnmode|fnm_flags) == 0) {
if (ff->flags & FO_EXCLUDE) {
- Dmsg2(100, "Exclude wildbase: %s file=%s\n", (char *)fo->wildbase.get(k),
+ Dmsg2(dbglvl, "Exclude wildbase: %s file=%s\n", (char *)fo->wildbase.get(k),
basename);
return false; /* reject file */
}
for (k=0; k<fo->wild.size(); k++) {
if (match_func((char *)fo->wild.get(k), ff->fname, fnmode|fnm_flags) == 0) {
if (ff->flags & FO_EXCLUDE) {
- Dmsg2(100, "Exclude wild: %s file=%s\n", (char *)fo->wild.get(k),
+ Dmsg2(dbglvl, "Exclude wild: %s file=%s\n", (char *)fo->wild.get(k),
ff->fname);
return false; /* reject file */
}
fnm_flags = (fo->flags & FO_IGNORECASE) ? FNM_CASEFOLD : 0;
for (k=0; k<fo->wild.size(); k++) {
if (fnmatch((char *)fo->wild.get(k), ff->fname, fnmode|fnm_flags) == 0) {
- Dmsg1(100, "Reject wild1: %s\n", ff->fname);
+ Dmsg1(dbglvl, "Reject wild1: %s\n", ff->fname);
return false; /* reject file */
}
}
foreach_dlist(node, &incexe->name_list) {
char *fname = node->c_str();
if (fnmatch(fname, ff->fname, fnmode|fnm_flags) == 0) {
- Dmsg1(100, "Reject wild2: %s\n", ff->fname);
+ Dmsg1(dbglvl, "Reject wild2: %s\n", ff->fname);
return false; /* reject file */
}
}
if (accept_file(ff)) {
return ff->file_save(jcr, ff, top_level);
} else {
- Dmsg1(100, "Skip file %s\n", ff->fname);
+ Dmsg1(dbglvl, "Skip file %s\n", ff->fname);
return -1; /* ignore this file */
}
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2001-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2001-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
char *top_fname; /* full filename before descending */
char *fname; /* full filename */
char *link; /* link if file linked */
+ char *object_name; /* Object name */
+ char *object; /* restore object */
POOLMEM *sys_fname; /* system filename */
POOLMEM *fname_save; /* save when stripping path */
POOLMEM *link_save; /* save when stripping path */
struct stat statp; /* stat packet */
int32_t FileIndex; /* FileIndex of this file */
int32_t LinkFI; /* FileIndex of main hard linked file */
+ int32_t object_index; /* Object index */
+ int32_t object_len; /* Object length */
+ int32_t object_compression; /* Type of compression for object */
struct f_link *linked; /* Set if this file is hard linked */
int type; /* FT_ type from above */
int ff_errno; /* errno */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
Thanks to the TAR programmers.
- Version $Id$
-
*/
#include "bacula.h"
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2007-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2007-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
/*
* Kern Sibbald, August MMVII
*
- * Version $Id$
*/
#include "bacula.h"
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2007-2007 Free Software Foundation Europe e.V.
+ Copyright (C) 2007-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
/*
* Kern Sibbald, August MMVII
*
- * Version $Id$
*/
#ifndef _SAVECWD_H
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, Nov MM
*
- * Version $Id$
*/
#define JT_SCAN 'S' /* Scan Job */
/* Job Status. Some of these are stored in the DB */
-#define JS_Created 'C' /* created but not yet running */
-#define JS_Running 'R' /* running */
+#define JS_Canceled 'A' /* canceled by user */
#define JS_Blocked 'B' /* blocked */
-#define JS_Terminated 'T' /* terminated normally */
-#define JS_Warnings 'W' /* Terminated normally with warnings */
-#define JS_ErrorTerminated 'E' /* Job terminated in error */
-#define JS_Error 'e' /* Non-fatal error */
-#define JS_FatalError 'f' /* Fatal error */
+#define JS_Created 'C' /* created but not yet running */
#define JS_Differences 'D' /* Verify differences */
-#define JS_Canceled 'A' /* canceled by user */
-#define JS_Incomplete 'I' /* Incomplete Job */
+#define JS_ErrorTerminated 'E' /* Job terminated in error */
#define JS_WaitFD 'F' /* waiting on File daemon */
-#define JS_WaitSD 'S' /* waiting on the Storage daemon */
-#define JS_WaitMedia 'm' /* waiting for new media */
+#define JS_Incomplete 'I' /* Incomplete Job */
+#define JS_DataCommitting 'L' /* Committing data (last despool) */
#define JS_WaitMount 'M' /* waiting for Mount */
-#define JS_WaitStoreRes 's' /* Waiting for storage resource */
-#define JS_WaitJobRes 'j' /* Waiting for job resource */
+#define JS_Running 'R' /* running */
+#define JS_WaitSD 'S' /* waiting on the Storage daemon */
+#define JS_Terminated 'T' /* terminated normally */
+#define JS_Warnings 'W' /* Terminated normally with warnings */
+
+#define JS_AttrDespooling 'a' /* SD despooling attributes */
#define JS_WaitClientRes 'c' /* Waiting for Client resource */
#define JS_WaitMaxJobs 'd' /* Waiting for maximum jobs */
-#define JS_WaitStartTime 't' /* Waiting for start time */
-#define JS_WaitPriority 'p' /* Waiting for higher priority jobs to finish */
-#define JS_AttrDespooling 'a' /* SD despooling attributes */
+#define JS_Error 'e' /* Non-fatal error */
+#define JS_FatalError 'f' /* Fatal error */
#define JS_AttrInserting 'i' /* Doing batch insert file records */
+#define JS_WaitJobRes 'j' /* Waiting for job resource */
#define JS_DataDespooling 'l' /* Doing data despooling */
-#define JS_DataCommitting 'L' /* Committing data (last despool) */
+#define JS_WaitMedia 'm' /* waiting for new media */
+#define JS_WaitPriority 'p' /* Waiting for higher priority jobs to finish */
+#define JS_WaitStoreRes 's' /* Waiting for storage resource */
+#define JS_WaitStartTime 't' /* Waiting for start time */
/* Migration selection types */
enum {
time_t wait_time_sum; /* cumulative wait time since job start */
time_t wait_time; /* timestamp when job have started to wait */
POOLMEM *client_name; /* client name */
+ POOLMEM *JobIds; /* User entered string of JobIds */
POOLMEM *RestoreBootstrap; /* Bootstrap file to restore */
POOLMEM *stime; /* start time for incremental/differential */
char *sd_auth_key; /* SD auth key */
save_pkt *plugin_sp; /* plugin save packet */
char *plugin_options; /* user set options for plugin */
bool cmd_plugin; /* Set when processing a command Plugin = */
+ POOLMEM *comment; /* Comment for this Job */
/* Daemon specific part of JCR */
/* This should be empty in the library */
/* File Daemon specific part of JCR */
uint32_t num_files_examined; /* files examined this job */
POOLMEM *last_fname; /* last file saved/verified */
+ POOLMEM *job_metadata; /* VSS job metadata */
acl_data_t *acl_data; /* ACLs for backup/restore */
xattr_data_t *xattr_data; /* Extended Attributes for backup/restore */
int32_t last_type; /* type of last file saved/verified */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2003-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2003-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, June MMIII
*
- * Version $Id$
- *
*/
#include "bacula.h"
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2003-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2003-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
Switzerland, email:ftf@fsfeurope.org.
*/
/*
- * Version $Id$
- *
* Kern Sibbald, June MMIII
*/
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Bacula utility functions are in util.c
*
- * Version $Id$
*/
#include "bacula.h"
+#ifdef HAVE_LIBZ
+#include <zlib.h>
+#endif
static pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER;
return escaped_path;
}
+
+/*
+ * Deflate (compress) an input buffer.  You must supply an
+ * output buffer sufficiently long and the length of the
+ * output buffer.  Generally, if the output buffer is the
+ * same size as the input buffer, it should work (at least
+ * for text).
+ *
+ *  in, in_len   - buffer to compress and its length
+ *  out, out_len - output buffer and its capacity; on return
+ *                 out_len is replaced by the compressed size
+ *  Returns: zlib status code (Z_STREAM_END on full success),
+ *           or 1 when built without libz support
+ */
+int Zdeflate(char *in, int in_len, char *out, int &out_len)
+{
+#ifdef HAVE_LIBZ
+   z_stream strm;
+   int ret;
+
+   /* allocate deflate state */
+   strm.zalloc = Z_NULL;
+   strm.zfree = Z_NULL;
+   strm.opaque = Z_NULL;
+   ret = deflateInit(&strm, 9);        /* level 9 = maximum compression */
+   if (ret != Z_OK) {
+      Dmsg0(200, "deflateInit error\n");
+      (void)deflateEnd(&strm);
+      return ret;
+   }
+
+   strm.next_in = (Bytef *)in;
+   strm.avail_in = in_len;
+   Dmsg1(200, "In: %d bytes\n", strm.avail_in);
+   strm.avail_out = out_len;
+   strm.next_out = (Bytef *)out;
+   ret = deflate(&strm, Z_FINISH);     /* single-shot: compress everything */
+   out_len = out_len - strm.avail_out; /* bytes actually produced */
+   Dmsg1(200, "compressed=%d\n", out_len);
+   (void)deflateEnd(&strm);
+   return ret;
+#else
+   return 1;
+#endif
+}
+
+/*
+ * Inflate (uncompress) an input buffer.  You must supply
+ * an output buffer and an output length sufficiently long
+ * or there will be an error.  This uncompresses in one call.
+ *
+ *  in, in_len   - compressed buffer and its length
+ *  out, out_len - output buffer and its capacity; on return
+ *                 out_len is replaced by the uncompressed size
+ *  Returns: zlib status code (Z_STREAM_END on full success),
+ *           or 1 when built without libz support
+ */
+int Zinflate(char *in, int in_len, char *out, int &out_len)
+{
+#ifdef HAVE_LIBZ
+   z_stream strm;
+   int ret;
+
+   /* allocate inflate state */
+   strm.zalloc = Z_NULL;
+   strm.zfree = Z_NULL;
+   strm.opaque = Z_NULL;
+   strm.next_in = (Bytef *)in;
+   strm.avail_in = in_len;
+   ret = inflateInit(&strm);
+   if (ret != Z_OK) {
+      Dmsg0(200, "inflateInit error\n");
+      (void)inflateEnd(&strm);
+      return ret;
+   }
+
+   Dmsg1(200, "In len: %d bytes\n", strm.avail_in);
+   strm.avail_out = out_len;
+   strm.next_out = (Bytef *)out;
+   ret = inflate(&strm, Z_FINISH);     /* single-shot: uncompress everything */
+   out_len -= strm.avail_out;          /* bytes actually produced */
+   Dmsg1(200, "Uncompressed=%d\n", out_len);
+   (void)inflateEnd(&strm);
+   return ret;
+#else
+   return 1;
+#endif
+}
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2001-2010 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+/*
+ * Bacula Thread Read/Write locking code. It permits
+ * multiple readers but only one writer. Note, however,
+ * that the writer thread is permitted to make multiple
+ * nested write lock calls.
+ *
+ * Kern Sibbald, January MMI
+ *
+ * This code adapted from "Programming with POSIX Threads", by
+ * David R. Butenhof
+ *
+ */
+
+#define _LOCKMGR_COMPLIANT
+#include "bacula.h"
+#include "devlock.h"
+
+/*
+ * Allocate and zero a new devlock structure.
+ *
+ * The returned lock is not yet usable: call init() to set up
+ * the mutex and condition variables and mark it valid.
+ *
+ * NOTE(review): malloc() result is not checked; on out-of-memory
+ * memset() dereferences NULL -- confirm callers accept this.
+ *
+ * Returns: pointer to the zeroed devlock
+ */
+
+devlock *new_devlock()
+{
+   devlock *lock;
+   lock = (devlock *)malloc(sizeof (devlock));
+   memset(lock, 0, sizeof(devlock));
+   return lock;
+}
+
+/*
+ * Initialize a read/write lock to the idle state.
+ *
+ *  priority - recorded for the lock manager's deadlock
+ *             detection (used later by lmgr_pre_lock())
+ *
+ * Returns: 0 on success
+ *          errno on failure (partially-created primitives
+ *          are torn down before returning)
+ */
+int devlock::init(int priority)
+{
+   int stat;
+   devlock *rwl = this;
+
+   rwl->r_active = rwl->w_active = 0;
+   rwl->r_wait = rwl->w_wait = 0;
+   rwl->priority = priority;
+   if ((stat = pthread_mutex_init(&rwl->mutex, NULL)) != 0) {
+      return stat;
+   }
+   if ((stat = pthread_cond_init(&rwl->read, NULL)) != 0) {
+      pthread_mutex_destroy(&rwl->mutex);
+      return stat;
+   }
+   if ((stat = pthread_cond_init(&rwl->write, NULL)) != 0) {
+      pthread_cond_destroy(&rwl->read);
+      pthread_mutex_destroy(&rwl->mutex);
+      return stat;
+   }
+   rwl->valid = DEVLOCK_VALID;         /* mark lock usable */
+   return 0;
+}
+
+/*
+ * Destroy a read/write lock
+ *
+ * Refuses (EBUSY) while any reader or writer is active or
+ * waiting; otherwise invalidates the lock and destroys the
+ * underlying pthread primitives.
+ *
+ * Returns: 0 on success
+ *          errno on failure
+ */
+int devlock::destroy()
+{
+   devlock *rwl = this;
+   int stat, stat1, stat2;
+
+   if (rwl->valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
+      return stat;
+   }
+
+   /*
+    * If any threads are active, report EBUSY
+    */
+   if (rwl->r_active > 0 || rwl->w_active) {
+      pthread_mutex_unlock(&rwl->mutex);
+      return EBUSY;
+   }
+
+   /*
+    * If any threads are waiting, report EBUSY
+    */
+   if (rwl->r_wait > 0 || rwl->w_wait > 0) {
+      pthread_mutex_unlock(&rwl->mutex);
+      return EBUSY;
+   }
+
+   rwl->valid = 0;                     /* invalidate before tearing down */
+   if ((stat = pthread_mutex_unlock(&rwl->mutex)) != 0) {
+      return stat;
+   }
+   stat = pthread_mutex_destroy(&rwl->mutex);
+   stat1 = pthread_cond_destroy(&rwl->read);
+   stat2 = pthread_cond_destroy(&rwl->write);
+   /* report the first failure, if any */
+   return (stat != 0 ? stat : (stat1 != 0 ? stat1 : stat2));
+}
+
+/*
+ * Handle cleanup when the read lock condition variable
+ * wait is released, i.e. when the thread is cancelled
+ * while blocked in readlock()'s pthread_cond_wait().
+ */
+static void devlock_read_release(void *arg)
+{
+   devlock *rwl = (devlock *)arg;
+   rwl->read_release();
+}
+
+/* Drop the waiting-reader count and release the mutex */
+void devlock::read_release()
+{
+   r_wait--;
+   pthread_mutex_unlock(&mutex);
+}
+
+/*
+ * Handle cleanup when the write lock condition variable wait
+ * is released, i.e. when the thread is cancelled while
+ * blocked in writelock()'s pthread_cond_wait().
+ */
+static void devlock_write_release(void *arg)
+{
+   devlock *rwl = (devlock *)arg;
+   rwl->write_release();
+}
+
+/* Drop the waiting-writer count and release the mutex */
+void devlock::write_release()
+{
+   w_wait--;
+   pthread_mutex_unlock(&mutex);
+}
+
+/*
+ * Lock for read access, wait until locked (or error).
+ *
+ * Blocks while a writer is active.  A cancellation cleanup
+ * handler keeps r_wait and the mutex consistent if the thread
+ * is cancelled inside pthread_cond_wait().
+ *
+ * Returns: 0 on success, errno/pthread error code otherwise
+ */
+int devlock::readlock()
+{
+   devlock *rwl = this;
+   int stat;
+
+   if (rwl->valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
+      return stat;
+   }
+   if (rwl->w_active) {
+      rwl->r_wait++;                  /* indicate that we are waiting */
+      pthread_cleanup_push(devlock_read_release, (void *)rwl);
+      while (rwl->w_active) {
+         stat = pthread_cond_wait(&rwl->read, &rwl->mutex);
+         if (stat != 0) {
+            break;                    /* error, bail out */
+         }
+      }
+      pthread_cleanup_pop(0);
+      rwl->r_wait--;                  /* we are no longer waiting */
+   }
+   if (stat == 0) {
+      rwl->r_active++;                /* we are running */
+   }
+   pthread_mutex_unlock(&rwl->mutex);
+   return stat;
+}
+
+/*
+ * Attempt to lock for read access, don't wait
+ *
+ * Returns: 0 on success (reader count incremented),
+ *          EBUSY when a writer is active,
+ *          errno on mutex failure
+ */
+int devlock::readtrylock()
+{
+   devlock *rwl = this;
+   int stat, stat2;
+
+   if (rwl->valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
+      return stat;
+   }
+   if (rwl->w_active) {
+      stat = EBUSY;
+   } else {
+      rwl->r_active++;                /* we are running */
+   }
+   stat2 = pthread_mutex_unlock(&rwl->mutex);
+   /* a lock failure (EBUSY) takes precedence over the unlock status */
+   return (stat == 0 ? stat2 : stat);
+}
+
+/*
+ * Unlock read lock
+ *
+ * When the last active reader leaves and writers are
+ * waiting, wake them with a broadcast.
+ *
+ * Returns: 0 on success, errno/pthread error code otherwise
+ */
+int devlock::readunlock()
+{
+   devlock *rwl = this;
+   int stat, stat2;
+
+   if (rwl->valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
+      return stat;
+   }
+   rwl->r_active--;
+   if (rwl->r_active == 0 && rwl->w_wait > 0) { /* if writers waiting */
+      stat = pthread_cond_broadcast(&rwl->write);
+   }
+   stat2 = pthread_mutex_unlock(&rwl->mutex);
+   return (stat == 0 ? stat2 : stat);
+}
+
+
+/*
+ * Lock for write access, wait until locked (or error).
+ * Multiple nested write locking is permitted: if the calling
+ * thread already owns the write lock, w_active is simply
+ * incremented and 0 is returned.
+ *
+ *  areason   - stored in 'reason' for diagnostic purposes
+ *  acan_take - whether take_lock() may later borrow this lock
+ *
+ * Returns: 0 on success, errno/pthread error code otherwise
+ */
+int devlock::writelock(int areason, bool acan_take)
+{
+   devlock *rwl = this;
+   int stat;
+
+   if (rwl->valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
+      return stat;
+   }
+   /* recursive write lock by the same thread: just bump the count */
+   if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) {
+      rwl->w_active++;
+      pthread_mutex_unlock(&rwl->mutex);
+      return 0;
+   }
+   lmgr_pre_lock(rwl, rwl->priority, __FILE__, __LINE__);
+   if (rwl->w_active || rwl->r_active > 0) {
+      rwl->w_wait++;                  /* indicate that we are waiting */
+      pthread_cleanup_push(devlock_write_release, (void *)rwl);
+      while (rwl->w_active || rwl->r_active > 0) {
+         if ((stat = pthread_cond_wait(&rwl->write, &rwl->mutex)) != 0) {
+            lmgr_do_unlock(rwl);
+            break;                    /* error, bail out */
+         }
+      }
+      pthread_cleanup_pop(0);
+      rwl->w_wait--;                  /* we are no longer waiting */
+   }
+   if (stat == 0) {
+      rwl->w_active++;                /* we are running */
+      rwl->writer_id = pthread_self(); /* save writer thread's id */
+      lmgr_post_lock();
+   }
+   /* reason/can_take are recorded even if the wait failed */
+   rwl->reason = areason;
+   rwl->can_take = acan_take;
+   pthread_mutex_unlock(&rwl->mutex);
+   return stat;
+}
+
+/*
+ * Attempt to lock for write access, don't wait
+ *
+ * A thread that already owns the write lock succeeds and
+ * increments the nesting count.
+ *
+ * Returns: 0 on success, EBUSY if held by others,
+ *          errno on mutex failure
+ */
+int devlock::writetrylock()
+{
+   devlock *rwl = this;
+   int stat, stat2;
+
+   if (rwl->valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
+      return stat;
+   }
+   /* recursive write lock by the same thread: just bump the count */
+   if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) {
+      rwl->w_active++;
+      pthread_mutex_unlock(&rwl->mutex);
+      return 0;
+   }
+   if (rwl->w_active || rwl->r_active > 0) {
+      stat = EBUSY;
+   } else {
+      rwl->w_active = 1;              /* we are running */
+      rwl->writer_id = pthread_self(); /* save writer thread's id */
+      lmgr_do_lock(rwl, rwl->priority, __FILE__, __LINE__);
+   }
+   stat2 = pthread_mutex_unlock(&rwl->mutex);
+   return (stat == 0 ? stat2 : stat);
+}
+
+/*
+ * Unlock write lock
+ *
+ * Decrements the nesting count; only when the outermost write
+ * lock is released is anyone woken.
+ *
+ * NOTE(review): the historical comment said writers are started
+ * in preference to readers, but the code below wakes waiting
+ * READERS first and writers only when no reader waits -- confirm
+ * which policy is intended.
+ *
+ * Aborts the daemon (Jmsg M_ABORT) on over-unlock or unlock by
+ * a non-owning thread.
+ *
+ * Returns: 0 on success, errno/pthread error code otherwise
+ */
+int devlock::writeunlock()
+{
+   devlock *rwl = this;
+   int stat, stat2;
+
+   if (rwl->valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
+      return stat;
+   }
+   if (rwl->w_active <= 0) {
+      pthread_mutex_unlock(&rwl->mutex);
+      Jmsg0(NULL, M_ABORT, 0, _("writeunlock called too many times.\n"));
+   }
+   rwl->w_active--;
+   if (!pthread_equal(pthread_self(), rwl->writer_id)) {
+      pthread_mutex_unlock(&rwl->mutex);
+      Jmsg0(NULL, M_ABORT, 0, _("writeunlock by non-owner.\n"));
+   }
+   if (rwl->w_active > 0) {
+      stat = 0;                       /* writers still active */
+   } else {
+      lmgr_do_unlock(rwl);
+      /* No more writers, awaken someone */
+      if (rwl->r_wait > 0) {          /* if readers waiting */
+         stat = pthread_cond_broadcast(&rwl->read);
+      } else if (rwl->w_wait > 0) {
+         stat = pthread_cond_broadcast(&rwl->write);
+      }
+   }
+   stat2 = pthread_mutex_unlock(&rwl->mutex);
+   return (stat == 0 ? stat2 : stat);
+}
+
+/*
+ * "Take" the lock: save the current reason, prev_reason and
+ * owning writer id into *hold, then make the calling thread
+ * the writer of record with the new reason.  Pair with
+ * return_lock() to restore the saved state.
+ *
+ * NOTE(review): prev_reason is saved into hold but not updated
+ * from the old reason here (unlike new_reason()) -- confirm
+ * this is intended.
+ *
+ * Returns: 0 on success, errno on failure
+ */
+int devlock::take_lock(take_lock_t *hold, int areason)
+{
+   int stat;
+
+   if (valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&mutex)) != 0) {
+      return stat;
+   }
+   hold->reason = reason;
+   hold->prev_reason = prev_reason;
+   hold->writer_id = writer_id;
+   reason = areason;
+   writer_id = pthread_self();
+   stat = pthread_mutex_unlock(&mutex);
+   return stat;
+}
+
+/*
+ * Return a lock previously borrowed with take_lock(): restore
+ * the saved reason, prev_reason and owning writer id, then wake
+ * any writer that is active or waiting.
+ *
+ * Fix: the original code restored writer_id from *hold and then
+ * immediately overwrote it with pthread_self(), defeating the
+ * restore; the redundant overwrite is removed so ownership truly
+ * returns to the saved thread.
+ *
+ * Returns: 0 on success, errno/pthread error code otherwise
+ */
+int devlock::return_lock(take_lock_t *hold)
+{
+   int stat, stat2;
+
+   if (valid != DEVLOCK_VALID) {
+      return EINVAL;
+   }
+   if ((stat = pthread_mutex_lock(&mutex)) != 0) {
+      return stat;
+   }
+   reason = hold->reason;
+   prev_reason = hold->prev_reason;
+   writer_id = hold->writer_id;        /* restore saved owner */
+   stat2 = pthread_mutex_unlock(&mutex);
+   if (w_active || w_wait) {
+      stat = pthread_cond_broadcast(&write);
+   }
+   return (stat == 0 ? stat2 : stat);
+}
+
+#ifdef TEST_RWLOCK
+
+/* Test parameters for the TEST_RWLOCK harness */
+#define THREADS 300
+#define DATASIZE 15
+#define ITERATIONS 1000000
+
+/*
+ * Keep statics for each thread.
+ */
+typedef struct thread_tag {
+   int thread_num;                    /* ordinal of this thread */
+   pthread_t thread_id;
+   int writes;                        /* write operations performed */
+   int reads;                         /* read operations performed */
+   int interval;                      /* how often to write (unused while read path is disabled) */
+} thread_t;
+
+/*
+ * Read/write lock and shared data.
+ */
+typedef struct data_tag {
+   brwlock_t lock;
+   int data;                          /* last writer's thread_num */
+   int writes;                        /* writes applied to this element */
+} data_t;
+
+static thread_t threads[THREADS];
+static data_t data[DATASIZE];
+
+/*
+ * Thread start routine that uses read/write locks.
+ *
+ * Each thread walks the shared data array, taking the write
+ * lock TWICE (nested) and releasing it twice, to exercise the
+ * recursive write-lock support.  The read path is compiled out
+ * (#ifdef xxx), so 'repeats' can never become non-zero and the
+ * final report is effectively dead code.
+ *
+ * NOTE(review): writelock()/writeunlock() are called here as free
+ * functions on a brwlock_t; this test appears not to have been
+ * updated to the devlock class API -- confirm before enabling
+ * TEST_RWLOCK.
+ */
+void *thread_routine(void *arg)
+{
+   thread_t *self = (thread_t *)arg;
+   int repeats = 0;
+   int iteration;
+   int element = 0;
+   int status;
+
+   for (iteration=0; iteration < ITERATIONS; iteration++) {
+      /*
+       * Each "self->interval" iterations, perform an
+       * update operation (write lock instead of read
+       * lock).
+       */
+//    if ((iteration % self->interval) == 0) {
+         /* first (outer) write lock */
+         status = writelock(&data[element].lock);
+         if (status != 0) {
+            berrno be;
+            printf("Write lock failed. ERR=%s\n", be.bstrerror(status));
+            exit(1);
+         }
+         data[element].data = self->thread_num;
+         data[element].writes++;
+         self->writes++;
+         /* second (nested) write lock by the same thread */
+         status = writelock(&data[element].lock);
+         if (status != 0) {
+            berrno be;
+            printf("Write lock failed. ERR=%s\n", be.bstrerror(status));
+            exit(1);
+         }
+         data[element].data = self->thread_num;
+         data[element].writes++;
+         self->writes++;
+         /* unlock twice to balance the nested locks */
+         status = writeunlock(&data[element].lock);
+         if (status != 0) {
+            berrno be;
+            printf("Write unlock failed. ERR=%s\n", be.bstrerror(status));
+            exit(1);
+         }
+         status = writeunlock(&data[element].lock);
+         if (status != 0) {
+            berrno be;
+            printf("Write unlock failed. ERR=%s\n", be.bstrerror(status));
+            exit(1);
+         }
+
+#ifdef xxx
+      } else {
+         /*
+          * Look at the current data element to see whether
+          * the current thread last updated it. Count the
+          * times to report later.
+          */
+         status = readlock(&data[element].lock);
+         if (status != 0) {
+            berrno be;
+            printf("Read lock failed. ERR=%s\n", be.bstrerror(status));
+            exit(1);
+         }
+         self->reads++;
+         if (data[element].data == self->thread_num)
+            repeats++;
+         status = readunlock(&data[element].lock);
+         if (status != 0) {
+            berrno be;
+            printf("Read unlock failed. ERR=%s\n", be.bstrerror(status));
+            exit(1);
+         }
+      }
+#endif
+      element++;
+      if (element >= DATASIZE) {
+         element = 0;                 /* wrap around the data array */
+      }
+   }
+   if (repeats > 0) {
+      Pmsg2(000, _("Thread %d found unchanged elements %d times\n"),
+         self->thread_num, repeats);
+   }
+   return NULL;
+}
+
+/*
+ * TEST_RWLOCK driver: initializes DATASIZE locks, spawns THREADS
+ * worker threads running thread_routine(), joins them and prints
+ * per-thread and per-element write statistics.
+ *
+ * NOTE(review): uses rwl_init()/rwl_destroy() (the older brwlock
+ * API) and casts pthread_t to int for the zero check, which is not
+ * portable -- confirm before enabling TEST_RWLOCK.
+ */
+int main (int argc, char *argv[])
+{
+   int count;
+   int data_count;
+   int status;
+   unsigned int seed = 1;              /* fixed seed => reproducible intervals */
+   int thread_writes = 0;
+   int data_writes = 0;
+
+#ifdef sun
+   /*
+    * On Solaris 2.5, threads are not timesliced. To ensure
+    * that our threads can run concurrently, we need to
+    * increase the concurrency level to THREADS.
+    */
+   thr_setconcurrency (THREADS);
+#endif
+
+   /*
+    * Initialize the shared data.
+    */
+   for (data_count = 0; data_count < DATASIZE; data_count++) {
+      data[data_count].data = 0;
+      data[data_count].writes = 0;
+      status = rwl_init(&data[data_count].lock);
+      if (status != 0) {
+         berrno be;
+         printf("Init rwlock failed. ERR=%s\n", be.bstrerror(status));
+         exit(1);
+      }
+   }
+
+   /*
+    * Create THREADS threads to access shared data.
+    */
+   for (count = 0; count < THREADS; count++) {
+      threads[count].thread_num = count + 1;
+      threads[count].writes = 0;
+      threads[count].reads = 0;
+      threads[count].interval = rand_r(&seed) % 71;
+      if (threads[count].interval <= 0) {
+         threads[count].interval = 1;  /* avoid modulo-by-zero in workers */
+      }
+      status = pthread_create (&threads[count].thread_id,
+         NULL, thread_routine, (void*)&threads[count]);
+      if (status != 0 || (int)threads[count].thread_id == 0) {
+         berrno be;
+         printf("Create thread failed. ERR=%s\n", be.bstrerror(status));
+         exit(1);
+      }
+   }
+
+   /*
+    * Wait for all threads to complete, and collect
+    * statistics.
+    */
+   for (count = 0; count < THREADS; count++) {
+      status = pthread_join (threads[count].thread_id, NULL);
+      if (status != 0) {
+         berrno be;
+         printf("Join thread failed. ERR=%s\n", be.bstrerror(status));
+         exit(1);
+      }
+      thread_writes += threads[count].writes;
+      printf (_("%02d: interval %d, writes %d, reads %d\n"),
+         count, threads[count].interval,
+         threads[count].writes, threads[count].reads);
+   }
+
+   /*
+    * Collect statistics for the data.
+    */
+   for (data_count = 0; data_count < DATASIZE; data_count++) {
+      data_writes += data[data_count].writes;
+      printf (_("data %02d: value %d, %d writes\n"),
+         data_count, data[data_count].data, data[data_count].writes);
+      rwl_destroy (&data[data_count].lock);
+   }
+
+   printf (_("Total: %d thread writes, %d data writes\n"),
+      thread_writes, data_writes);
+   return 0;
+}
+
+#endif
+
+#ifdef TEST_RW_TRY_LOCK
+/*
+ * brwlock_try_main.c
+ *
+ * Demonstrate use of non-blocking read-write locks.
+ *
+ * Special notes: On a Solaris system, call thr_setconcurrency()
+ * to allow interleaved thread execution, since threads are not
+ * timesliced.
+ */
+#include <pthread.h>
+#include "rwlock.h"
+#include "errors.h"
+
+#define THREADS 5
+#define ITERATIONS 1000
+#define DATASIZE 15
+
+/*
+ * Keep statistics for each thread.
+ */
+typedef struct thread_tag {
+   int thread_num;                    /* ordinal of this thread */
+   pthread_t thread_id;
+   int r_collisions;                  /* read try-lock EBUSY count */
+   int w_collisions;                  /* write try-lock EBUSY count */
+   int updates;                       /* successful updates */
+   int interval;                      /* update every interval-th iteration */
+} thread_t;
+
+/*
+ * Read-write lock and shared data
+ */
+typedef struct data_tag {
+   brwlock_t lock;
+   int data;                          /* incremented under the write lock */
+   int updates;                       /* should always track 'data' */
+} data_t;
+
+thread_t threads[THREADS];
+data_t data[DATASIZE];
+
+/*
+ * Thread start routine that uses read-write locks
+ *
+ * Exercises the non-blocking rwl_writetrylock()/rwl_readtrylock()
+ * API: every interval-th iteration it tries an update (counting
+ * EBUSY as a write collision); otherwise it tries a read and
+ * checks that data == updates for the element.
+ */
+void *thread_routine (void *arg)
+{
+   thread_t *self = (thread_t*)arg;
+   int iteration;
+   int element;
+   int status;
+   lmgr_init_thread();                /* register with the lock manager */
+   element = 0;                       /* Current data element */
+
+   for (iteration = 0; iteration < ITERATIONS; iteration++) {
+      if ((iteration % self->interval) == 0) {
+         status = rwl_writetrylock (&data[element].lock);
+         if (status == EBUSY)
+            self->w_collisions++;
+         else if (status == 0) {
+            data[element].data++;
+            data[element].updates++;
+            self->updates++;
+            rwl_writeunlock (&data[element].lock);
+         } else
+            err_abort (status, _("Try write lock"));
+      } else {
+         status = rwl_readtrylock (&data[element].lock);
+         if (status == EBUSY)
+            self->r_collisions++;
+         else if (status != 0) {
+            err_abort (status, _("Try read lock"));
+         } else {
+            /* invariant: data and updates move together under the lock */
+            if (data[element].data != data[element].updates)
+               printf ("%d: data[%d] %d != %d\n",
+                  self->thread_num, element,
+                  data[element].data, data[element].updates);
+            rwl_readunlock (&data[element].lock);
+         }
+      }
+
+      element++;
+      if (element >= DATASIZE)
+         element = 0;                 /* wrap around the data array */
+   }
+   lmgr_cleanup_thread();
+   return NULL;
+}
+
+/*
+ * TEST_RW_TRY_LOCK driver: initializes DATASIZE brwlock_t locks,
+ * spawns THREADS workers running thread_routine(), joins them and
+ * prints per-thread collision/update counts and per-element data.
+ */
+int main (int argc, char *argv[])
+{
+   int count, data_count;
+   unsigned int seed = 1;              /* fixed seed => reproducible intervals */
+   int thread_updates = 0, data_updates = 0;
+   int status;
+
+#ifdef sun
+   /*
+    * On Solaris 2.5, threads are not timesliced. To ensure
+    * that our threads can run concurrently, we need to
+    * increase the concurrency level to THREADS.
+    */
+   DPRINTF (("Setting concurrency level to %d\n", THREADS));
+   thr_setconcurrency (THREADS);
+#endif
+
+   /*
+    * Initialize the shared data.
+    */
+   for (data_count = 0; data_count < DATASIZE; data_count++) {
+      data[data_count].data = 0;
+      data[data_count].updates = 0;
+      rwl_init(&data[data_count].lock);
+   }
+
+   /*
+    * Create THREADS threads to access shared data.
+    */
+   for (count = 0; count < THREADS; count++) {
+      threads[count].thread_num = count;
+      threads[count].r_collisions = 0;
+      threads[count].w_collisions = 0;
+      threads[count].updates = 0;
+      /* NOTE(review): interval may be 0 here, which would divide by
+       * zero in the worker's modulo -- the TEST_RWLOCK variant guards
+       * against this; confirm whether this path needs the same guard. */
+      threads[count].interval = rand_r (&seed) % ITERATIONS;
+      status = pthread_create (&threads[count].thread_id,
+         NULL, thread_routine, (void*)&threads[count]);
+      if (status != 0)
+         err_abort (status, _("Create thread"));
+   }
+
+   /*
+    * Wait for all threads to complete, and collect
+    * statistics.
+    */
+   for (count = 0; count < THREADS; count++) {
+      status = pthread_join (threads[count].thread_id, NULL);
+      if (status != 0)
+         err_abort (status, _("Join thread"));
+      thread_updates += threads[count].updates;
+      printf (_("%02d: interval %d, updates %d, "
+         "r_collisions %d, w_collisions %d\n"),
+         count, threads[count].interval,
+         threads[count].updates,
+         threads[count].r_collisions, threads[count].w_collisions);
+   }
+
+   /*
+    * Collect statistics for the data.
+    */
+   for (data_count = 0; data_count < DATASIZE; data_count++) {
+      data_updates += data[data_count].updates;
+      printf (_("data %02d: value %d, %d updates\n"),
+         data_count, data[data_count].data, data[data_count].updates);
+      rwl_destroy (&data[data_count].lock);
+   }
+
+   return 0;
+}
+
+#endif
--- /dev/null
+/*
+ Bacula® - The Network Backup Solution
+
+ Copyright (C) 2001-2010 Free Software Foundation Europe e.V.
+
+ The main author of Bacula is Kern Sibbald, with contributions from
+ many others, a complete list can be found in the file AUTHORS.
+ This program is Free Software; you can redistribute it and/or
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Bacula® is a registered trademark of Kern Sibbald.
+ The licensor of Bacula is the Free Software Foundation Europe
+ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
+ Switzerland, email:ftf@fsfeurope.org.
+*/
+/*
+ * Bacula Thread Read/Write locking code. It permits
+ * multiple readers but only one writer.
+ *
+ * Kern Sibbald, January MMI
+ *
+ * This code adapted from "Programming with POSIX Threads", by
+ * David R. Butenhof
+ *
+ */
+
+#ifndef __DEVLOCK_H
+#define __DEVLOCK_H 1
+
+/*
+ * Saved lock state for devlock::take_lock()/return_lock():
+ * records the owner and reasons so they can be restored.
+ */
+struct take_lock_t {
+   pthread_t  writer_id;              /* id of writer */
+   int        reason;                 /* save reason */
+   int        prev_reason;            /* previous reason */
+};
+
+
+/*
+ * Device lock: a read/write lock permitting many readers or one
+ * writer, with recursive write locking and a take/return facility
+ * for transferring ownership (see devlock.c).
+ *
+ * NOTE(review): the constructor/destructor declared below have no
+ * definition in the visible code -- users may need new_devlock()
+ * plus init() instead; confirm against the implementation file.
+ */
+class devlock {
+private:
+   pthread_mutex_t   mutex;
+   pthread_cond_t    read;            /* wait for read */
+   pthread_cond_t    write;           /* wait for write */
+   pthread_t         writer_id;       /* writer's thread id */
+   int               priority;        /* used in deadlock detection */
+   int               valid;           /* set when valid */
+   int               r_active;        /* readers active */
+   int               w_active;        /* writers active */
+   int               r_wait;          /* readers waiting */
+   int               w_wait;          /* writers waiting */
+   int               reason;          /* reason for lock */
+   int               prev_reason;     /* previous reason */
+   bool              can_take;        /* can the lock be taken? */
+
+
+public:
+   devlock(int reason, bool can_take=false);
+   ~devlock();
+   int init(int priority);
+   int destroy();
+   int take_lock(take_lock_t *hold, int reason);
+   int return_lock(take_lock_t *hold);
+   /* push the current reason and set a new one */
+   void new_reason(int nreason) { prev_reason = reason; reason = nreason; };
+   /* pop back to the previous reason */
+   void restore_reason() { reason = prev_reason; prev_reason = 0; };
+
+   int writelock(int reason, bool can_take=false);
+   int writetrylock();
+   int writeunlock();
+   void write_release();
+
+   int readunlock();
+   int readlock();
+   int readtrylock();
+   void read_release();
+
+};
+
+
+#define DEVLOCK_VALID  0xfadbec
+
+/*
+ * Static initializer for a devlock.
+ * Fix: the initializer referenced undefined DEVOCK_VALID (typo for
+ * DEVLOCK_VALID), which would fail to compile if ever used.
+ * NOTE(review): the macro name keeps its historical misspelling
+ * ("INIIALIZER") for source compatibility, and the value list does
+ * not cover every class member -- check ordering against the member
+ * declarations before relying on it.
+ */
+#define DEVLOCK_INIIALIZER \
+   {PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, \
+    PTHREAD_COND_INITIALIZER, DEVLOCK_VALID, 0, 0, 0, 0}
+
+#endif /* __DEVLOCK_H */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2004-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2004-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
/*
* Written by Kern Sibbald MMIV
*
- * Version $Id$
*/
jcr->VolumeName[0] = 0;
jcr->errmsg = get_pool_memory(PM_MESSAGE);
jcr->errmsg[0] = 0;
+ jcr->comment = get_pool_memory(PM_FNAME);
+ jcr->comment[0] = 0;
/* Setup some dummy values */
bstrncpy(jcr->Job, "*System*", sizeof(jcr->Job));
jcr->JobId = 0;
free_guid_list(jcr->id_list);
jcr->id_list = NULL;
}
+ if (jcr->comment) {
+ free_pool_memory(jcr->comment);
+ jcr->comment = NULL;
+ }
free(jcr);
}
}
}
+/*
+ * Return number of Jobs
+ *
+ * Counts JCRs on the global chain that have a real JobId (>0),
+ * i.e. excluding daemon/system JCRs with JobId 0.
+ *
+ * NOTE(review): the loop condition calls next() before the body
+ * runs, so the element returned by first() is never examined --
+ * confirm the head of the jcrs chain is intentionally skipped.
+ */
+int job_count()
+{
+   JCR *jcr;
+   int count = 0;
+
+   lock_jcr_chain();
+   for (jcr = (JCR *)jcrs->first(); (jcr = (JCR *)jcrs->next(jcr)); ) {
+      if (jcr->JobId > 0) {
+         count++;
+      }
+   }
+   unlock_jcr_chain();
+   return count;
+}
+
+
/*
* Setup to call the timeout check routine every 30 seconds
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, MM
*
- * Version $Id$
*/
#ifndef __MEM_POOL_H_
bool prt_kaboom = false; /* Print kaboom output */
utime_t daemon_start_time = 0; /* Daemon start time */
const char *version = VERSION " (" BDATE ")";
+const char *dist_name = DISTNAME " " DISTVER;
+const int beef = BEEF;
char my_name[30]; /* daemon name is stored here */
char host_name[50]; /* host machine name */
char *exepath = (char *)NULL;
void lock_jobs();
void unlock_jobs();
JCR *jcr_walk_start();
+int job_count();
JCR *jcr_walk_next(JCR *prev_jcr);
void jcr_walk_end(JCR *jcr);
JCR *get_jcr_from_tsd();
setFileAttributes
};
+/*
+ * Plugin called here when it is first loaded
+ */
bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs)
{
bfuncs = lbfuncs; /* set Bacula funct pointers */
return bRC_OK;
}
+/*
+ * Plugin called here when it is unloaded, normally when
+ * Bacula is going to exit.
+ */
bRC unloadPlugin()
{
printf("plugin: Unloaded\n");
return bRC_OK;
}
+/*
+ * Called here to make a new instance of the plugin -- i.e. when
+ * a new Job is started. There can be multiple instances of
+ * each plugin that are running at the same time. Your
+ * plugin instance must be thread safe and keep its own
+ * local data.
+ */
static bRC newPlugin(bpContext *ctx)
{
int JobId = 0;
return bRC_OK;
}
+/*
+ * Release everything concerning a particular instance of a
+ * plugin. Normally called when the Job terminates.
+ */
static bRC freePlugin(bpContext *ctx)
{
int JobId = 0;
return bRC_OK;
}
+/*
+ * Called by core code to get a variable from the plugin.
+ * Not currently used.
+ */
static bRC getPluginValue(bpContext *ctx, pVariable var, void *value)
{
// printf("plugin: getPluginValue var=%d\n", var);
return bRC_OK;
}
+/*
+ * Called by core code to set a plugin variable.
+ * Not currently used.
+ */
static bRC setPluginValue(bpContext *ctx, pVariable var, void *value)
{
// printf("plugin: setPluginValue var=%d\n", var);
return bRC_OK;
}
+/*
+ * Called by Bacula when there are certain events that the
+ * plugin might want to know. The value depends on the
+ * event.
+ */
static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value)
{
char *name;
return bRC_OK;
}
+/*
+ * Called when starting to backup a file. Here the plugin must
+ * return the "stat" packet for the directory/file and provide
+ * certain information so that Bacula knows what the file is.
+ * The plugin can create "Virtual" files by giving them a
+ * name that is not normally found on the file system.
+ */
static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp)
{
return bRC_OK;
}
+/*
+ * Done backing up a file.
+ */
static bRC endBackupFile(bpContext *ctx)
{
return bRC_OK;
}
/*
- * Do actual I/O
+ * Do actual I/O. Bacula calls this after startBackupFile
+ * or after startRestoreFile to do the actual file
+ * input or output.
*/
static bRC pluginIO(bpContext *ctx, struct io_pkt *io)
{
return bRC_OK;
}
+/*
+ * Called here to give the plugin the information needed to
+ * re-create the file on a restore. It basically gets the
+ * stat packet that was created during the backup phase.
+ * This data is what is needed to create the file, but does
+ * not contain actual file data.
+ */
static bRC createFile(bpContext *ctx, struct restore_pkt *rp)
{
return bRC_OK;
}
+/*
+ * Called after the file has been restored. This can be used to
+ * set directory permissions, ...
+ */
static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp)
{
return bRC_OK;
char line[MAXSTRING];
alert = get_pool_memory(PM_FNAME);
alert = edit_device_codes(dcr, alert, dcr->device->alert_command, "");
- bpipe = open_bpipe(alert, 0, "r");
+ /* Wait maximum 5 minutes */
+ bpipe = open_bpipe(alert, 60 * 5, "r");
if (bpipe) {
while (fgets(line, sizeof(line), bpipe->rfd)) {
Jmsg(jcr, M_ALERT, 0, _("Alert: %s"), line);
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
* Append code for Storage daemon
* Kern Sibbald, May MM
*
- * Version $Id$
*/
#include "bacula.h"
*/
dcr->VolFirstIndex = dcr->VolLastIndex = 0;
jcr->run_time = time(NULL); /* start counting time for rates */
- for (last_file_index = 0; ok && !job_canceled(jcr); ) {
+ for (last_file_index = 0; ok && !jcr->is_job_canceled(); ) {
/* Read Stream header from the File daemon.
* The stream header consists of the following:
/* Read data stream from the File daemon.
* The data stream is just raw bytes
*/
- while ((n=bget_msg(fd)) > 0 && !job_canceled(jcr)) {
+ while ((n=bget_msg(fd)) > 0 && !jcr->is_job_canceled()) {
rec.VolSessionId = jcr->VolSessionId;
rec.VolSessionTime = jcr->VolSessionTime;
rec.FileIndex = file_index;
FI_to_ascii(buf1, rec.FileIndex), rec.VolSessionId,
stream_to_ascii(buf2, rec.Stream, rec.FileIndex), rec.data_len);
- /* Send attributes and digest to Director for Catalog */
- if (stream == STREAM_UNIX_ATTRIBUTES || stream == STREAM_UNIX_ATTRIBUTES_EX ||
- crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) {
- if (!jcr->no_attributes) {
- BSOCK *dir = jcr->dir_bsock;
- if (are_attributes_spooled(jcr)) {
- dir->set_spooling();
- }
- Dmsg0(850, "Send attributes to dir.\n");
- if (!dir_update_file_attributes(dcr, &rec)) {
- dir->clear_spooling();
- Jmsg(jcr, M_FATAL, 0, _("Error updating file attributes. ERR=%s\n"),
- dir->bstrerror());
- ok = false;
- break;
- }
- dir->clear_spooling();
- }
- }
+ send_attrs_to_dir(jcr, &rec);
Dmsg0(650, "Enter bnet_get\n");
}
Dmsg1(650, "End read loop with FD. Stat=%d\n", n);
if (fd->is_error()) {
- if (!job_canceled(jcr)) {
+ if (!jcr->is_job_canceled()) {
Dmsg1(350, "Network read error from FD. ERR=%s\n", fd->bstrerror());
Jmsg1(jcr, M_FATAL, 0, _("Network error reading from FD. ERR=%s\n"),
fd->bstrerror());
if (ok || dev->can_write()) {
if (!write_session_label(dcr, EOS_LABEL)) {
/* Print only if ok and not cancelled to avoid spurious messages */
- if (ok && !job_canceled(jcr)) {
+ if (ok && !jcr->is_job_canceled()) {
Jmsg1(jcr, M_FATAL, 0, _("Error writing end session label. ERR=%s\n"),
dev->bstrerror());
}
/* Flush out final partial block of this session */
if (!write_block_to_device(dcr)) {
/* Print only if ok and not cancelled to avoid spurious messages */
- if (ok && !job_canceled(jcr)) {
+ if (ok && !jcr->is_job_canceled()) {
Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"),
dev->print_name(), dev->bstrerror());
Dmsg0(100, _("Set ok=FALSE after write_block_to_device.\n"));
*/
release_device(dcr);
- if (!ok || job_canceled(jcr)) {
+ if (!ok || jcr->is_job_canceled()) {
discard_attribute_spool(jcr);
} else {
commit_attribute_spool(jcr);
Dmsg1(100, "return from do_append_data() ok=%d\n", ok);
return ok;
}
+
+
+/*
+ * Send attributes and digest to Director for Catalog
+ *
+ * Factored out of the do_append_data() read loop.  Only acts on
+ * attribute/restore-object/digest streams; other streams return
+ * true with no work.  Attribute spooling is enabled around the
+ * update when the job spools attributes, and cleared again on
+ * both the success and failure paths.
+ *
+ *  Returns: true  on success or when the stream needs no update
+ *           false if dir_update_file_attributes() fails (a fatal
+ *                 Jmsg has already been queued)
+ */
+bool send_attrs_to_dir(JCR *jcr, DEV_RECORD *rec)
+{
+   int stream = rec->Stream;
+
+   if (stream == STREAM_UNIX_ATTRIBUTES ||
+       stream == STREAM_UNIX_ATTRIBUTES_EX ||
+       stream == STREAM_RESTORE_OBJECT ||
+       crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) {
+      if (!jcr->no_attributes) {
+         BSOCK *dir = jcr->dir_bsock;
+         if (are_attributes_spooled(jcr)) {
+            dir->set_spooling();
+         }
+         Dmsg0(850, "Send attributes to dir.\n");
+         if (!dir_update_file_attributes(jcr->dcr, rec)) {
+            Jmsg(jcr, M_FATAL, 0, _("Error updating file attributes. ERR=%s\n"),
+               dir->bstrerror());
+            dir->clear_spooling();
+            return false;
+         }
+         dir->clear_spooling();
+      }
+   }
+   return true;
+}
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
free_jcr(ojcr);
}
jcr->JobId = JobId;
+ Dmsg2(800, "Start JobId=%d %p\n", JobId, jcr);
jcr->VolSessionId = newVolSessionId();
jcr->VolSessionTime = VolSessionTime;
bstrncpy(jcr->Job, job, sizeof(jcr->Job));
Dmsg3(50, "%s waiting %d sec for FD to contact SD key=%s\n",
jcr->Job, (int)(timeout.tv_sec-time(NULL)), jcr->sd_auth_key);
+ Dmsg2(800, "Wait FD for jid=%d %p\n", jcr->JobId, jcr);
/*
* Wait for the File daemon to contact us to start the Job,
if (errstat == ETIMEDOUT || errstat == EINVAL || errstat == EPERM) {
break;
}
+ Dmsg1(800, "=== Auth cond errstat=%d\n", errstat);
}
Dmsg3(50, "Auth=%d canceled=%d errstat=%d\n", jcr->authenticated,
job_canceled(jcr), errstat);
V(mutex);
+ Dmsg2(800, "Auth fail or cancel for jid=%d %p\n", jcr->JobId, jcr);
memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key));
if (jcr->authenticated && !job_canceled(jcr)) {
- Dmsg1(50, "Running job %s\n", jcr->Job);
+ Dmsg2(800, "Running jid=%d %p\n", jcr->JobId, jcr);
run_job(jcr); /* Run the job */
}
+ Dmsg2(800, "Done jid=%d %p\n", jcr->JobId, jcr);
return false;
}
*/
void stored_free_jcr(JCR *jcr)
{
- Dmsg1(900, "stored_free_jcr JobId=%u\n", jcr->JobId);
+ Dmsg2(800, "End Job JobId=%u %p\n", jcr->JobId, jcr);
+ if (jcr->dir_bsock) {
+ Dmsg2(800, "Send terminate jid=%d %p\n", jcr->JobId, jcr);
+ jcr->dir_bsock->signal(BNET_EOD);
+ jcr->dir_bsock->signal(BNET_TERMINATE);
+ }
if (jcr->file_bsock) {
jcr->file_bsock->close();
jcr->file_bsock = NULL;
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, 2000-2007. June 2007
*
- * Version $Id$
*/
#include "bacula.h" /* pull in global headers */
*/
void _block_device(const char *file, int line, DEVICE *dev, int state)
{
+// ASSERT(lmgr_mutex_is_locked(&dev->m_mutex) == 1);
ASSERT(dev->blocked() == BST_NOT_BLOCKED);
dev->set_blocked(state); /* make other threads wait */
dev->no_wait_id = pthread_self(); /* allow us to continue */
void _unblock_device(const char *file, int line, DEVICE *dev)
{
Dmsg3(sd_dbglvl, "unblock %s from %s:%d\n", dev->print_blocked(), file, line);
+// ASSERT(lmgr_mutex_is_locked(&dev->m_mutex) == 1);
ASSERT(dev->blocked());
dev->set_blocked(BST_NOT_BLOCKED);
clear_thread_id(dev->no_wait_id);
* Check that volcatinfo is good
*/
if (!dev->haveVolCatInfo()) {
- Dmsg0(010, "Do not have volcatinfo\n");
+ Dmsg0(100, "Do not have volcatinfo\n");
if (!find_a_volume()) {
goto mount_next_vol;
}
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
/*
* Protypes for stored -- Kern Sibbald MM
*
- * Version $Id$
*/
/* From stored.c */
DCR *new_dcr(JCR *jcr, DCR *dcr, DEVICE *dev);
void free_dcr(DCR *dcr);
+/* From append.c */
+bool send_attrs_to_dir(JCR *jcr, DEV_RECORD *rec);
+
/* From askdir.c */
enum get_vol_info_rw {
GET_VOL_INFO_FOR_WRITE,
* Kern Sibbald, April MMI
* added BB02 format October MMII
*
- * Version $Id$
- *
*/
return "GZIP";
case STREAM_UNIX_ATTRIBUTES_EX:
return "UNIX-ATTR-EX";
+ case STREAM_RESTORE_OBJECT:
+ return "RESTORE-OBJECT";
case STREAM_SPARSE_DATA:
return "SPARSE-DATA";
case STREAM_SPARSE_GZIP_DATA:
return "contGZIP";
case -STREAM_UNIX_ATTRIBUTES_EX:
return "contUNIX-ATTR-EX";
+ case -STREAM_RESTORE_OBJECT:
+ return "contRESTORE-OBJECT";
case -STREAM_SPARSE_DATA:
return "contSPARSE-DATA";
case -STREAM_SPARSE_GZIP_DATA:
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, MM
*
- * Version $Id$
- *
*/
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2003-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2003-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
*
* Kern Sibbald, May MMIII
*
- * Version $Id$
*
*/
bstrftime_nc(dt, sizeof(dt), daemon_start_time);
- len = Mmsg(msg, _("Daemon started %s, %d Job%s run since started.\n"),
- dt, num_jobs_run, num_jobs_run == 1 ? "" : "s");
+ len = Mmsg(msg, _("Daemon started %s. Jobs: run=%d, running=%d.\n"),
+ dt, num_jobs_run, job_count());
sendit(msg, len, sp);
len = Mmsg(msg, _(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"),
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
" -v verbose user messages\n"
" -? print this message.\n"
"\n"), 2000, VERSION, BDATE);
+
exit(1);
}
*/
/*
* Test program for listing files during regression testing
+ * Links have their permissions and time bashed since they cannot
+ * be set by Bacula.
*
* Kern Sibbald, MM
*
static void print_ls_output(char *fname, char *link, int type, struct stat *statp)
{
- char buf[1000];
+ char buf[2000];
char ec1[30];
char *p, *f;
int n;
p += n;
n = sprintf(p, "%-4d %-4d", (int)statp->st_uid, (int)statp->st_gid);
p += n;
- n = sprintf(p, "%7.7s ", edit_uint64(statp->st_size, ec1));
+ n = sprintf(p, "%10.10s ", edit_uint64(statp->st_size, ec1));
p += n;
if (S_ISCHR(statp->st_mode) || S_ISBLK(statp->st_mode)) {
n = sprintf(p, "%4x ", (int)statp->st_rdev);
#undef VERSION
#define VERSION "5.0.3"
-#define BDATE "24 July 2010"
-#define LSMDATE "24Jul10"
+#define BDATE "31 July 2010"
+#define LSMDATE "31Jul10"
#define PROG_COPYRIGHT "Copyright (C) %d-2010 Free Software Foundation Europe e.V.\n"
#define BYEAR "2010" /* year for copyright messages in progs */
#define TRACE_FILE 1
/* If this is set stdout will not be closed on startup */
-/* #define DEVELOPER 1 */
+#define DEVELOPER 1
/*
* SMCHECK does orphaned buffer checking (memory leaks)
/* #define TRACE_RES 1 */
/* #define DEBUG_MEMSET 1 */
/* #define DEBUG_MUTEX 1 */
+#define BEEF 0
/*
* Set SMALLOC_SANITY_CHECK to zero to turn off, otherwise
VALUE "FileDescription", "Bacula File daemon for Win32\0"
VALUE "FileVersion", VERSION "\0"
VALUE "InternalName", "Bacula\0"
- VALUE "LegalCopyright", "Copyright Free Software Foundation Europe e.V., 1999-2009\0"
+ VALUE "LegalCopyright", "Copyright (C) 1999-2010 Free Software Foundation Europe e.V.\0"
VALUE "LegalTrademarks", "Licensed under GNU AGPLv3\0"
VALUE "OriginalFilename", "bacula-fd.exe\0"
VALUE "PrivateBuild", "\0"
- VALUE "ProductName", "Bacula - Win32 Version\0"
+ VALUE "ProductName", "Bacula - Windows Version\0"
VALUE "ProductVersion", VERSION
VALUE "SpecialBuild", "\0"
END
LTEXT "For more information, see:",-1,115,60,100,10
LTEXT " www.bacula.org",-1,115,70,100,10
LTEXT "Copyright (C) 1999-2010, Free Software Foundation Europe e.V.",-1,7,120,175,10
- LTEXT "Licensed under GNU AGPLv3.",-1,7,130,175,10
+ LTEXT "Licensed under GNU AGPLv3",-1,7,130,175,10
RTEXT "Build Date:",-1,108,24,42,8
RTEXT "Bacula Version:",-1,100,9,50,8
LTEXT VERSION,-1,159,10,65,8
for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ; do
echo "$i. Doing $1 at `date +%R:%S`"
nice $1 >1
- if [ $? -ne 0 ] ; then
- echo "Exit $?"
+ r=$?
+ if [ $r -ne 0 ] ; then
+ echo "Exit $r"
exit 1
fi
done
#! /bin/sh
# Attempt to guess a canonical system name.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
-# Inc.
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+# Free Software Foundation, Inc.
-timestamp='2007-03-06'
+timestamp='2009-06-10'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
-Free Software Foundation, Inc.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
arm*|i386|m68k|ns32k|sh3*|sparc|vax)
eval $set_cc_for_build
if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep __ELF__ >/dev/null
+ | grep -q __ELF__
then
# Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
# Return netbsd for either. FIX?
case `/usr/bin/uname -p` in
sparc) echo sparc-icl-nx7; exit ;;
esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
sun4H:SunOS:5.*:*)
echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
- i86pc:SunOS:5.*:*)
- echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval $set_cc_for_build
+ SUN_ARCH="i386"
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH="x86_64"
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
sun4*:SunOS:6*:*)
# According to config.sub, this is the proper way to canonicalize
echo rs6000-ibm-aix3.2
fi
exit ;;
- *:AIX:*:[45])
+ *:AIX:*:[456])
IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
IBM_ARCH=rs6000
# => hppa64-hp-hpux11.23
if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
- grep __LP64__ >/dev/null
+ grep -q __LP64__
then
HP_ARCH="hppa2.0w"
else
exit ;;
*:Interix*:[3456]*)
case ${UNAME_MACHINE} in
- x86)
+ x86)
echo i586-pc-interix${UNAME_RELEASE}
exit ;;
- EM64T | authenticamd)
+ EM64T | authenticamd | genuineintel)
echo x86_64-unknown-interix${UNAME_RELEASE}
exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
esac ;;
[345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
echo i${UNAME_MACHINE}-pc-mks
exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
# How do we know it's Interix rather than the generic POSIX subsystem?
# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
echo ${UNAME_MACHINE}-pc-minix
exit ;;
arm*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ fi
exit ;;
avr32*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
m68*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
- mips:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips
- #undef mipsel
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mipsel
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- mips64:Linux:*:*)
+ mips:Linux:*:* | mips64:Linux:*:*)
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
#undef CPU
- #undef mips64
- #undef mips64el
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mips64el
+ CPU=${UNAME_MACHINE}el
#else
#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips64
+ CPU=${UNAME_MACHINE}
#else
CPU=
#endif
EV67) UNAME_MACHINE=alphaev67 ;;
EV68*) UNAME_MACHINE=alphaev68 ;;
esac
- objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
+ objdump --private-headers /bin/sh | grep -q ld.so.1
if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-gnu
+ exit ;;
parisc:Linux:*:* | hppa:Linux:*:*)
# Look for CPU level
case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
x86_64:Linux:*:*)
echo x86_64-unknown-linux-gnu
exit ;;
- xtensa:Linux:*:*)
- echo xtensa-unknown-linux-gnu
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
i*86:Linux:*:*)
# The BFD linker knows what the default object file format is, so
elf32-i386)
TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
;;
- a.out-i386-linux)
- echo "${UNAME_MACHINE}-pc-linux-gnuaout"
- exit ;;
- coff-i386)
- echo "${UNAME_MACHINE}-pc-linux-gnucoff"
- exit ;;
- "")
- # Either a pre-BFD a.out linker (linux-gnuoldld) or
- # one that does not give us useful --help.
- echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
- exit ;;
esac
# Determine whether the default compiler is a.out or elf
eval $set_cc_for_build
i*86:syllable:*:*)
echo ${UNAME_MACHINE}-pc-syllable
exit ;;
- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
echo i386-unknown-lynxos${UNAME_RELEASE}
exit ;;
i*86:*DOS:*:*)
pc:*:*:*)
# Left here for compatibility:
# uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i386.
- echo i386-pc-msdosdjgpp
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
exit ;;
Intel:Mach:3*:*)
echo i386-pc-mach3
3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
&& { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
echo m68k-unknown-lynxos${UNAME_RELEASE}
exit ;;
rs6000:LynxOS:2.*:*)
echo rs6000-unknown-lynxos${UNAME_RELEASE}
exit ;;
- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
echo powerpc-unknown-lynxos${UNAME_RELEASE}
exit ;;
SM[BE]S:UNIX_SV:*:*)
BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
echo i586-pc-beos
exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
SX-4:SUPER-UX:*:*)
echo sx4-nec-superux${UNAME_RELEASE}
exit ;;
i*86:rdos:*:*)
echo ${UNAME_MACHINE}-pc-rdos
exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
esac
#echo '(No uname command or uname output not recognized.)' 1>&2
the operating system you are using. It is advised that you
download the most up to date version of the config scripts from
- http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.guess
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
and
- http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.sub
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
If the version you run ($0) is already up to date, please
send the following data and any information you think might be
#! /bin/sh
# Configuration validation subroutine script.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
-# Inc.
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+# Free Software Foundation, Inc.
-timestamp='2007-01-18'
+timestamp='2009-06-11'
# This file is (in principle) common to ALL GNU software.
# The presence of a machine in this file suggests that SOME GNU software
version="\
GNU config.sub ($timestamp)
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
-Free Software Foundation, Inc.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
case $maybe_os in
nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
+ kopensolaris*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
os=
basic_machine=$1
;;
+ -bluegene*)
+ os=-cnk
+ ;;
-sim | -cisco | -oki | -wec | -winbond)
os=
basic_machine=$1
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
| i370 | i860 | i960 | ia64 \
| ip2k | iq2000 \
+ | lm32 \
| m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep \
+ | maxq | mb | microblaze | mcore | mep | metag \
| mips | mipsbe | mipseb | mipsel | mipsle \
| mips16 \
| mips64 | mips64el \
- | mips64vr | mips64vrel \
+ | mips64octeon | mips64octeonel \
| mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
| mips64vr4100 | mips64vr4100el \
| mips64vr4300 | mips64vr4300el \
| mips64vr5000 | mips64vr5000el \
| mipsisa64sr71k | mipsisa64sr71kel \
| mipstx39 | mipstx39el \
| mn10200 | mn10300 \
+ | moxie \
| mt \
| msp430 \
| nios | nios2 \
| powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
| pyramid \
| score \
- | sh | sh[1234] | sh[24]a | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
| sh64 | sh64le \
| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
| v850 | v850e \
| we32k \
| x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
- | z8k)
+ | z8k | z80)
basic_machine=$basic_machine-unknown
;;
m6811 | m68hc11 | m6812 | m68hc12)
| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
| i*86-* | i860-* | i960-* | ia64-* \
| ip2k-* | iq2000-* \
+ | lm32-* \
| m32c-* | m32r-* | m32rle-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
| mips16-* \
| mips64-* | mips64el-* \
- | mips64vr-* | mips64vrel-* \
+ | mips64octeon-* | mips64octeonel-* \
| mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
| mips64vr4100-* | mips64vr4100el-* \
| mips64vr4300-* | mips64vr4300el-* \
| mips64vr5000-* | mips64vr5000el-* \
| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
| pyramid-* \
| romp-* | rs6000-* \
- | sh-* | sh[1234]-* | sh[24]a-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
| sparclite-* \
| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
| tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \
| tron-* \
| v850-* | v850e-* | vax-* \
| we32k-* \
| x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
- | xstormy16-* | xtensa-* \
+ | xstormy16-* | xtensa*-* \
| ymp-* \
- | z8k-*)
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
;;
# Recognize the various machine names and aliases which stand
# for a CPU type and a company and sometimes even an OS.
basic_machine=m68k-apollo
os=-bsd
;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
aux)
basic_machine=m68k-apple
os=-aux
basic_machine=ns32k-sequent
os=-dynix
;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
c90)
basic_machine=c90-cray
os=-unicos
;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
convex-c1)
basic_machine=c1-convex
os=-bsd
basic_machine=craynv-cray
os=-unicosmp
;;
- cr16c)
- basic_machine=cr16c-unknown
+ cr16)
+ basic_machine=cr16-unknown
os=-elf
;;
crds | unos)
basic_machine=m88k-motorola
os=-sysv3
;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
djgpp)
basic_machine=i586-pc
os=-msdosdjgpp
basic_machine=m68k-isi
os=-sysv
;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
m88k-omron*)
basic_machine=m88k-omron
;;
basic_machine=i386-pc
os=-mingw32
;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
miniframe)
basic_machine=m68000-convergent
;;
basic_machine=i860-intel
os=-osf
;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
pbd)
basic_machine=sparc-tti
;;
basic_machine=tic6x-unknown
os=-coff
;;
+ tile*)
+ basic_machine=tile-unknown
+ os=-linux-gnu
+ ;;
tx39)
basic_machine=mipstx39-unknown
;;
basic_machine=z8k-unknown
os=-sim
;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
none)
basic_machine=none-none
os=-none
we32k)
basic_machine=we32k-att
;;
- sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele)
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
basic_machine=sh-unknown
;;
sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
# Each alternative MUST END IN A *, to match a version number.
# -sysv* is not here because it comes later, after sysvr4.
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
| -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -kopensolaris* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* \
+ | -aos* | -aros* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
| -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
| -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
| -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
| -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
| -uxpv* | -beos* | -mpeix* | -udk* \
-zvmoe)
os=-zvmoe
;;
+ -dicos*)
+ os=-dicos
+ ;;
-none)
;;
*)
-sunos*)
vendor=sun
;;
- -aix*)
+ -cnk*|-aix*)
vendor=ibm
;;
-beos*)
check_restore_diff()
{
if test "$debug" -eq 1 ; then
+ $rscripts/diff.pl -s ${src} -d ${tmp}/bacula-restores${src}
diff -ur ${src} ${tmp}/bacula-restores${src}
else
diff -ur ${src} ${tmp}/bacula-restores${src} 2>&1 >/dev/null
check_restore_bin_diff()
{
if test "$debug" -eq 1 ; then
+ $rscripts/diff.pl -s ${bin} -d ${tmp}/bacula-restores${bin}
diff -ur ${bin} ${tmp}/bacula-restores${bin}
else
diff -ur ${bin} ${tmp}/bacula-restores${bin} 2>&1 >/dev/null
check_restore_tmp_build_diff()
{
if test "$debug" -eq 1 ; then
+ $rscripts/diff.pl -s ${tmpsrc} -d ${tmp}/bacula-restores${tmpsrc}
diff -ur ${tmpsrc} ${tmp}/bacula-restores${tmpsrc}
else
diff -ur ${tmpsrc} ${tmp}/bacula-restores${tmpsrc} 2>&1 >/dev/null
export src
export tmpsrc
+bperl="perl -Mscripts::functions"
+export bperl
+
mkdir -p ${tmp}
touch ${tmp}/dir.out ${tmp}/fd.out ${tmp}/sd.out
# You can change the maximum concurrent jobs for any config file
# If specified, you can change only one Resource or one type of
# resource at the time (optional)
-# set_maximum_concurent_jobs('$conf/bacula-dir.conf', 100);
-# set_maximum_concurent_jobs('$conf/bacula-dir.conf', 100, 'Director');
-# set_maximum_concurent_jobs('$conf/bacula-dir.conf', 100, 'Device', 'Drive-0');
-sub set_maximum_concurent_jobs
+# set_maximum_concurrent_jobs('$conf/bacula-dir.conf', 100);
+# set_maximum_concurrent_jobs('$conf/bacula-dir.conf', 100, 'Director');
+# set_maximum_concurrent_jobs('$conf/bacula-dir.conf', 100, 'Device', 'Drive-0');
+sub set_maximum_concurrent_jobs
{
my ($file, $nb, $obj, $name) = @_;
- my ($cur_obj, $cur_name);
die "Can't get new maximumconcurrentjobs"
unless ($nb);
+ add_attribute($file, "Maximum Concurrent Jobs", $nb, $obj, $name);
+}
+
+
+# You can add option to a resource
+# add_attribute('$conf/bacula-dir.conf', 'FDTimeout', 1600, 'Director');
+# add_attribute('$conf/bacula-dir.conf', 'FDTimeout', 1600, 'Storage', 'FileStorage');
+sub add_attribute
+{
+ my ($file, $attr, $value, $obj, $name) = @_;
+ my ($cur_obj, $cur_name, $done);
+
open(FP, ">$tmp/1.$$") or die "Can't write to $tmp/1.$$";
open(SRC, $file) or die "Can't open $file";
while (my $l = <SRC>)
{
+ if ($l =~ /^#/) {
+ print FP $l;
+ next;
+ }
+
if ($l =~ /^(\w+) {/) {
$cur_obj = $1;
+ $done=0;
}
- if ($l =~ /maximum\s*concurrent\s*jobs/i) {
+ if ($l =~ /\Q$attr\E/i) {
if (!$obj || $cur_obj eq $obj) {
if (!$name || $cur_name eq $name) {
- $l =~ s/maximum\s*concurrent\s*jobs\s*=\s*\d+/Maximum Concurrent Jobs = $nb/ig;
+ $l =~ s/\Q$attr\E\s*=\s*.+/$attr = $value/ig;
+ $done=1
}
}
}
- if ($l =~ /Name\s*=\s*"?([\w\d\.-])"?/i) {
+ if ($l =~ /Name\s*=\s*"?([\w\d\.-]+)"?/i) {
$cur_name = $1;
}
if ($l =~ /^}/) {
+ if (!$done) {
+ if ($cur_obj eq $obj) {
+ if (!$name || $cur_name eq $name) {
+ $l = " $attr = $value\n$l";
+ }
+ }
+ }
$cur_name = $cur_obj = undef;
}
print FP $l;
mkdir $adir 2> /dev/null
+# work with $1 slots by default
+nb_slot=${1:-80}
+
# create the autochanger configuration file
cat > $adir/conf <<EOF
maxdrive=8
-maxslot=80
+maxslot=$nb_slot
dbgfile=$adir/log
#case $2 in
# turn on ach debug
touch $adir/log
-# create 75 volumes
-for i in `seq 1 75`; do
+nb_vol=`expr $nb_slot - 5`
+# create $nb_vol volumes
+for i in `seq 1 $nb_vol`; do
echo $i:vol$i >> $adir/barcodes
cp /dev/null $adir/slot$i
done
# make a cleaning tape
-echo 76:CLN01 >> $adir/barcodes
-cp /dev/null $adir/slot76
+cln_slot=`expr $nb_vol + 1`
+echo $cln_slot:CLN01 >> $adir/barcodes
+cp /dev/null $adir/slot$cln_slot
# keep other empty
-for i in `seq 77 79`; do
+next_empty=`expr $cln_slot + 1`
+for i in `seq $next_empty $nb_slot`; do
echo $i: >> $adir/barcodes
done
bacula/
regress/
+ This script requires Perl to work (http://strawberryperl.com), and by default
+ it assumes that Bacula is installed in the standard location. Once it is
+ started on the Windows host, you can run remote commands such as:
+ - start the service
+ - stop the service
+ - edit the bacula-fd.conf to change the director and password setting
+ - install a new binary version (not tested, no plugin support)
+ - create weird files and directories
+ - create files with windows attributes
+ - compare two directories (with md5)
+
+
+ To test it, you can follow this procedure
+ On the windows box:
+ - install perl from http://strawberryperl.com on windows
+ - copy or export regress directory somewhere on your windows
+ - start the regress/scripts/regress-win32.pl (open it with perl.exe)
+ - create c:/tmp (not sure it's mandatory)
+ - make sure that the firewall is well configured or just disabled (needs
+ bacula and 8091/tcp)
+
+ On Linux box:
+ - edit config file to fill the following variables
+
+ WIN32_CLIENT="win2008-fd"
+ # Client FQDN or IP address
+ WIN32_ADDR="192.168.0.6"
+ # File or Directory to backup. This is put in the "File" directive
+ # in the FileSet
+ WIN32_FILE="c:/tmp"
+ # Port of Win32 client
+ WIN32_PORT=9102
+ # Win32 Client password
+ WIN32_PASSWORD="xxx"
+ # will be the ip address of the linux box
+ WIN32_STORE_ADDR="192.168.0.1"
+
+ - type make setup
+ - run ./tests/backup-bacula-test to be sure that everything is ok
+ - start ./tests/win32-fd-test
+
+ I'm not very happy with this script, but it works :)
+
=cut
use strict;
change_jobname BackupClient1 $JobName
p() {
- echo "##############################################" >> ${cwd}/tmp/log1.out
- echo "$*" >> ${cwd}/tmp/log1.out
- echo "##############################################" >> ${cwd}/tmp/log2.out
- echo "$*" >> ${cwd}/tmp/log2.out
+ echo "##############################################" >> ${cwd}/tmp/log1.out
+ echo "$*" >> ${cwd}/tmp/log1.out
+ echo "##############################################" >> ${cwd}/tmp/log2.out
+ echo "$*" >> ${cwd}/tmp/log2.out
+ if test "$debug" -eq 1 ; then
+ echo "##############################################"
+ echo "$*"
+ fi
}
# cleanup
#!/bin/sh
-rm -rf tmp/disk-changer
-mkdir -p tmp/disk-changer
-touch tmp/disk-changer/conf
+. scripts/functions
+scripts/cleanup
+scripts/copy-2disk-confs
+scripts/prepare-disk-changer
echo "Unload drive 0"
$scripts/disk-changer tmp/disk-changer/conf unload 1 tmp/disk-changer/drive0 0
echo "rtn=$?"
@$out ${cwd}/tmp/log1.out
@# Force differental on the second Volume
update volume=TestVolume001 VolStatus=Used
-run level=differental job=$JobName yes
+run level=differential job=$JobName yes
wait
messages
END_OF_DATA
start_test
cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
+@output /dev/null
messages
@$out tmp/log1.out
+@#exec "sh -c 'touch ${cwd}/build/xxx_new_file'"
run job=VerifyVolume level=InitCatalog yes
wait
messages
@# now do a verify Catalog
@#
@$out ${cwd}/tmp/original
+@#exec "sh -c 'rm -f ${cwd}/build/xxx_new_file'"
run job=VerifyVolume level=Catalog yes
wait
messages