-Changes to 1.38.3:
+Release 1.38.3 05Jan06:
+04Jan06
+- Move the suitable_drive flag to a better place to prevent
+ premature termination of the reservation if all drives
+ are busy -- should fix Arno's diff/inc pool failures.
+26Dec05
+- Add mutex to single thread VSS code in Win32.
+
Beta release 23Dec05:
22Dec05
- Add OPENSSL_INC to console dependencies, lib dependencies, and
required only if you do not want the default /usr/share.
Items to note!!!
+- The Storage daemon now keeps track of what tapes it is using
+ (was not the case in 1.36.x). This means that you must be much
+ more careful when removing tapes and putting up a new one. In
+ general, you should always do an "unmount" prior to removing a
+ tape, and a "mount" after putting a new one into the drive.
- If you use an Autochanger, you MUST update your SD conf file
to use the new Autochanger resource. Otherwise, certain commands
such as "update slots" may not work.
files, called "parts".
- For the details of the Python scripting support, please see the new
Python Scripting chapter in the manual.
+- The default user/group for the Director and Storage daemon installed
+ by rpms is bacula/bacula, thus you may need to add additional permissions
+ to your database, or modify the permissions of the tape drive. If
+ all else fails, change to using user=root. However, it is more secure
+ to use user=bacula.
/* Set if Bacula conio support enabled */
#undef HAVE_CONIO
-/* Define if encryption support should be enabled */
-#undef HAVE_CRYPTO
-
/* Define to 1 if you have the <curses.h> header file. */
#undef HAVE_CURSES_H
/* Define to 1 if you have the `setsid' function. */
#undef HAVE_SETSID
-/* Define if the SHA-2 family of digest algorithms is available */
-#undef HAVE_SHA2
-
/* Define to 1 if you have the `signal' function. */
#undef HAVE_SIGNAL
support_gnome=no
support_wx_console=no
support_tls=no
-support_crypto=no
gnome_version=
wx_version=
support_static_tools=no
AC_TRY_LINK([ #include <openssl/ssl.h> ],
[ CRYPTO_set_id_callback(NULL); ],
- [
- support_tls="yes"
- support_crypto="yes"
- ],
+ [ support_tls="yes" ],
[ support_tls="no" ]
)
- AC_TRY_LINK([ #include <openssl/evp.h> ],
- [ EVP_sha512(); ],
- [ ac_cv_openssl_sha2="yes" ],
- [ ac_cv_openssl_sha2="no" ]
- )
-
LIBS="$saved_LIBS"
CFLAGS="$saved_CFLAGS"
if test "$support_tls" = "yes"; then
AC_DEFINE(HAVE_OPENSSL, 1, [Define if OpenSSL library is available])
AC_DEFINE(HAVE_TLS, 1, [Define if TLS support should be enabled])
- AC_DEFINE(HAVE_CRYPTO, 1, [Define if encryption support should be enabled])
- fi
-
- if test "$ac_cv_openssl_sha2" = "yes"; then
- AC_DEFINE(HAVE_SHA2, 1, [Define if the SHA-2 family of digest algorithms is available])
fi
else
support_tls="no"
- support_crypto="no"
OPENSSL_LIBS=""
OPENSSL_INC=""
fi
src/cats/drop_bacula_tables \
src/cats/drop_bacula_database \
src/findlib/Makefile \
- src/pygtk-console/Makefile \
src/tools/Makefile \
src/win32/winbacula.nsi \
src/win32/baculafd/bacula-fd.conf \
readline support: ${got_readline} ${PRTREADLINE_SRC}
TCP Wrappers support: ${TCPW_MSG} ${WRAPLIBS}
TLS support: ${support_tls}
- Encryption support: ${support_crypto}
ZLIB support: ${have_zlib}
enable-smartalloc: ${support_smartalloc}
enable-gnome: ${support_gnome} ${gnome_version}
support_gnome=no
support_wx_console=no
support_tls=no
-support_crypto=no
gnome_version=
wx_version=
support_static_tools=no
ac_status=$?
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); }; }; then
-
- support_tls="yes"
- support_crypto="yes"
-
+ support_tls="yes"
else
echo "$as_me: failed program was:" >&5
sed 's/^/| /' conftest.$ac_ext >&5
support_tls="no"
-fi
-rm -f conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-
- cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h. */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h. */
- #include <openssl/evp.h>
-int
-main ()
-{
- EVP_sha512();
- ;
- return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
- (eval $ac_link) 2>conftest.er1
- ac_status=$?
- grep -v '^ *+' conftest.er1 >conftest.err
- rm -f conftest.er1
- cat conftest.err >&5
- echo "$as_me:$LINENO: \$? = $ac_status" >&5
- (exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag"
- || test ! -s conftest.err'
- { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
- (eval $ac_try) 2>&5
- ac_status=$?
- echo "$as_me:$LINENO: \$? = $ac_status" >&5
- (exit $ac_status); }; } &&
- { ac_try='test -s conftest$ac_exeext'
- { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
- (eval $ac_try) 2>&5
- ac_status=$?
- echo "$as_me:$LINENO: \$? = $ac_status" >&5
- (exit $ac_status); }; }; then
- ac_cv_openssl_sha2="yes"
-else
- echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
- ac_cv_openssl_sha2="no"
-
fi
rm -f conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
#define HAVE_TLS 1
_ACEOF
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_CRYPTO 1
-_ACEOF
-
- fi
-
- if test "$ac_cv_openssl_sha2" = "yes"; then
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_SHA2 1
-_ACEOF
-
fi
else
support_tls="no"
- support_crypto="no"
OPENSSL_LIBS=""
OPENSSL_INC=""
fi
exit 1
fi
- ac_config_files="$ac_config_files autoconf/Make.common Makefile scripts/startmysql scripts/stopmysql scripts/btraceback scripts/startit scripts/stopit scripts/bconsole scripts/gconsole scripts/bacula scripts/devel_bacula scripts/Makefile scripts/logrotate scripts/bacula.desktop.gnome1 scripts/bacula.desktop.gnome2 scripts/bacula.desktop.gnome1.consolehelper scripts/bacula.desktop.gnome2.consolehelper scripts/bacula.desktop.gnome1.xsu scripts/bacula.desktop.gnome2.xsu scripts/gnome-console.console_apps scripts/mtx-changer scripts/dvd-handler scripts/bacula-tray-monitor.desktop scripts/logwatch/Makefile scripts/logwatch/logfile.bacula.conf src/Makefile src/host.h src/console/Makefile src/console/bconsole.conf src/gnome2-console/Makefile src/gnome2-console/gnome-console.conf src/wx-console/Makefile src/wx-console/wx-console.conf src/tray-monitor/Makefile src/tray-monitor/tray-monitor.conf src/dird/Makefile src/dird/bacula-dir.conf src/lib/Makefile src/stored/Makefile src/stored/bacula-sd.conf src/filed/Makefile src/filed/bacula-fd.conf src/filed/win32/Makefile src/cats/Makefile src/cats/make_catalog_backup src/cats/delete_catalog_backup src/cats/create_postgresql_database src/cats/update_postgresql_tables src/cats/make_postgresql_tables src/cats/grant_postgresql_privileges src/cats/drop_postgresql_tables src/cats/drop_postgresql_database src/cats/create_mysql_database src/cats/update_mysql_tables src/cats/make_mysql_tables src/cats/grant_mysql_privileges src/cats/drop_mysql_tables src/cats/drop_mysql_database src/cats/create_sqlite_database src/cats/update_sqlite_tables src/cats/make_sqlite_tables src/cats/grant_sqlite_privileges src/cats/drop_sqlite_tables src/cats/drop_sqlite_database src/cats/create_sqlite3_database src/cats/update_sqlite3_tables src/cats/make_sqlite3_tables src/cats/grant_sqlite3_privileges src/cats/drop_sqlite3_tables src/cats/drop_sqlite3_database src/cats/sqlite src/cats/mysql src/cats/create_bdb_database src/cats/update_bdb_tables 
src/cats/make_bdb_tables src/cats/grant_bdb_privileges src/cats/drop_bdb_tables src/cats/drop_bdb_database src/cats/create_bacula_database src/cats/update_bacula_tables src/cats/grant_bacula_privileges src/cats/make_bacula_tables src/cats/drop_bacula_tables src/cats/drop_bacula_database src/findlib/Makefile src/pygtk-console/Makefile src/tools/Makefile src/win32/winbacula.nsi src/win32/baculafd/bacula-fd.conf src/win32/Makefile src/win32/console/bconsole.conf src/win32/wx-console/wx-console.conf src/win32/pebuilder/Makefile po/Makefile.in $PFILES"
+ ac_config_files="$ac_config_files autoconf/Make.common Makefile scripts/startmysql scripts/stopmysql scripts/btraceback scripts/startit scripts/stopit scripts/bconsole scripts/gconsole scripts/bacula scripts/devel_bacula scripts/Makefile scripts/logrotate scripts/bacula.desktop.gnome1 scripts/bacula.desktop.gnome2 scripts/bacula.desktop.gnome1.consolehelper scripts/bacula.desktop.gnome2.consolehelper scripts/bacula.desktop.gnome1.xsu scripts/bacula.desktop.gnome2.xsu scripts/gnome-console.console_apps scripts/mtx-changer scripts/dvd-handler scripts/bacula-tray-monitor.desktop scripts/logwatch/Makefile scripts/logwatch/logfile.bacula.conf src/Makefile src/host.h src/console/Makefile src/console/bconsole.conf src/gnome2-console/Makefile src/gnome2-console/gnome-console.conf src/wx-console/Makefile src/wx-console/wx-console.conf src/tray-monitor/Makefile src/tray-monitor/tray-monitor.conf src/dird/Makefile src/dird/bacula-dir.conf src/lib/Makefile src/stored/Makefile src/stored/bacula-sd.conf src/filed/Makefile src/filed/bacula-fd.conf src/filed/win32/Makefile src/cats/Makefile src/cats/make_catalog_backup src/cats/delete_catalog_backup src/cats/create_postgresql_database src/cats/update_postgresql_tables src/cats/make_postgresql_tables src/cats/grant_postgresql_privileges src/cats/drop_postgresql_tables src/cats/drop_postgresql_database src/cats/create_mysql_database src/cats/update_mysql_tables src/cats/make_mysql_tables src/cats/grant_mysql_privileges src/cats/drop_mysql_tables src/cats/drop_mysql_database src/cats/create_sqlite_database src/cats/update_sqlite_tables src/cats/make_sqlite_tables src/cats/grant_sqlite_privileges src/cats/drop_sqlite_tables src/cats/drop_sqlite_database src/cats/create_sqlite3_database src/cats/update_sqlite3_tables src/cats/make_sqlite3_tables src/cats/grant_sqlite3_privileges src/cats/drop_sqlite3_tables src/cats/drop_sqlite3_database src/cats/sqlite src/cats/mysql src/cats/create_bdb_database src/cats/update_bdb_tables 
src/cats/make_bdb_tables src/cats/grant_bdb_privileges src/cats/drop_bdb_tables src/cats/drop_bdb_database src/cats/create_bacula_database src/cats/update_bacula_tables src/cats/grant_bacula_privileges src/cats/make_bacula_tables src/cats/drop_bacula_tables src/cats/drop_bacula_database src/findlib/Makefile src/tools/Makefile src/win32/winbacula.nsi src/win32/baculafd/bacula-fd.conf src/win32/Makefile src/win32/console/bconsole.conf src/win32/wx-console/wx-console.conf src/win32/pebuilder/Makefile po/Makefile.in $PFILES"
ac_config_commands="$ac_config_commands default"
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
"src/cats/drop_bacula_tables" ) CONFIG_FILES="$CONFIG_FILES src/cats/drop_bacula_tables" ;;
"src/cats/drop_bacula_database" ) CONFIG_FILES="$CONFIG_FILES src/cats/drop_bacula_database" ;;
"src/findlib/Makefile" ) CONFIG_FILES="$CONFIG_FILES src/findlib/Makefile" ;;
- "src/pygtk-console/Makefile" ) CONFIG_FILES="$CONFIG_FILES src/pygtk-console/Makefile" ;;
"src/tools/Makefile" ) CONFIG_FILES="$CONFIG_FILES src/tools/Makefile" ;;
"src/win32/winbacula.nsi" ) CONFIG_FILES="$CONFIG_FILES src/win32/winbacula.nsi" ;;
"src/win32/baculafd/bacula-fd.conf" ) CONFIG_FILES="$CONFIG_FILES src/win32/baculafd/bacula-fd.conf" ;;
readline support: ${got_readline} ${PRTREADLINE_SRC}
TCP Wrappers support: ${TCPW_MSG} ${WRAPLIBS}
TLS support: ${support_tls}
- Encryption support: ${support_crypto}
ZLIB support: ${have_zlib}
enable-smartalloc: ${support_smartalloc}
enable-gnome: ${support_gnome} ${gnome_version}
Kern's ToDo List
- 07 December 2005
+ 26 December 2005
Major development:
Project Developer
- Does WildFile match against full name? Doc.
- %d and %v only valid on Director, not for ClientRunBefore/After.
+Priority:
+- Fix foreach_jcr() to have free_jcr() inside next().
+ jcr=jcr_walk_start();
+ for ( ; jcr; (jcr=jcr_walk_next(jcr)) )
+ ...
+ jcr_walk_end(jcr);
+- Implement status that shows why a job is being held in reserve, or
+ rather why none of the drives are suitable.
+- Implement a way to disable a drive (so you can use the second
+ drive of an autochanger, and the first one will not be used or
+ even defined).
+- Implement code that makes the Dir aware that a drive is an
+ autochanger (so the user doesn't need to use the Autochanger = yes
+ directive).
+
For 1.39:
- Make hardlink code at line 240 of find_one.c use binary search.
- Queue warning/error messages during restore so that they
--- /dev/null
+ Technical notes on version 1.37
+ Kern Sibbald
+
+General:
+
+Changes to 1.37.42:
+26Oct05
+- Return HARDEOF status from bnet_recv() if bsock NULL rather
+ than aborting.
+- Don't overwrite pthreadVCE.dll or msvcr71.dll during Win32
+ install.
+- Update README.win32 instructions.
+25Oct05
+- Make db_get_job_record() return Name so that the ACL
+ can be checked. Fixes bug #446
+22Oct05
+- Insure that all the SD tools init the Autochanger
+ resources.
+
+Changes to 1.37.41:
+22Oct05
+- Print error message if no Changer Command found.
+- Apply fix from Martin Simmons to clear structure before
+ using it in IPADDR -- fixes a bind() bug on AIX.
+14Oct05
+- Add NetBSD fstype patch from Geert Hendrickx <ghen@telenet.be>
+- Update num_parts only when writing to DVD.
+13Oct05
+- Fix error conditions in bpipe.c DVD routine (terminate buffer).
+- Use a bigger buffer 32K as suggested by Arno in bpipe.c.
+- Increase dvd mount timeout with patch from Arno.
+- Modify DVD code so that it keeps a state flag that indicates
+ when the freespace variable is valid. freespace_errno, now has
+ either 0 or an errno. There are no negative errnos.
+- Create is_freespace_ok() to test state flag. Also, set_freespace_ok()
+ and clear_freespace_ok(). Modify code to use them. This
+ simplifies a bit the logic of the freespace code.
+- Edit 64bit debug values correctly in dvd.c
+- Fix %e variable to check num_parts rather than part.
+- Use static buffer instead of static buffers for FI_to_ascii()
+ and stream_to_ascii() debug routines in SD. This is to prevent
+ possible race conditions between two threads (possibly Phil's
+ unmount problem).
+12Oct05
+- Add a .dir command that separates arguments with commas. It
+ is intended to be used in the tree routines to get a
+ machine readable output for GUIs like wx-console.
+- Make configure set dvd-handler to have execute permission.
+- Install dvd-handler as is done for other dvd-xxx scripts.
+- In block.c call dvd_write_part() to write last part rather
+ than dvd_open_next_part().
+- Prefix some DVD subroutines with dvd_
+11Oct05
+- Add extra debug to Update media error for VolFiles decrease
+ error.
+- Do not update dev->file on set_ateof() for non-tape.
+10Oct05
+- Mark DVD volume in error if part cannot be written.
+- If a DVD is mounted, unmount it before asking operator
+ to mount a different one.
+- Add Arno's dvd-handler script to the scripts directory and
+ integrate with configure. It replaces both existing scripts.
+- Make default schedule start at 23:10
+- Implement gui release in Makefile. It creates two .tar.gz
+ bacula-web and bimagemgr.
+08Oct05
+- Add README plus tar release to gui project.
+- Manual documentation
+- Tweak bacula.man doc
+- Add PRAGMA synchronous = NORMAL; to SQLite3 tables. This
+ speeds up SQLite3 so it is only about 10% slower than version
+ 2.xx. Thanks to Russell Howe for this tip.
+- Add msvcr71.dll to pebuilder .inf file as it is needed by
+ Bacula FD. Thanks to Brandon for passing this fix to Scott.
+05Oct05
+- Add VolumePurged method to Python JobEvents class. Fixes
+ bug #428 (I think).
+- Add JobId index to File table for MySQL.
+- Correct tray-monitor crash of bacula-dir. qstatus_cmd()
+ code referenced the wrong pointer. Fixes bug #438.
+- Add bacualnovss.mak file from Martin.
+- Remove Pool from restore string in wbrestorepanel.cpp. Patch
+ from user for bug #433.
+- Correct code in wbrestorpanel.cpp with misplaced parens. Patch
+ from user for bug #431
+- Correct printing filenames/date/times in wbrestorepanel.cpp due to bad
+ column alignment. Patch from user for bug #432.
+
+Changes to 1.37.40:
+01Oct05
+- Add fix to compat.h supplied by Martin Simmons that permits
+ building the Win32 FD without VSS.
+29Sep05
+- Bring cats/bdb_x.c routines up to version 1.37 (return values
+ are different).
+28Sep05
+- Integrate libwrap patch from Szechuan Death. They should
+ also fix bug 423.
+- Clean up patches directory for next release.
+- Add PS variable in bacula.in script as recommended by a user.
+ "Tom Boyda" <tboyda@daou.com>
+- Correct mtx-changer.in so it only creates a temp file when
+ actually needed.
+- Add patch supplied by user in a bug report to fix gnome2
+ console buttons.
+- Add patch from Peter Eriksson <peter@ifm.liu.se> to correct
+ NLS error in tray-monitor.c
+26Sep05
+- Add code to catalog_update to detect NULL db pointer.
+- Remove char *msg argument from catalog_request and
+ catalog_update().
+24Sep05
+- Fix mode change open in btape.c
+- Use nonblocking opens on Linux only.
+- Move set_blocking code for tapes into method to
+ simplify main line code.
+- Cleanup the code for ensuring we advance tape on fsf.
+ This should fix IBM problems -- patch from Adam Thorton
+ was very helpful.
+- Remove test for BMT_EOD in fixup_device... this eliminates
+ need for status_dev() routine.
+- Before doing label ensure device is in read-write mode.
+
+Changes to 1.37.39:
+20Sep05
+- Tweak daemon.c berrno, copyright.
+19Sep05
+- Fix handling of temp file in mtx_changer.in, reported as
+ a security bug, but it is not really. Bug #422
+- Fix security problem of handling temp file in randpass.
+ Bug #422.
+- During label/relabel, add new_volume to VOLRES list in SD.
+18Sep05
+- Apply Landon's patch for the TLS ANS1 API change.
+- Remove old code.
+- Make single exit path in first_open_device().
+17Sep05
+- Make new_volume walk through all Vols looking for dev
+ to release.
+16Sep05
+- Make "quit" command in console always allowed.
+- Remove the storage list between each console command to
+ keep Dir from remembering a previously selected SD.
+- Add code to reservation VOLRES subroutines to try to ensure
+ we don't end up with two Volumes on the same drive.
+- Simplify the mutex code in VOLRES a bit to reduce the chance
+ of error.
+15Sep05
+- Apply Nicolas' dvd-freespace.in patch.
+- Make sure SQL table names are not translated.
+- Eliminate incorrect message saying barcodes not
+ found in "label barcodes" -- fixes bug report.
+
+Changes to 1.37.38:
+07Sep05
+- Add ability to have passwords on backup of catalog as
+ alternate 3rd argument. Submitted by Andrew Ford
+ <A.Ford@ford-mason.co.uk>
+- Turn off TapeAlert by default if user enables a Device
+ and add a note to install mtx.
+04Sep05
+- Fix out of order volumes during restore.
+
+Changes to 1.37.37:
+30Aug05
+- Final tweaks to build Win32.
+- Enable debug code in ua_label.c
+- Remove devices from the Volume list even if no
+ Volume is in drive. Hopefully this fixes Arno's problem
+ of multiple volumes listed in the same drive.
+28Aug05
+- Apply Landon's patch for TLS default values.
+- Correct LOCALEDIR problem in build of Win32
+- Correct ssize_t problem in build of Win32
+- Add code to llprint pools and volumes to debug next item.
+- From bug report, fix resetting Pool defaults in Volume. It
+ was a typo "Max" was missing in several places.
+- Don't allow translation of database Volume Status values.
+24Aug05
+- Ensure that the drive is closed before calling the
+ mtx-changer script so that the script can access the drive.
+- Add drive name to reserved Volume list printout in SD.
+23Aug05
+- Fix bug in acquire.c that incorrectly reported volume
+ busy.
+- Add additional debug code and messages in reserve.c
+- Eliminate unwanted warning message in reserve.c
+
+Changes to 1.37.36 release 22Aug05:
+20Aug05
+- Landon's fix for NLS detection
+- Eliminate incorrect compiler warning on FreeBSD.
+- Move Win32 errno message build into berrno constructor.
+- Minor Win32 build tweaks
+19Aug05
+- A number of minor build Win32 fixes.
+- Remove a PostQuitMessage() as suggested by Thorsten so that
+ BartPE restore can work correctly.
+- Fix for create JobMedia so that VolIndex remains valid even
+ during a delete Job or pruning -- bug 402.
+- Minor tweak for Win32 build.
+18Aug05
+- Win32 fix -- remove debug O_NONBLOCK code.
+- Fix bug 399 -- make_catalog_backup does not work for sqlite3
+- Implement unloading a volume in a different drive if it
+ is needed in the current drive.
+- Implement search for unused autochanger drive.
+- Implement search for exact Volume in reservation before
+ other searches.
+- Fix picking up drive in Dir so that it is not done in
+ the status command.
+- Eliminate double check on "loaded" for autochanger.
+17Aug05
+- Start coding better reservation algorithm
+- Always look for slot for label command.
+- Add more debug code for autochangers.
+- Apply fix from Stephan Leemburg <sleemburg@jvc.nl> for
+ improper scanning of schedule resource:
+ Run = Level=Full Pool=Catalog daily at 1:20
+- Apply patch from Chris Lee <labmonkey42@gmail.com> for
+ adding --enable-build-dird --enable-build-stored.
+- Tweak datadir definition in configure.in
+16Aug05
+- Fix bug that missed drive=nn specification.
+- Eliminate nonblocking kludge in heartbeat of FD as
+ it caused high CPU usage.
+- Pickup loaded slot when doing open() of tape drive.
+- Make autochanger reservation code go through full list
+ first pass until exact match found.
+15Aug05
+- Fix how FileSet is saved in job record to correct continual
+ Full save.
+- Make datadir print on config.out listing.
+- Move get FileSet record up in backup init to eliminate continual
+ Full save seen by Peter Sjoberg.
+- Add VolumeName to read-only Python variables.
+- Add VolumePurged event for Python.
+- Suppress /dev/ Filesystem change prohibited INFO messages.
+- Do not delete FT_RAW files before restore (allows FIFOs
+ to be used for restore).
+13Aug05
+- Add drive specification to mount, unmount, release, label,
+ and relabel for Autochangers. Note Dir<->SD protocol has
+ changed.
+
+Changes to 1.37.35:
+12Aug05
+- Disable parts of NLS as the configure does not work here.
+- In job backup init (backup.c) define definitive Job level and
+ since time, *then* apply Job Pool override selection if any.
+11Aug05
+- Modified bconsole script so that if it is improperly
+ installed, it will refuse to execute. This avoids
+ recursive call loops.
+
+Changes to 1.37.34:
+06Aug05
+- Apply David's ACL fix to src/filed/acl.c
+05Aug05
+- Apply patches sent by David Duchscher <kreios@gmail.com> for
+ making ACLs work on MacOS X and FreeBSD.
+04Aug05
+- Apply patch in bug#397 that improved configure
+ - find readline under $with_readline/include/readline
+ - no libutil under Solaris
+ - no need for -ldl under Solaris
+- Make reservation system single threaded during the
+ search to avoid two threads competing for the same
+ resource.
+- Correct a return code in find_suitable_device_for_job()
+ Possibly cause of "busy writing to another volume".
+03Aug05
+- Modify open() for tape so nonblocking really works.
+- Use fcntl() to reset blocking status rather than close()
+ and reopen the drive.
+- Make sure dev->open() is always called so that any change
+ in read/write permissions will occur.
+- Open drives initially in daemon in read-only mode.
+- Ensure that each time the VolHdr.VolumeName is zapped
+ or changed that free_volume() is called on the old name.
+
+Changes to 1.37.33:
+03Aug05
+- Require 5 arguments to mtx-changer except list and slots
+- Turn -EPIPE status returns from bpipe to ETIME
+- Include Slot in SD status output
+- Do not term_dev() during initialization in SD if the device
+ could not be opened. In the case of a tape drive, there may
+ be no tape in the drive.
+
+Changes to 1.37.32:
+02Aug05
+- Correct PostgreSQL database scripts as suggested by a user.
+- Add additional info to FATAL message generated when a device
+ is busy writing to another volume.
+- Suppress an inappropriate NULL Volume name message after a cancel.
+- Correct a warning message in reserve.c
+29Jul05
+- Apply user's patch to make multiple modifiers for times
+ work correctly.
+- Make read_dev_volume_label() handle ANSI/IBM labels
+ correctly -- ie space over any label at the beginning
+ of the tape.
+28Jul05
+- Make ANSI/IBM writing of HDR1/2 labels ignore any
+ errors if at end of tape.
+- Apply Martin's patch to improve Python detection in
+ configure.in
+- Temporarily turn off disk seeking until I find the
+ cause of the problem.
+27Jul05
+- Add OSF1 patch supplied by user.
+- Use number of files selected from write_bsr() only
+ if it is not defined.
+- Explicitly seek to end of file when getting size for
+ restore test.
+- Correctly set EndBlock position in JobMedia record
+ for files.
+- Remove unnecessary set StartBlock in bscan. Caused bscan
+ regression error.
+26Jul05
+- Modify mtx-changer to wait a maximum of 300 seconds.
+- Do restart of failed jobs only for Backups job types.
+- A number of DVD updates from Nicolas.
+24Jul05
+- Turn off old service helper code in Win32.
+- Correct Messages bug found by Phil in stored.
+23Jul05
+- Complete (almost) documentation of 1.38.
+- Add error messages for error conditions with VSS.
+- Fix additional problems with VSS backup that I introduced.
+Changes to 1.37.31:
+22Jul05
+- Correct compiler complaints in wx-console and tray-monitor.
+- Correct VSS problems recognizing c:
+- Add VSS before job status
+- Fix output of status from being one big line.
+- Change cd xx; make to cd xx && make as suggested by Phil.
+- Cleanup projects file
+- Remove unnecessary casting of FF_PKT in filed.
+- Apply Thorsten's bugfix for vss_generic.cpp
+- Add check for df path for dvd_freespace
+- Use df to get space used on DVD.
+- Change sense of flag indicating erase DVD or not before writing.
+- Fix bpipe so that it never modifies the result pointer.
+- Replace more dev_name by print_name().
+- Rewrite edit_device_codes_dev() so it does not overwrite the
+ supplied buffer.
+- Update printing of labels (for bls) so that critical information
+ is printed (Job name and timestamp).
+- Cleanup old spool files when starting the SD.
+- Modify vss.cpp to allow C: as a path name. Otherwise VSS
+ doesn't work when only a drive name is given.
+- Modify vss.c to eliminate double / in filenames.
+- Update doc -- particularly the restore chapter.
+18Jul05
+- Make all files in working directory have .xxx at end.
+- Work on DVD writing.
+- Fix keepatime bug (bugs database).
+- Move Python variables from Job to Bacula. They are
+ DirName, Version, ConfigFile, and WorkingDir
+- Fix delete of bootstrap to only occur on Bacula created
+ filenames.
+- Allow cancelling a Job name that is not active. It is
+ sent to the daemons.
+17Jul05
+- Fix name space pollution by OpenSSL 0.9.8 reported by
+ Matthias Kurz -- applied his patch.
+- Fix bpipe.c so that it does not modify results pointer.
+ ***FIXME*** calling sequence should be changed.
+- Remove some remaining references to dev_name.
+- Fix calls to mount_dev() and unmount_dev() to
+ correspond to returned value (bool instead of int).
+- Try without success to make DVD writing work.
+
+Changes to 1.37.30 released 16 July 05:
+14Jul05
+- Fix "dir" command scanning field misalignment in
+ wx-console.
+- Switch to using the wxWidgets Unicode library.
+- Include msvcr71.dll in distribution.
+- Add VSS to status line in Win32 FD if enabled.
+- Get VSS build scripts working with Thorsten's help.
+- Unlink the bootstrap file after sending it to
+ the FD.
+- Remove sending include/exclude lists to the FD during
+ a restore -- deprecated code.
+- Cleanup the bootstrap files in the FD.
+
+Changes to 1.37.29:
+14Jul05
+- Remove old commented out code from configure.in
+- Add baculavssfd.mak file for building VSS version of
+ Win32 FD.
+- Correct date (year) in vss.cpp and vss_generic.cpp
+13Jul05
+- I finally found and squashed the elusive SD crash.
+ I needed to initialize the used volume list before
+ firing off the device initialization thread.
+Changes to 1.37.28:
+11Jul05
+- Make sure that bpipe results are zapped even on
+ error return.
+- Lots of documentation.
+- Do not prune volume marked as append when needing a
+ new Volume.
+- Print a warning message in SD if a non-used Volume
+ is specified and autolabel not turned on.
+- Correct a bug in chksum.c concerning SHA1 signatures
+ (an * should have been & when checking for a bit flag).
+- Print File:Block for all label records in label.c -- concerns
+ primarily bls when doing Job listings (-j).
+- Correct is_volume_in_use() to return false if testing
+ on the same device where the Volume is already mounted.
+- Define a init_done flag in the SD that is set when the
+ devices are initialized and make users connecting wait.
+ This prevents useless connect failure warning messages.
+- Do additional device locking in ask_op_to_mount_volume()
+ to prevent race conditions with a user labeling a Volume
+ or autolabeling.
+09Jul05
+- Add a test for error return from bnet_wait... in heartbeat.c
+ in FD to avoid CPU loop.
+- Implement TLS in gnome console and wx-console.
+08Jul05
+- Correct a NULL pointer reference in the mount command.
+- Correct typo in Copyright
+- Add detection of EOM for IBM drives (i.e. errno == ENOSPC)
+07Jul05
+- Remove temp file created in mtx-changer script.
+- Make fsf_dev() into a class method.
+06Jul05
+- Modify mtx-changer.in script to return slot:barcode for
+ Volumes that are loaded in the drives.
+- Correct some more places where dev->is_blocked() needs
+ to be checked in dircmd.c in SD.
+- Update doc.
+05Jul05
+- Add code to ensure that reserved but unused volumes
+ are freed.
+- Correct how Volumes are mounted and handled so that the SD
+ does not get stuck if multiple volumes are used (recycling,
+ relabling, ...)
+- Correct bug where you could relabel a volume while it
+ was being acquired -- created chaos.
+04Jul05
+- Correct seg fault caused by open() calling sequence change.
+03Jul05
+- Add new rc-chio-changer script by Rudolf Cejka to
+ examples/autochangers
+- Apply Rudolf's changes to bacula.in
+- Expand the space from 8 to 10 characters in editing
+ file sizes for restore and dir of catalog, otherwise
+ GB sizes are truncated -- fixes bug report.
+- Modify wx-console to know about 10 character widths.
+- Allow descending into top level directory if "recurse=no"
+ is set. Fixes a bug report.
+- Install pthreadVCE.dll when installing console or wx-console
+ on Win32 systems. Fixes bug report.
+02Jul05
+- Tweak dvd-writepart script to prevent door from opening/closing
+ so much.
+- Remove GROUP BY in several PostgreSQL commands to prevent error.
+ Resolves bug report.
+- Ensure that < as first character of filename list is not treated
+ as a directory for restore.
+- Add debug to heartbeat in FD as it seems to go into an
+ infinite loop from time to time during SD failure in DVD writing.
+- Add more debug code to dvd writing.
+- Attempt not to destroy existing fs on DVD.
+30Jun05
+- Detect device mounted for DVD and suppress be sure to
+ mount message after label.
+- Set Cleaning tape status to "Cleaning" and force no
+ MediaType.
+- Get DVD writing working with new standard Bacula open()
+ code.
+- Rename get_filename() to make more sense.
+- Detect "is already mounted on" on mount command so to avoid
+ error if device is already mounted.
+- Eliminated guess_name() code. It may be necessary to
+ add it back later.
+- Eliminate seg fault from printing invalid results.
+- Make dvd_write_part() bool.
+
+29Jun05
+- Attempt to fix DVD writing by eliminating a number of the
+ DVD subroutines to simplify.
+- Modify DEVICE::open() to take dcr as first argument. This
+ will permit providing more info to DVD opening.
+- Fix scanning for time/size items which in some cases
+ ate the next line.
+- Eliminate read_dvd_volume_label(). New code (not yet written)
+ *must* open dvd appropriately before calling
+ read_dev_volume_label.
+- Modify open_first_part() open_next_part() to take DCR as
+ argument.
+- Make label command from console work on DVDs.
+- Make mount command from console work on DVDs.
+ Unmount does not work yet.
+
+Changes to 1.37.27:
+27Jun05
+- Add Database vendor to CatalogRes tuple for Python.
+- Update doc
+- Implement DoesVolumeExist(Vol) for Python.
+- Prevent python command from seg faulting if no arg given.
+
+Changes to 1.37.26:
+26Jun05
+- Add set_mode method in DEVICE.
+- Correct set_mode method in DEVICE
+- Add more DVD debug info
+23Jun05
+- Check for incorrect duration and size modifiers in conf files.
+22Jun05:
+- Make Version a tuple (version, build-date)
+- Add CatalogRes tuple (DBName, Address, User, Password,
+ Socket, Port)
+- Add Version, ConfigFile, and WorkingDir as Python attributes
+ in the Director.
+- Implement code (principally for Win32) that on failure to
+ create a file, it will cd into the directory and attempt
+ to create the file using a relative path. This avoids creating
+ files with paths which fail on Win32.
+- Fix parsing of times and sizes with decimal numbers.
+- Make free_volume_list() in SD work if vol list is not
+ initialized (./bacula-sd -t).
+21Jun05:
+- Add debug error printout when open() fails.
+- If open() of DVD fails in mount.c, return false.
+- Split open() code for DVD into separate subroutine in dev.c
+
+Changes to 1.37.25 released on 20 Jun 05:
+20Jun05:
+- Remove (cd gnome-console; make clean) from src/Makefile.in
+- Fix bug where Storage daemon gets confused about what
+ tape is mounted. (one line of code was inadvertently
+ deleted).
+
+Changes to 1.37.24:
+18Jun05
+- DVD writing/reading seems to be mostly working.
+- Set execute bits on dvd-freespace and dvd-writepart
+- Make dvd-freespace use existing dummy file.
+- Modify dvd-freespace to pickup size from Track Size:
+16Jun05
+- Add Date, Job, level to updates to .bsr file in
+ dird/backup.c
+- Add debug info to dvd-freespace.in
+- Fix hard coded bacula.sql in make_catalog_backup reported
+ by a user.
+- Make sure a verify volume to catalog never reports an error
+ if there are zero files to verify.
+- Remove confusing debug info in filed/backup.c on network
+ error.
+- Make sure output from console is not sent to system log.
+- Convert open_dev() into a class method.
+- Change VolHdr.VolName to VolHdr.VolumeName.
+- Add a flag in the device state word to indicate that
+ we found Media in the drive (DVD).
+- Make mount_dev() and unmount_dev() return bool.
+
+Changes to 1.37.22:
+14Jun05
+- Fix the same state variable problem in open_next_part().
+- C++ify the SD code a bit more.
+13Jun05
+- Add more debug code and clarify debug code for DVDs.
+- Do not save and restore state in open_first_part()
+ since state should be properly set after open_dev().
+- Make default mandir /usr/share/man
+- Install Bacula man page.
+- Implement passing of FileSet Enable VSS to FD.
+- Move main body of reserve drive code into subroutine
+ so that it can be called multiple times.
+12Jun05
+- Eliminate getpass() for Win32 builds in console as the
+ function does not exist.
+10Jun05
+- Correct some reservation problems in SD when no devices
+ are available.
+- Start removing #ifdef HAVE_TLS by sneaky tricks.
+- Begin implementation of TLS in wx-console
+- Remove ignoring SIGCHLD from console.
+- Rework the dlist binary search routines for implementation
+ of the Volume reservation code -- make it more general.
+- Strip double slashes // from Win32 filenames in an attempt
+ to resolve restore problems on some systems.
+- Fix a minor bug in the trace code that caused the first
+ line output to be lost.
+- Implement a good first cut at adding Volume reservation code
+ to the storage daemon (in file reserve.c).
+- Remove old unused code from the tree.c routines.
+
+Changes to 1.37.21:
+06Jun05
+- Fix compile problems on Win32
+- Start writing Volume reservation list (already exists, but
+ is not really very good).
+- Implement attribute caching to put Signature into database
+ at the same time as the file attributes thus eliminating a
+ number of database accesses.
+- Correct a reservation problem.
+- Implement full Dir Storage use.
+- Reduce a bit of TLS #ifdeffing.
+
+Changes to 1.37.20:
+04Jun05
+- Minor changes
+01Jun05
+- Add more documentation to mtx-changer.in
+- Correct link to manual in authenticate.c in various
+ directories.
+- Create a new src/stored/reserve.c file where the
+ Use Storage command is processed and drives are
+ reserved.
+- Modify src/stored/autochanger.c to keep track of each
+ Slot that is loaded for each device.
+- Ensure that changer_command and changer_name are picked
+ up from Autochanger resource if not specified, and if
+ neither is specified, err.
+30May05
+- Fix bextract.c compile problem
+- Create bacula.man
+- Make make distclean clean a bit better
+29May05
+- Remove old code in jcr.c
+- Make testls release jcr chain when terminating.
+27May05
+- Implement Maximum Job Spool Size (actually DCR based)
+26May05
+- Use light weight non-recursive locking on jcr chain.
+- Make JCR a class and implement inc_use_count() and
+ dec_use_count() methods that ensure that the jcr is
+ locked when inc/dec the use count.
+- Remove the global jcr lock when traversing the jcr
+ chain.
+- Use dlist to implement the jcr chain rather than hand
+ crafted next and prev links.
+- Lock the jcr chain inside each function that modifies
+ the chain.
+
+Changes to 1.37.19:
+26May05
+- Fix compile problem of ua_restore.c on broken compilers.
+- Apply patch from bug 326 to permit bacula status by any user.
+- Fix bug 325 -- conversion of 12:30pm to 24hour time.
+25May05
+- Put Dmsg() on inside if() to avoid calling subroutine.
+- Make restore.bsr have unique name.
+- Allow user to define bsr filename on restore command line
+ with bootstrap=xxx.bsr
+- Add limit=nnn to "list jobs" command.
+- Remove old restore code that did not use .bsr file.
+- unlink automatically generated bsr file.
+- Cleanup heartbeat code so that duped fd is almost sure
+ to be released. Previously under certain conditions, the
+ memory was not released due to race conditions.
+- Shorten copyright.
+20May05
+- Unify the reserve_device() for a single device into one subroutine.
+18May05
+- Modify wait during use_device to happen only after all devices
+ have been examined rather than in the reserve_device code.
+- Correct updating count of number of Volumes in a pool.
+
+Changes to 1.37.18:
+16May05
+- Add more debug to SD for Autochangers + status output.
+- Add Scratch to PoolType in PostgreSQL make...tables and do not
+ permit NULL PoolTypes. Fix for bug 319 reported by Eric.
+- Update LICENSE.
+- Add quotes around filename in parse_config error message. Bug
+ reported by Eric.
+15May05
+- Change nested \include to \input so that sections are properly
+ included in the pdf manual -- update the Web site.
+- Set reconnect flag in MySQL packet to 1 to ensure that connection
+ is re-established. MySQL 5 changed default to 0. Fixes bug report.
+- Fix Scratch pool handling as reported in a bug by Eric Bollengier
+ by applying his patch.
+- Remove delete job in favor of delete jobid.
+- Add = NULL to configfile definitions as reported by Eric in a bug
+ report.
+- Update winbacula.nsi.in to reflect new manual file structure.
+10May05
+- Correct a minor build problem with wx-console.
+- Add cancel() to Dir Python scripting.
+- Re-correct bug in parse-config error handling.
+- Reorganization of use_command in SD to permit
+ waiting and multiple drive autochanger support.
+09May05
+- Correct bug in parse_config error handling.
+- Where ever possible mark a volume in error or not
+ InChanger in mount.c
+- Fix bug in changing tape pools after first backup. Reported
+ by Peter Sjoberg.
+- Enhance mtx-changer to use Working Directory as temp.
+- Remove all but initial setup locking of Res in SD.
+08May05
+- Add Client OS type to Job report.
+- Add version to manual
+- Update the Web site to have a single page for
+ the documentation links.
+
+Changes to 1.37.18 release 08May05:
+08May05
+- Correct attribute definition compile error in 1.37.17
+- Correct inverted order of CreateTime and MD5 pointed out
+ by a user in a bug report in FileSet db routine causing
+ the MD5 to print in the output instead of the time/date.
+
+Changes to 1.37.17:
+07May05
+- Implement cstrlen() in sql list routines.
+- Implement caching of attributes to add the
+ signature so that only one DB call will be made
+ per file. Not yet turned on.
+- Fix Win32 build for TLS.
+- Optimize File pruning to eliminate one database call.
+- Fix bug that prevented File pruning from working.
+- Implement a cstrlen() which returns the character
+ length of a UTF-8 string.
+06May05
+- Move test for MaxStartDelay as suggested by Peter.
+- Implement Python methods (I had to read the Python source
+ code).
+- Implement run() method in Director.
+- Add Priority and Scheduled time to Job report.
+- Add JobInit and JobRun events.
+- Add Priority as Python read/write attribute to Job.
+- Correct typo in bsmtp reported by Jo.
+
+Changes to 1.37.16 released 05May05
+03May05
+- Make a few tls ifdef tweaks.
+- Fix create_file.c Win32 problem pointed out by
+ Peter Sjoberg.
+- Fix really ugly bstrncpy() bug found by Thorsten.
+- Move winapi.h/c from findlib to lib for inclusion in
+ multiple places.
+02May05
+- Thorsten Engel finished his work on Win32 Unicode. We
+ now have a single executable that runs on all Win32 machines.
+- Move job initialization code after job scheduling so that
+ Verify jobs check for the prior JobId after they are really
+ started rather than before.
+- Fix lib/fnmatch.c so that it does proper testing before folding.
+- More documentation -- at tls and ansi labels chapters.
+- Fix fileset_convert.pl to handle empty Exclude statements.
+- Turn regex back off in Win32
+01May05
+- Fix sign extension problem in lex.c that reads UTF-8
+ with Chinese characters incorrectly.
+
+Changes to 1.37.14:
+30Apr05
+- Remove a few HAVE_TLS #ifdefs
+- Implement final Python style interface. More implementation to
+ be done, but the interface should change little if at all.
+28Apr05
+- Make default no tls support. You must add
+ --with-openssl to get tls support.
+27Apr05
+- Update Web header to include google search in the search
+ box -- thanks to input from Michel Meyers.
+- Fix md5sum so that it builds with the new openssl stuff.
+- Take some enhancements to the md5sum test program proposed
+ by a user.
+26Apr05
+- Make option 2 of restore work better (list last 20
+ locations for a file).
+- Make SD re-open a device with the right permissions if
+ it was previously opened with something different. This
+ should allow reading read-only Volumes under all circumstances.
+- Implement restore of a single directory.
+- Apply Tru64 patch supplied by Pascal Pederiva <freebsd@paped.com>
+- Apply Unicode fixes for Win32 from "Thorsten Engel"
+ <thorsten.engel@matrix-computer.com>
+- More work on Python read feature in FD.
+22Apr05
+- Fix (hopefully) the ftello() overflow reported by Peter.
+- Landon Fuller committed his TLS patch.
+- Fixed two minor warnings in console.c with TLS turned off.
+- Updated the Makefile.in for wx-windows and tray-monitor
+ to handle TLS. I can only build the tray-monitor.
+- Fix bscan to open tape in read-only mode (actually fix
+ it so that it doesn't use the standard open routine).
+- Correct what appears to be an error in setting the
+ return value in dvd.c
+21Apr05
+- Get FD Python running -- design Python backup interface.
+- Fix seg fault in SD when referencing Alert Command.
+- More documentation.
+- Fix one more thing in Win32 build.
+20Apr05
+- Doc updates
+- Fix Win32 build
+- Put in production here
+
+Changes to 1.37.13:
+19Apr05
+- Fix SQLite and PostgreSQL table creation script syntax
+ problems.
+- Fix new Python code to work for Director.
+- Move lib/python.c to lib/pythonlib.c so that debug output
+ is easier to read (can distinguish lib from dird, ...).
+- Cleanup Python build so that Python is not dragged
+ into programs that don't use it.
+
+Changes to 1.37.12:
+18Apr05
+- Make Bacula build without Python
+- Drop Status table in drop_ scripts -- bug 283.
+17Apr05
+- First cut of new Python implementation.
+- Doc updates
+- Correct SuSE autostart installation directory name --
+ supplied by a user.
+- Fix director crash if Name directive not supplied
+ in a Job.
+- More cleanup of changing %u to %s for DB IDs.
+- Replace dev_can_write(dev) with dev->can_write().
+11Apr05
+- Make fsr_dev() a method dev->fsr()
+- Remove Created new FileSet message as it always comes out in
+ the daemon messages.
+- eliminate dev_is_tape() in favor of method dev->is_tape()
+- Turn on disk seeking during restore for Disk Volumes. This
+ required some changes to bscan -- they seem a bit suspect as
+ they are not symmetrical for tape/files.
+- Remove some of the recursion in stored/parse_bsr.c
+- Move rescue out to a new Bacula project.
+08Apr05
+- Update of web site to replace old released doc with LaTeX
+ version.
+- Replace logo in doc with new bat logo.
+- Make a good number of updates to the manual.
+- Implement Python scripting using a Python object.
+ This makes interfacing with Bacula cleaner (object
+ oriented).
+- Add Phil's Status table to the database -- used
+ for getting a text description of the status
+ codes.
+- Modify FileSet so that an empty Options (no
+ pattern matching) with an exclude will exclude
+ all files.
+- Modify FileSet so that no top level included File
+ item can be excluded by a matching pattern.
+- Suppress Created new FileSet record in daemon output.
+- Implement Python in FD.
+- Turn off old bfile reader code -- to be replaced
+ by Python.
+05Apr05
+- Remove more recursion in src/dird/bsr.c as pointed out
+ by Peter.
+04Apr05
+- Increase the index width of Filename and Path entries
+ to 255 chars -- suggestion of Meno Abels.
+- Change remaining VARCHARS to TINYBLOBs.
+- Remove recursion from is_volume_selected() following
+ bug report from Peter.
+- Implement mostly correct handling of use_storage in the
+ SD with full lists, ...
+02Apr05
+- Reset NumVols in Pool record from database on every update
+ Pool.
+- Modify DB to support multiple simultaneous copies and
+  RAIT striping.
+- Pass copy and stripe between DIR and SD and put into
+ the JobMedia DB record.
+- Update and test SQLite and MySQL database creation and
+ update scripts.
+- Implement version 9 of the DB.
+31Mar05
+- Convert more atoi to str_to_int64() for DB.
+- Implement filling in NumVols by querying DB rather
+ than trying to keep track of it.
+- Add storage name to string passed to in use storage=
+- Fix newVolume() so that the Python script is always
+ called.
+- Fix handling of pool,PoolId, and storage in ua_output.
+- Same fix in ua_status.c
+- Remove required locking of resources
+- Replace pthread_cond_signal() by pthread_cond_broadcast()
+ hoping to fix the /lib/tls hang problems (lost signal).
+- Move resource locking searching from parse_conf.c to res.c
+ in src/lib.
+- Modify end of volume handling so that fixup_... does not
+ redo what block.c has already done -- writing Vol info to
+ DIR. This fixes a bug with bad numbers of files on a tape
+ when it filled as reported by Peter.
+- In release_device() do not update the DIR on the Volume
+  info if the information was already written at the
+ end of the tape.
+28Mar05
+- NOTE!!!! This version has a new DIR <--> SD protocol. Both
+ must be updated at the same time.
+- Begin implementation of passing all the Storage and Device
+ possibilities to the SD for examination during the reserve
+ phase.
+- Modify the reserve and acquire code in the SD to make a
+ job wait if the device is not available.
+- Implement New Volume Each Job in DIR and pass to SD, not yet
+ used.
+- Remove init/update of the Device resource in DIR
+- Remove passing PoolId to SD and back.
+26Mar05
+- Remove \a and -e from error echos in most Makefiles.
+- Add more debug code when there are errors on the tape
+ to try to find Peter's tape problem.
+- Add wait.c (oops forgot previously).
+- Move all the reserve/acquire_device_for_read/append to have
+ only a DCR as the argument.
+- Rework the reserve_device_for_append() in stored to wait
+ if the drive is not available. Note! This is a short
+ term solution.
+25Mar05
+- Comment out Multiple Connections in the document.
+- Move the P() and V() to subroutines so that they can be accessed
+ from class methods. The reference to strerror() caused problems.
+- Implement new DEVICE class methods block() and unblock() that
+ do what was previously done in 3 lines of code.
+- Implement wait_for_device(), which will wait for any device
+ to be released then return. This requires a new global mutex
+ and condition variable, and is implemented in src/stored/wait.c
+- Change the code in reserve_device_for_read(), which previously
+ failed the job to use the new device wait code.
+22Mar05
+- Apply reschedule patch to 1.37 code.
+- Add copyright to title page of manual so it is clear.
+- Create patch for rescheduling problem found by Ludovic. Storage
+ pointers were lost during rescheduling.
+- Attempt to fix 2.6 rescue disk -- failed!
+- Start working on adding a wait routine in the SD.
+- Cleanup some old invalid doc in watchdog.
+- Convert a number of references to dev->dev_name to dev->print_name().
+- Add new wait.c file to SD.
+- Add a few more methods to DEVICE in SD to cleanup code a
+ bit -- implement a few of the methods.
+18Mar05
+- Fix more print_name()s for printing device name.
+- Modify open_dev to try 10 times every 6 seconds to
+ open the device if it gets an I/O error (meaning no
+ volume mounted). This gives a bit of settling in time
+ for an autochanger and avoids spurious messages.
+- Change all yes/no to yes|no in the manual.
+- Fix win32 create_file.c typo.
+- Fix a typo in an error message.
+17Mar05
+- Detect if fseeko exists with autoconf. If so, use it
+ and ftello.
+- Remove old bacula-*.conf from examples directory (out
+ of date).
+- Remove latex-fr index files from CVS.
+- Rewrite code that stops reading the tape so that the
+ tape is marked at EOT, then once the work is done,
+ the EOT flag is removed.
+- Flush output to file after every send in console.
+- Make setting VolFiles to smaller number fatal.
+- Disable Multiple Connections code.
+- Add patch from user for NetBSD statvsfs() fix to
+ fstype.c
+- Take more care with errors in acquire.c
+- Don't run through dvd code in append.c if bad status
+ returned.
+- Modify code so that an autochanger fault is fatal.
+- Use dev->print_name() in more places.
+- Implement dev->can_steal_lock() to simplify code.
+- Make btape re-read first 10000 records on fill command.
+- Check error return and fail job from fseeko and ftello
+ in spool.c. Don't let a -1 slip in as size.
+
+Changes to 1.37.7:
+15Mar05
+- Apply NetBSD patch from kardel in bug 258.
+14Mar05
+- Add a second job and a second client to the default
+ bacula-dir.conf file.
+- Remove old style Include/Excludes.
+- Fix ANSI labels to put EOF1 and EOF2 after each file mark.
+- Add Python to SD and FD.
+12Mar05
+- Implement IBM labels
+- Implement EOF and EOV labels at the end of a volume.
+- Fix a rather ugly problem with the PoolId not getting
+ passed correctly. Now the DIR passes the Pool name and
+ Media Type to the SD, who passes them back when requesting
+ the next Volume. The DIR then looks up the correct PoolId.
+ This takes more time, but always works, AND allows wild
+ card Media Types (i.e. the SD can decide).
+- The DIR <==> SD protocol has changed.
+
+Changes to 1.37.6:
+11Mar05
+- Fix scanf of PoolId in catreq to handle 64 bit Ids.
+10Mar05
+- Add new ua_update.c file and move update_cmd there.
+- Modify "update slots" to obtain actual number of slots.
+- Tweak autochanger code to handle new slots request.
+- Modify autochanger code to lock/unlock around slots and
+ update slots code.
+09Mar05
+- Patch the FD so that it does not issue an error message if
+ it attempts to restore the permissions on a Win32 drive.
+- Edit "Resource-name" (physical-name) for the device name
+ everywhere in the SD.
+- Remove .linked.tex files in preparation for cutover to
+ using .tex in place of .wml.
+08Mar05
+- Copy latest config.sub and config.guess from autoconf.
+- Try new way of identifying drives with:
+ "resource-name" (physical-name)
+  More work needed for a complete conversion.
+07Mar05
+- Rework some of the autochanger data so that the DIR has
+ the number of drives.
+- Modify the way the Device info is returned so that it comes
+ back as a special message type and can be sent anytime the
+ Device status changes.
+- Copy the changer name and changer command into the device
+ record if none is specified.
+- Require the changer command and changer name to be specified in
+ and AutoChanger resource.
+- Force all the Media Type records of all devices in an Autochanger
+ to be the same.
+06Mar05
+- Add new "run" command to Job resource in DIR. This permits
+ cloning a job as many times as you want.
+- Pass PoolId to SD on Query request. It is now used in the
+ Find_media catalog request.
+- Reworked the Device resource in the DIR. Eliminated num_waiting
+ and use_count, but added max_writers, reserved, and PoolId.
+- This DIR is no longer compatible with previous SDs.
+- Add since and cloned keywords to the Console run command
+ to support cloning.
+- Implemented store_alist_str() to allow multiple string items
+ to be specified in a .conf file.
+- Added %s (since time) to Job code editing.
+- Reworked reserving drives in the SD. It now does it much simpler
+ and correctly.
+05Mar05
+- Integrate HP-UX patch from Olivier Mehani <olivier.mehani@linbox.com>
+- Fix FD job.c to test correctly for no level.
+
+Changes to 1.37.4:
+04Mar05
+- Change Developers to Developer's Guide as requested by Michael.
+- Fix developers link in manual
+- Add additional dcr changes in SD to allow multiple dcrs.
+02Mar05
+- Fix a few problems with the MySQL table create in 1.37.
+- Delete the new tables in the table delete files.
+- Increase the number of items permitted in a conf table.
+- Make Director loop over alternative Devices specified in the
+ Storage resource until one is reserved by SD.
+- Fix storing of StorageId in Media records.
+- Add AutoSelect = yes|no in bacula-sd.conf
+- Add Autochanger support to Label command.
+- Do not autoselect devices with autoselect set false
+01Mar05
+- Implement setting DIR Storage device to Autochanger
+ name.
+- Select first available device in Autochanger.
+- Pass back actual device name used.
+- Allow Query of AutoChanger.
+- Modify Query to include name of AutoChanger if
+ Device belongs to one.
+- Remove old Pool code in jobq.c
+- Add Autoselect flag to query and DEVICE class (still
+ need Directive).
+28Feb05
+- Lock autochanger script when running.
+- Mark Volume not InChanger if correct volume is not
+ autoloaded.
+- Corrected some typos in the make_xxx_tables.in files.
+- Made preliminary split of pre-run and run code for each
+ job type. This will permit early opening of SD for reserving
+ drives.
+- Add offline and autochanger fields to Device Query record.
+- Correct pthread_mutex_init() for autochanger in SD.
+- Tweak Makefile for LaTeX manual, plus add nav buttons.
+26Feb05
+- Clean up drive reservation system. Add more sanity checks.
+- Implement a few more methods for the DEVICE class in SD.
+- Add latex directories to make clean
+- move DEV_BSIZE to B_DEV_BSIZE to avoid conflicts with
+ certain header files (FreeBSD).
+24Feb05
+- Fix an ASSERT that was triggering in stored/acquire.c
+ attempt to fix a bug report.
+23Feb05
+- Corrected SunOs to SunOS in btraceback (user submitted).
+- Applied patch from Roger HÃ¥kansson <hson@ludd.luth.se>
+ to warn the user of defective AWKs during ./configure.
+20Feb05
+- Add some changes submitted by a user for HP client build.
+ Not all changes accepted.
+- Rework code in filed/backup.c to ease #ifdefing and make
+ program flow more obvious.
+- Split DVD code out of dev.c into dvd.c
+- Tweak #ifdefing to add back all the performance measurement
+ #defines in version.h
+- Put most of MTIOCGET code in a subroutine to simplify the
+ mainline code.
+- Make clean remove old CVS files
+- Remove unnecessary image files from Latex directory
+- Implement remaining parts of Storage DB record and
+ its use in the Director.
+- Implement
+ FullMaxWaitTime, Differential Max Wait Time, and
+ Incremental Max Wait time in Job resource.
+- Start work on SD Autochanger code.
+19Feb05
+- Add back JobId index for MySQL as default -- speeds up
+ pruning.
+- Add more database fields and fix the update scripts to
+ include the new items.
+- Pass actual level to FD so that ClientRun editing can reflect
+ correct level -- ditto for job status. This makes the DIR
+ incompatible with older clients!
+- Move jobq.c acquire resources to static subroutine so that
+ the code logic becomes clearer. This is in preparation for
+ actually using the new Device resources.
+- Fix some lower case problems in sql_cmds.c reported by
+ Debian.
+- Correct a seg fault in the SD reported by a user. Occurred
+ only when a high debug level was set.
+- Modify init_dev() in dev.c to take JCR as first arg so that
+ proper error messages can be reported in next item.
+- Modify the query and use device SD commands to attempt to
+ open the device if it could not previously be opened.
+- Correct error message for Could not reserve device.
+- Correct some minor details with Autochanger resource in SD.
+18Feb05
+- Fix seg fault if debug level 900 set in SD.
+- Truncate Win32 child return code to 8 bits.
+- Remove some old lld's.
+
+Changes to 1.37.3:
+16Feb05
+- Make another attempt at fixing the ClientRunXXX return code
+ bug on Win32 machines.
+- Apply ua_status patch from Carsten Paeth <calle@calle.in-berlin.de>
+ which enforces console ACLs in the status command for Jobs.
+15Feb05
+- Fix Media LabelDate and FirstWritten to be correctly set.
+- Fix deadlock in multiple simultaneous jobs.
+- Fix tape "truncation"/"number of files" after restore bug.
+10Feb05
+- Ensure that correct error messages are returned when
+ reading an ANSI label.
+09Feb05
+- Modified ANSI label code to preserve any ANSI label
+ already found by skipping over it rather than rewriting
+ it.
+- Split the ANSI label code into ansi_label.c
+- Do not let user relabel an ANSI labeled tape.
+- Applied a patch for the console help command supplied
+ in a bug report.
+- Added some new dev methods. Most notably was
+ set_eof(), which handles setting all the dev variables
+ when an EOF is just read. This is now used most everywhere
+ in the code.
+07Feb05
+- Added code to detect that no files were inserted into the
+ tree for a restore. If a specific JobId was specified, the
+ user has the option of restoring everything.
+- More progress in implementing 64 bit DB Ids.
+- Modified the daemon start messages for RH.
+- Implement update scripts for all database types.
+- First cut at implementing restore directory (it will not
+ recurse).
+04Feb05
+- OK, I think ANSI labels work.
+- Added Label Type = ANSI|IBM|Bacula to Device resource in SD.
+ If this is set, it will force writing of the appropriate
+ label type.
+- Added Check Labels = yes|no to Device resource in SD. If this
+ is set, Bacula will check for ANSI labels and accept them,
+ otherwise, ANSI labels will not be accepted when the tape
+ is first mounted.
+02Feb05
+- Second cut ANSI labels.
+01Feb05
+- Merge Preben's patch for ACLs and for Mac OS X resource forks.
+- Some doc updates.
+- Display more informative message when a device was not
+ found or could not be opened.
+- Add the sqlite3 database scripts.
+- Add some patches for 1.36.1 (note, I have now prepared
+ a 1.36.2 with all the patches and some new features --
+ to be documented).
+- Some minor doc updates.
+- Add Arno's baculareport.pl script to the examples directory.
+29Jan05 -- after vacation
+- Add support for SQLite3 (it seems to run at 1/2 the speed
+ of SQLite2). Use --with-sqlite3 instead of --with-sqlite
+ to get SQLite3.
+- Add target for running qemu to boot Rescue CDROM
+- Add code to support kernel 2.6 in Rescue CDROM -- does NOT yet
+ boot correctly.
+- Implement ANSI labels -- not yet tested.
+ This required changes to DB format. No upgrade script yet.
+ Note, more work needed to modify "update" command to handle
+ changing label types, also must restrict volume name lengths
+ to 6 characters.
+- Add new Device, Storage, and MediaType records to DB. No
+ upgrade script yet.
+- Add MediaType to bsr file record types. Not yet used in SD.
+- Permit multiple device specifications in Storage resource in
+ Dir conf file.
+- Implement Device resources. Director requests Device resource
+ info from SD on startup.
+- Note!!!! DIR->SD incompatible with previous versions.
+- Remove multiple Storage definitions in Job resource. One can
+ still specify multiple Storage resources, but they all go into
+ a single alist, and imply sending data to each Storage daemon
+ simultaneously.
+- Implement Device query command between DIR and SD.
+- Allow DIR to "reserve" a Device. It will then be acquired
+ when the FD connects to the SD.
+- Turn all DIR resources into classes, and implement a few class
+ methods -- more to come.
+- Turn DEVICE in SD into a class, and implement a number of inline
+ class methods -- more to come.
+- I had serious problems with ACL errors on my Laptop, and so had
+ to add the following patch:
+ @@ -181,7 +181,7 @@
+ }
+ /***** Do we really want to silently ignore errors from acl_get_file
+ and acl_to_text? *****/
+ - return -1;
+ + return 0;
+ }
+- Added edit_int64()
+- Reworked and tested a bit the htable routines.
+- Major changes to SD acquire.c -- DIR can now reserve devices. Needs
+ lots of testing!!!!
+- Made a special state code for DVD -- this simplifies the logic
+ of the code, but I probably broke it. Testing needed!!!!
+- Add AutoChanger resource to SD, but not yet used.
+
+Changes to 1.37.2:
+12Jan05
+- Integrate Preben 'Peppe' Guldberg <peppe@wielders.org>'s
+ acl patch. Fix case where configured but no ACL exists.
+ Rework calling arguments to be shorter and positioned
+ more typically in Bacula usage.
+11Jan05
+- Fix scripts/bacula.in to have awk on an environment variable
+ and add comments for Solaris users.
+- Turn off inet_aton in src/lib/address_conf.c for Win32
+- Add new files to win32 build and eliminate a compiler warning.
+- Add sample DVD Device resource to bacula-sd.conf
+08Jan05
+- Integrate Nicolas' patch for direct DVD support.
+07Jan05
+- Fix fstype error returns.
+- Apply Preben's cleanup.patch which puts back much of the
+ cleanup code in src/filed/restore.c
+06Jan05
+- Apply all of Preben's patches, but revert to old backup.c
+ and old restore.c in filed. Also turn off code in new
+ acl.c because of errors. The new code, when fully implemented
+ moves platform specific code into acl.c.
+ One of the patches also implements WildFile and WildDir -- thanks.
+01Jan05
+- Implement Python in the SD (no events yet though).
+- Fix some typos in the previous commit.
+30Dec04
+- Enhance CDROM boot to include some documentation at boot time.
+- NOTE!!!!! The CDROM will not boot 2.6 kernels because the
+ boot sequence has changed significantly. Updates to come
+ later.
+- Add memtest option to CDROM boot.
+- Include Nicolas' changes to fix llist JobMedia records.
+- Make sure that ClientRunBefore/After messages from the program
+ are terminated with a newline. Add strerror to output error
+ messages.
+- Return program exit status code in Win32.
+29Dec04
+- Add memtest86 to Bacula Rescue disk
+- Enhance Rescue disk startup screen
+24Dec04
+- Move some variables to eliminate Solaris 2.6 compiler warnings.
+- Fix the seg fault at the end of a job in the FD when using
+ old style include/excludes.
+22Dec04
+- Apply Preben's ACL patch.
+- Integrate Preben's restore patch.
+- Integrate Preben's verify tweaks.
+- Fix doc/latex/Makefile to copy/remove .eps files when building
+ html and web outputs.
+21Dec04
+- Fix Bacula so that it does not exit if there is a syntax error
+ in its conf file during a reload command. Bug 182.
+- Apply fixes suggested for old Solaris networking.
+ Fixes bug 190.
+- Apply Preben 'Peppe' Guldberg <peppe@wielders.org>
+ three patches that clean up white space:
+ ws.patch.02.strings:
+ Breaks strings that span lines into concatenated strings. I am not sure
+ if you like this one. Other code works with concatenated strings, though.
+ ws.patch.03.trailing:
+ This removes trailing whitespace. No changes resulted from this for
+ my setup.
+ ws.patch.04.leading:
+ This replaces space runs at the start of line with tabs. No changes
+ again.
+- Fix overriding storage specification to be done
+ through a subroutine.
+- Fix autoconf so it runs with FC3.
+- Add Python 2.4 to configure search paths.
+- Always copy and delete storage definitions into jcr.
+- Check that VolumeName supplied by Python is valid.
+ Return 0 if not.
+19Dec04
+- Fix undefined in non-Python build.
+- Update rescue disk to include mkinitrd
+- Fix umount_drives in rescue disk (only one arg to umount)
+- Ensure that if SD is manually set in Console, it is used.
+- Put generate_event on pointer and plug it in init. This
+ permits using it in /lib
+- Correct despooling size reported to be Job specific rather
+ than for the whole drive.
+18Dec04
+- Fix bug 207. jcr use count off by one when manually
+ scheduling jobs.
+- Remove FNMATCH test in configure.in and always use
+ the one in our library to get the FN_CASEFOLD GNU
+ extensions on all platforms.
+- While using the rescue CDROM after my computer would not
+ boot, I realized that it would be very useful to have
+  a umount_disks. So, it is now implemented, along with
+ updates to the READMEs and some minor tweaks.
+- Moved mounting the CDROM in the rescue boot from /cdrom
+ to /mnt/cdrom (more standard location).
+- Reboot in CDROM rescue should now work -- requires -d
+ option (no write) to work.
+- Hopefully fixed all the IPV6/4 problems and buffer
+ problems with networking in lib. Bugs 190 and 204.
+ Cleaned up a lot of #ifdefing problems by using routines
+ in address_conf.c
+17Dec04
+- Apply Preben 'Peppe' Guldberg <peppe@wielders.org>
+ alist fix patch.
+- Remove duplicate code from chksum.h (mentioned by Preben).
+13Dec04
+- Integrate Tim Oberfoell <oberfoell@web.de> patch to ACLs
+ to handle both the "standard" and "default" ACLs.
+12Dec04
+- Integrated Preben 'Peppe' Guldberg <peppe@wielders.org>
+ three cleanup patches (btest, verify, find).
+- Integrated Preben 'Peppe' Guldberg <peppe@wielders.org>
+ three cleanup patches (backup, chksum, and verify)
+09Dec04
+- Integrated Preben 'Peppe' Guldberg <peppe@wielders.org>
+ patch to avoid doing MTIOCGET on OSes that do not support
+ it such as OpenBSD.
+- Integrated Preben 'Peppe' Guldberg <peppe@wielders.org>
+ patch to add filesystem type matching to FileSets in the
+ Options resource.
+- Integrated Preben 'Peppe' Guldberg <peppe@wielders.org>
+ patch to add Mac OSX resource fork support (save/restore)
+ to Bacula -- HFS Plus support.
+- Add FileSet to client Job listing query.
+06Dec04
+- Integrated Preben 'Peppe' Guldberg <peppe@wielders.org>
+ patch to backup directories skipped (due to no file system
+ changes or no recursion), and to add a slash to the end
+ of the directory name during the match process.
+- Implement Jamie ffolliott <jamieff@inline.net>
+ patch to dird_conf.c that enables Multiple Connections and
+ fixes a typo in show. The rest of his patch awaits my suggested
+ changes.
+05Dec04
+- Implement run command in Python
+04Dec04
+- Implement conversion of the manual, and some minor
+ tweaks to the script tags.
+- Apply a patch supplied by Preben 'Peppe' Guldberg that implements
+ ignore case in wild cards and regexes.
+- Fix a truncated line in the above patch due to my cut and paste.
+03Dec04
+- Fix it so that the InChanger flag is only changed for Volumes
+ in the same Pool.
+- Add PIDOF configuration path and apply to bacula.in
+- Add user supplied patch to add inet_aton() of old Solaris
+ systems.
+- Require pools to match before allowing multiple simultaneous
+ accesses to same storage resource.
+- Add patch supplied by Martin to correct buffer overrun in
+ bsnprintf() with no library snprintf().
+02Dec04
+- Apply user supplied patch that implements No Hard Links.
+- Document Python interface
+- Add hardlink keyword patch supplied by David R Bosso <dbosso@lsit.ucsb.edu>
+01Dec04
+- Fix non-python prototypes in dummy routines.
+- Add python 2.3 to config search list (user submitted patch)
+- Add JobStatus to Python variables.
+28Nov04
+- Add "python restart" command in Console.
+- Make built-in variables table driven.
+- First cut of Python Events for Bacula. Director only.
+ StartJob, EndJob, NewVolume events.
General:
-Changes to 1.38.3:
+Release 1.38.3 05Jan06:
+04Jan06
+- Move the suitable_drive flag to a better place to prevent
+ premature termination of the reservation if all drives
+ are busy -- should fix Arno's diff/inc pool failures.
+26Dec05
+- Add mutex to single thread VSS code in Win32.
+
Beta release 23Dec05:
22Dec05
- Add OPENSSL_INC to console dependencies, lib dependencies, and
+++ /dev/null
- Technical notes on version 1.39
- Kern Sibbald
-
-General:
-
-Changes to 1.39.3:
-22Dec05
-- Simplify code in askdir.c that waits for creating an appendable
- volume so that it can handle multiple returns from the wait
- code.
-- Modify the wait code to permit multiple returns.
-- Return a zero when "autochanger drives" is called and
- it is not an autochanger.
-- Make rewind_dev() a method taking a DCR as an argument.
- This permits closing and reopening the drive if the
- rewind fails as happens if the drive was loaded while the
- file descriptor was open. This refreshes the file descriptor.
-- Remove the ST_OPENED flag and always rely on fd < 0 for knowing
- if the device is open or not. This should eliminate
- Arnos problem.
-- Return error if reserve cannot find at least one suitable device.
-- Make wait_for_sysop() return correct state information.
-- Fix Win32 state file problem. write was not using compat
- code. This should fix bug #500.
-21Dec05
-- Modify gui on command to set only GUI mode and not batch.
-- Modify .messages command to always print messages regardless
- of the mode.
-- If GUI mode is on, suppress automatic printing of
- You have messages.
-- Delete old bnet packet code.
-- Ignore new BNET_START_SELECT and BNET_END_SELECT signals in
- wx-console.
-- Modify restore command in wx-console to set gui on and to use
- only .messages instead of messages. Hopefully this fixes bug
- #514.
-- Fix seg fault in exit of acquire when canceling a job --
- reported by Wolfgang Denk
-- Pull in latest reservation system changes from 1.38
-- Make .messages command always print messages regardless
- of the automessages flag.
-17Dec05
-- Fix seg fault if user labels a drive directory bug #513
-- Remove quotes around Version as it breaks things.
-16Dec05
-- Merge in Aleksandar Milivojevic's mods to the spec file.
-- Apply sparse code fix for raw drives and fifos. Bug 506
-- Thorsten fixed Unicode cd problem with wx-console bug 505.
-14Dec05
-- Correct reservation system to do a last ditch try
- for any mounted volume, then anyone anywhere.
-- Add quotes around table Version because of
- error in MySQL 4.1.15 -- bug report submitted.
-- Correct some minor problems with btape in the fill
- command.
-- Updates to ssh-tunnel from Joshua Kugler.
-- Added a report.pl program from Jonas Bjorklund.
-- Simplify the O_NONBLOCK open() code for tape drives,
- and always open nonblocking.
-- Do not wait for open() if EIO returned (shouldn't happen).
-- Eliminate 3 argument to tape open().
-- Correct the slot # edited in the 3995 Bad autochanger unload
- message.
-- With -S on bscan (show progress) do not divide by zero.
-13Dec05
-- Make cancel pthread_cond_signal() pthread_cond_broadcast().
-- When dcr is freed, also broadcast dev->wait_next_vol signal.
-- Remove unused code in wait_for_device.
-- Make wait_for_device() always return after 60 seconds of wait.
-
-Changes to 1.39.2:
-13Dec05
-- Add stubs for non-crypto build.
-12Dec05
-- Use localhost if no network configured
-11Dec05
-- Eliminated duplicate MaxVolBytes in cat update -- bug 509.
-- Remove debug print.
-- Add bail_out in error during state file reading.
-10Dec05
-- Merge changes made to 1.38.3 into HEAD
-- Add stubs for pygtk-console code
-- Create Makefile.in for pygtk-console code
-09Dec05
-- Merge updates into 1.38 branch
-- Update specs to include mysql4 define.
-- Fix when attributes are sent, must be after binit().
-- Stop read_record() if status not ok in second loop.
-- Return rec->FileIndex in dcr->VolLastIndex for normal
- and partial records in read_record(). This allows bscan
- to get FileIndex at EOT correct.
-- Fix butil.c to correctly set dcr -- fixes seg fault in bls.
-08Dec05
-- Fix Win32 built to work with new crypto code.
-- Apply patch supplied by user (slightly modified) to fix
- correct detection of holes in block devices and FIFOs.
- Bug # 506.
-- Apply patch supplied by user (slightly modified)
- to fix SD hang with multiple pools and bad client
- IP. Fixes bug # 508.
-07Dec05
-- Add nagios plugin to the examples directory. Submitted by
- Christian Masopust.
-- Remove warning message about multiple saves of hardlinked files
- from find_one.c as it can generate too many warning messages.
-- Modify most restore error messages to be queued so that they
- appear at the end of the job rather than mixted with the restore
- listing where they could be "lost".
-06Dec05
-- Reset timeout values before select() per patch from
- Frank Sweetser for problems with non-blocking sockets.
-- Unlink the state file if either reading or writing it gets
- errors. Hopefully this will fix Win32 exit problems.
-- Add sanity check in append.c to ensure that dcr is not NULL.
- This can happen if multiple drive autochanger SCSI control
- channel and drive indicies do not correspond.
-05Dec05
-- Get next volume from Scratch pool before creating a volume.
-- Set new Pool defaults in Vol when moved from Scratch Pool.
-- Remove argument from create_bacula_database for SQLite as it
- caused an error.
-- Add back index code so that two drive autochangers can get
- a second tape.
-- Change a bunch of debug levels to aid debugging autochangers.
-- Fix reservation so that mutexes are properly applied.
-- Rework reservation algorithm so that two drives can be used
- at the same time.
-04Dec05
-- Landon merged his data encription changes into the HEAD
-- Apply days keyword patch from Alexander.Bergolth at wu-wien.ac.at
- If this patch is applied, the number of days can be specified with
- "list nextvol days=xx"
- or
- "status dir days=xx"
- My use case is to be able to preview the next scheduled job (and the
- next tape to be used) on fridays if there are no scheduled jobs during
- the weekend.
-Changes to 1.39.1:
-03Dec05
-- Fix font code in gnome2 console user patch. Fixes bug #501.
-- Fix malformatted bnet error message that caused seg fault
- fixes bug 502
-- Applied user patch to improve README.vc8 in src/win32.
-29Nov05
-- Add Migrate, Copy, Archive Job types (some where there)
-- Correct some more editing of JobId's (for 64 bit compatibility).
-- Ensure that StorageId is stored in Media record when ever possible.
-- Add Migration Job to Job.
-- Add Migration Time, Migration High Bytes, Migration Low Bytes
- Next Pool to Pool resource.
-- Add more code to mac.c (migration archive copy).
-- Change Start Storage daemon job to require read and write storage
- pointers.
-- Pass read storage data to SD as well as write storage data.
-- Remove old code from winservice.cpp
-- Break on error in scan.
-- Fix typo in signal.c
-- Separate read/write DCR in SD. Add jcr->read_dcr.
-- Cleanup how find_device() works.
-- Add read output to Status in SD.
-Changes to 1.39.0:
-23Nov05
-- Add red-black btree routines
-21Nov05
-- Remove abs() in bfile.c so that it compiles on Solaris.
- Bug #491.
-20Nov05
-- Fix crash in tray-monitor when daemon disconnects. Bug #479.
-- Fix bnet-server bug found on OpenBSD. Bug #486
-- Fix cancel failure bug. Bug #481
-- Fix failure when Pool name has spaces. Bug #487
-- Fix SD crash in autochanger code. Mutex failure. Bug #488
-- Fix a couple of free()s in src/filed/acl.c
-- Fix memory overrun in bfile.c in building OS X resource
- fork filename. Bug #489
-- Add Pool name to SD status output.
-14Nov05
-- Apply SunOS patch for ACLs submitted by David Duchscher.
-- Make sure to set storage before trying to set drive.
-- Add bacula_mail_summary.sh to examples directory. It makes
- a single email summary of any number of jobs. Submitted
- by Adrew J. Millar.
-- Make sure when we do a mount to unblock the device even
- if the drive could not be opened.
-13Nov05
-- Remove the USE_WIN32STREAMEXTRACTION #defines (always on)
- and correct a few minor problems to make it build on Linux.
-10Nov05
-- Remove delete of CVS from all Makefiles
-- Fix seg fault when clicking on Add button in wx-console
- restore panel. Bug #470.
-- Fix copyright date and URL typo -- bug #468.
-- Change autostart install for FreeBSD to look for rc.conf
- rather than rc.local as suggested fix for bug #466.
-- Apply patch supplied by Eric Bollinger to fix PostgreSQL
- grant on status. Bug #465
-- Apply patch supplied by Eric Bollinger to fix PostgreSQL
- update script. Bug #464
-- Tweak #ifdefing a bit in new Win32 stream code.
-- Fix #ifdeffing for FD_NO_SEND_TEST.
-- Add documentation of performance #defines
--- /dev/null
+ Technical notes on version 1.37
+ Nicolas Boichat
+
+General:
+
+Changes to 1.37.*:
+26Oct05
+ - dvd.c: update VolParts when writing the last part.
+19Oct05
+ - configure: add check for dd, remove check for df
+ - block.c:do_dvd_size_checks: Check we are writing to a dvd before doing tests
+ (reported by David Raine on the list, "[Bacula-users] LTO drive - End Of Volume error").
+ - Update bacula-sd.conf.in to use dvd-handler correctly.
+ - dvd.c:dvd_write_part: Increase timeout when writing the first part (see the code for
+ more details).
+18Oct05
+ - Modify .backups command to get a fileset parameter (fix bug #444).
+17Oct05
+ - Fix bug when recycling DVD devices (append flag was removed).
+ - Add tests for dvd+rw-format in configure script.
+ - scripts/dvd-handler: Reformat DVD-RW when needed. This needs dvd+rw-format.
+ - Add patch for dvd+rw-tools in patches dir (this should probably be elsewhere).
+16Oct05
+ - Remove scripts/dvd-freespace and scripts/dvd-writepart, as they are now
+ merged into scripts/dvd-handler. Note: Documentation needs to be updated.
+ - scripts/dvd-handler: "zero" brand-new DVD+/-RW to fix a problem with some
+ DVD-writers, thanks to Arno Lehmann for reporting this, and providing the
+ way to fix it.
+ - new scripts/dvd-handler. Note: it also needs a patched version of dvd+rw-tools.
+ - new scripts/dvd-freespace. Note: it needs a patched version of dvd+rw-tools.
+ - dvd.c:dvd_write_part: Don't write empty part. (Fix 4GB crossing bug reported by Arno Lehmann)
+14Oct05
+ - dvd.c:dvd_write_part: Use part_size and not max_part_size when setting write timeout.
+ - dvd.c:do_mount_dev: When checking if the DVD is mounted, do not count ., .. and .keep (needed on Gentoo).
+15Aug05
+ - Convert dvd-writepart to Python.
+ - Increase delay from 3 seconds to 5 seconds between SIGTERM and SIGKILL when
+ killing external programs.
+13Aug05
+ - Add gettext macros in autoconf/gettext-macros.
+ - Modify how localedir is set in configure.in.
+ - Remove setlocale check (useless).
+10Aug05
+ - Mark translatable strings in all source files.
+08Aug05
+ - Create French and Italian translation files (fr.po, it.po).
+ - Add support for translation in configure and Makefiles.
+ - Update autoconf/aclocal.m4 so it is automatically created with aclocal
+ (Note: autoconf/gnome-macros is not used anymore, it may be removed).
+30Jul05
+ - Fix src/lib/bpipe.c:run_program and run_program_full_output to detect if the watchdog
+ killed the program, and return an error if it is the case.
+26Apr05
+ - Modify parse_config to get a LEX_ERROR_HANDLER as a parameter
+ - lex_open_file now returns NULL if the file can't be opened. All calling functions have
+ been adapted.
+ - Remove set_exit_on_error function
+07Apr05
+ - Fix "unknown device type" problem with DVD devices.
+ - Fix crash when there is no media in the DVD drive.
+09Jan05
+ - Update the documentation and ReleaseNotes.
+05Jan05
+ - Add FreeSpaceCommand in Device (SD configuration file) and implement it.
+ - Some modifications (again) on how guessed volume names are handled (now it should work).
+ - Part files on the hard disk are removed if they are empty.
+04Jan05
+ - Major fixes on how guessed volume names are handled.
+ - Minor fix in src/stored/append.c.
+ - Replace, when possible, POOLMEM by POOL_MEM in the new code of src/stored/dev.c.
+ - New script, scripts/dvd-freespace, which gets the free space available on a writable DVD.
+03Jan05
+ - Add WritePartAfterJob directive in Job resource (Director)
+ - Add WritePartAfterJob directive in Schedule Resource (Director)
+ - Implement these new directives
+02Jan05
+ - New function, open_guess_name_dev in src/stored/dev.c, which tries to guess the volume
+ name of a mounted device, so the label can be read.
+ - New script, scripts/dvd-writepart, which write parts to DVD+/-R(W).
+ - Removed WriteFirstPartCommand directive in Device (SD configuration file).
+ - Use readdir_r instead of readdir (src/stored/dev.c:open_guess_name_dev).
+01Jan05
+ - Add RequiresMount, MountPoint, MountCommand, UnmountCommand directives in Device (SD configuration file).
+ - Implement these directives (volumes can now be restored from a manually written DVD).
+ - Add WriteFirstPartCommand, WritePartCommand directives in Device (SD configuration file).
+ - Implement these directives (DVD writing now works).
+ - New function run_program_full_output in src/lib/bpipe.c.
+ - Lots of bugfixes and cleanups in the new code.
+29Dec04
+ - Add VolParts field in Media table
+ - Add MaximumPartSize directive in Device (SD configuration file)
+  - File Volumes can now be split into multiple files ("parts")
+ - Fix SQL error in sql_list while doing "llist jobmedia"
+++ /dev/null
- Technical notes on version 1.38
- Nicolas Boichat
-
-General:
-
-Release 1.38.0: 31 October 2005
#else
#define have_tls 0
#endif
+/* For compatibility with 1.39 */
+#define cleanup_crypto cleanup_tls
+#define init_crypto init_tls
#ifndef ETIME
#define ETIME ETIMEDOUT
/*
* Default network buffer size
*/
-#define DEFAULT_NETWORK_BUFFER_SIZE (64 * 1024)
+#define DEFAULT_NETWORK_BUFFER_SIZE (32 * 1024)
/*
* Stream definitions. Once defined these must NEVER
*
* STREAM_UNIX_ATTRIBUTES
* STREAM_UNIX_ATTRIBUTES_EX
- * STREAM_MD5_DIGEST
- * STREAM_SHA1_DIGEST
- * STREAM_SHA256_DIGEST
- * STREAM_SHA512_DIGEST
+ * STREAM_MD5_SIGNATURE
+ * STREAM_SHA1_SIGNATURE
*/
-#define STREAM_NONE 0 /* Reserved Non-Stream */
#define STREAM_UNIX_ATTRIBUTES 1 /* Generic Unix attributes */
#define STREAM_FILE_DATA 2 /* Standard uncompressed data */
-#define STREAM_MD5_DIGEST 3 /* MD5 digest for the file */
+#define STREAM_MD5_SIGNATURE 3 /* MD5 signature for the file */
#define STREAM_GZIP_DATA 4 /* GZip compressed file data */
/* Extended Unix attributes with Win32 Extended data. Deprecated. */
#define STREAM_UNIX_ATTRIBUTES_EX 5 /* Extended Unix attr for Win32 EX */
#define STREAM_SPARSE_GZIP_DATA 7
#define STREAM_PROGRAM_NAMES 8 /* program names for program data */
#define STREAM_PROGRAM_DATA 9 /* Data needing program */
-#define STREAM_SHA1_DIGEST 10 /* SHA1 digest for the file */
+#define STREAM_SHA1_SIGNATURE 10 /* SHA1 signature for the file */
#define STREAM_WIN32_DATA 11 /* Win32 BackupRead data */
#define STREAM_WIN32_GZIP_DATA 12 /* Gzipped Win32 BackupRead data */
#define STREAM_MACOS_FORK_DATA 13 /* Mac resource fork */
#define STREAM_UNIX_ATTRIBUTES_ACCESS_ACL 15 /* Standard ACL attributes on UNIX */
#define STREAM_UNIX_ATTRIBUTES_DEFAULT_ACL 16 /* Default ACL attributes on UNIX */
/*** FIXME ***/
-#define STREAM_SHA256_DIGEST 17 /* SHA-256 digest for the file */
-#define STREAM_SHA512_DIGEST 18 /* SHA-512 digest for the file */
-#define STREAM_SIGNED_DIGEST 19 /* Signed File Digest, ASN.1 Encoded */
-#define STREAM_ENCRYPTED_FILE_DATA 20 /* Encrypted, uncompressed data */
-#define STREAM_ENCRYPTED_WIN32_DATA 21 /* Encrypted, uncompressed Win32 BackupRead data */
/*
/* Definitions for upper part of type word (see above). */
#define AR_DATA_STREAM (1<<16) /* Data stream id present */
+/*
+ * Internal code for Signature types
+ */
+#define NO_SIG 0
+#define MD5_SIG 1
+#define SHA1_SIG 2
+
/*
* Tape label types -- stored in catalog
*/
#include <openssl/x509v3.h>
#include <openssl/rand.h>
#include <openssl/err.h>
-#include <openssl/asn1.h>
-#include <openssl/asn1t.h>
#undef STORE
#endif
/*
- Copyright (C) 2000-2005 Kern Sibbald
+ Copyright (C) 2000-2006 Kern Sibbald
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
return stat;
}
-int db_add_digest_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *digest, int type)
+int db_add_SIG_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *SIG, int type)
{
return 1;
}
DBId_t PathId;
DBId_t FilenameId;
FileId_t FileId;
- char *Digest;
- int DigestType;
+ char *Sig;
+ int SigType;
};
DBId_t PathId;
JobId_t MarkId;
char LStat[256];
- char Digest[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)];
- int DigestType; /* NO_SIG/MD5_SIG/SHA1_SIG */
+ char SIG[50];
+ int SigType; /* NO_SIG/MD5_SIG/SHA1_SIG */
};
/* Pool record -- same format as database */
int db_update_media_record(JCR *jcr, B_DB *db, MEDIA_DBR *mr);
int db_update_media_defaults(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr);
int db_update_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr);
-int db_add_digest_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *digest, int type);
+int db_add_SIG_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *SIG, int type);
int db_mark_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, JobId_t JobId);
void db_make_inchanger_unique(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr);
static int db_create_file_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
{
int stat;
- static char *no_digest = "0";
- char *digest;
+ static char *no_sig = "0";
+ char *sig;
ASSERT(ar->JobId);
ASSERT(ar->PathId);
ASSERT(ar->FilenameId);
- if (ar->Digest == NULL) {
- digest = no_digest;
+ if (ar->Sig == NULL) {
+ sig = no_sig;
} else {
- digest = ar->Digest;
+ sig = ar->Sig;
}
/* Must create it */
"INSERT INTO File (FileIndex,JobId,PathId,FilenameId,"
"LStat,MD5) VALUES (%u,%u,%u,%u,'%s','%s')",
ar->FileIndex, ar->JobId, ar->PathId, ar->FilenameId,
- ar->attr, digest);
+ ar->attr, sig);
if (!INSERT_DB(jcr, mdb, mdb->cmd)) {
Mmsg2(&mdb->errmsg, _("Create db File record %s failed. ERR=%s"),
} else {
fdbr->FileId = (FileId_t)str_to_int64(row[0]);
bstrncpy(fdbr->LStat, row[1], sizeof(fdbr->LStat));
- bstrncpy(fdbr->Digest, row[2], sizeof(fdbr->Digest));
+ bstrncpy(fdbr->SIG, row[2], sizeof(fdbr->SIG));
stat = 1;
}
} else {
*/
-
/* The following is necessary so that we do not include
* the dummy external definition of DB.
*/
*/
/* Update the attributes record by adding the file digest */
int
-db_add_digest_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *digest,
+db_add_SIG_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *digest,
int type)
{
int stat;
- char ed1[CRYPTO_DIGEST_MAX_SIZE];
+ char ed1[50];
db_lock(mdb);
Mmsg(mdb->cmd, "UPDATE File SET MD5='%s' WHERE FileId=%s", digest,
/*
- Copyright (C) 2000-2005 Kern Sibbald
+ Copyright (C) 2000-2006 Kern Sibbald
- This program is free software; you can redistribute it and/or
- modify it under the terms of the Lesser GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
- This program is distributed in the hope that it will be useful,
+ This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307, USA.
*/
@$(MV) Makefile Makefile.bak
@$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile
@$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile
- @$(CXX) -S -M $(CPPFLAGS) $(CONS_INC) -I$(srcdir) -I$(basedir) *.c >> Makefile
+ @$(CXX) -S -M $(CPPFLAGS) $(CONS_INC) $(OPENSSL_INC) -I$(srcdir) -I$(basedir) *.c >> Makefile
@if test -f Makefile ; then \
$(RMF) Makefile.bak; \
else \
parse_config(configfile);
- if (init_crypto() != 0) {
- Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n"));
+ if (init_tls() != 0) {
+ Emsg0(M_ERROR_TERM, 0, _("TLS library initialization failed.\n"));
}
if (!check_resources()) {
exit(1);
}
already_here = true;
- cleanup_crypto();
+ cleanup_tls();
free_pool_memory(args);
con_term();
(void)WSACleanup(); /* Cleanup Windows sockets */
#include "bacula.h"
#include "dird.h"
-#include "findlib/find.h"
/*
* Handle catalog request
ar->Stream = Stream;
ar->link = NULL;
ar->JobId = jcr->JobId;
- ar->Digest = NULL;
- ar->DigestType = CRYPTO_DIGEST_NONE;
+ ar->Sig = NULL;
+ ar->SigType = 0;
jcr->cached_attribute = true;
Dmsg2(400, "dird<filed: stream=%d %s\n", Stream, fname);
Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
}
#endif
- } else if (crypto_digest_stream_type(Stream) != CRYPTO_DIGEST_NONE) {
+ } else if (Stream == STREAM_MD5_SIGNATURE || Stream == STREAM_SHA1_SIGNATURE) {
fname = p;
if (ar->FileIndex != FileIndex) {
- Jmsg(jcr, M_WARNING, 0, _("Got %s but not same File as attributes\n"), stream_to_ascii(Stream));
+ Jmsg(jcr, M_WARNING, 0, _("Got MD5/SHA1 but not same File as attributes\n"));
} else {
- /* Update digest in catalog */
- char digestbuf[CRYPTO_DIGEST_MAX_SIZE];
- int len = 0;
- int type = CRYPTO_DIGEST_NONE;
-
- switch(Stream) {
- case STREAM_MD5_DIGEST:
- len = CRYPTO_DIGEST_MD5_SIZE;
- type = CRYPTO_DIGEST_MD5;
- break;
- case STREAM_SHA1_DIGEST:
- len = CRYPTO_DIGEST_SHA1_SIZE;
- type = CRYPTO_DIGEST_SHA1;
- break;
- case STREAM_SHA256_DIGEST:
- len = CRYPTO_DIGEST_SHA256_SIZE;
- type = CRYPTO_DIGEST_SHA256;
- break;
- case STREAM_SHA512_DIGEST:
- len = CRYPTO_DIGEST_SHA512_SIZE;
- type = CRYPTO_DIGEST_SHA512;
- break;
- default:
- /* Never reached ... */
- Jmsg(jcr, M_ERROR, 0, _("Catalog error updating file digest. Unsupported digest stream type: %d"),
- Stream);
+ /* Update signature in catalog */
+ char SIGbuf[50]; /* 24 bytes should be enough */
+ int len, type;
+ if (Stream == STREAM_MD5_SIGNATURE) {
+ len = 16;
+ type = MD5_SIG;
+ } else {
+ len = 20;
+ type = SHA1_SIG;
}
-
- bin_to_base64(digestbuf, fname, len);
- Dmsg3(400, "DigestLen=%d Digest=%s type=%d\n", strlen(digestbuf), digestbuf, Stream);
+ bin_to_base64(SIGbuf, fname, len);
+ Dmsg3(400, "SIGlen=%d SIG=%s type=%d\n", strlen(SIGbuf), SIGbuf, Stream);
if (jcr->cached_attribute) {
- ar->Digest = digestbuf;
- ar->DigestType = type;
- Dmsg2(400, "Cached attr with digest. Stream=%d fname=%s\n", ar->Stream, ar->fname);
+ ar->Sig = SIGbuf;
+ ar->SigType = type;
+ Dmsg2(400, "Cached attr with SIG. Stream=%d fname=%s\n", ar->Stream, ar->fname);
if (!db_create_file_attributes_record(jcr, jcr->db, ar)) {
Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
}
jcr->cached_attribute = false;
} else {
- if (!db_add_digest_to_file_record(jcr, jcr->db, ar->FileId, digestbuf, type)) {
- Jmsg(jcr, M_ERROR, 0, _("Catalog error updating file digest. %s"),
+ if (!db_add_SIG_to_file_record(jcr, jcr->db, ar->FileId, SIGbuf, type)) {
+ Jmsg(jcr, M_ERROR, 0, _("Catalog error updating MD5/SHA1. %s"),
db_strerror(jcr->db));
}
}
{"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
{"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
{"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
- {"migrationjob", store_res, ITEM(res_job.migration_job), R_JOB, 0, 0},
{"jobdefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
{"run", store_alist_str, ITEM(res_job.run_cmds), 0, 0, 0},
{"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
{"catalogfiles", store_yesno, ITEM(res_pool.catalog_files), 1, ITEM_DEFAULT, 1},
{"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
{"volumeuseduration", store_time, ITEM(res_pool.VolUseDuration), 0, 0, 0},
- {"migrationtime", store_time, ITEM(res_pool.MigrationTime), 0, 0, 0},
- {"migrationhighbytes", store_size, ITEM(res_pool.MigrationHighBytes), 0, 0, 0},
- {"migrationlowbytes", store_size, ITEM(res_pool.MigrationLowBytes), 0, 0, 0},
- {"nextpool", store_res, ITEM(res_pool.NextPool), R_POOL, 0, 0},
{"autoprune", store_yesno, ITEM(res_pool.AutoPrune), 1, ITEM_DEFAULT, 1},
{"recycle", store_yesno, ITEM(res_pool.Recycle), 1, ITEM_DEFAULT, 1},
{NULL, NULL, NULL, 0, 0, 0}
{"admin", JT_ADMIN},
{"verify", JT_VERIFY},
{"restore", JT_RESTORE},
- {"copy", JT_COPY},
- {"migrate", JT_MIGRATE},
{NULL, 0}
};
{
URES *res = (URES *)reshdr;
bool recurse = true;
- char ed1[100], ed2[100], ed3[100];
+ char ed1[100], ed2[100];
DEVICE *dev;
if (res == NULL) {
}
break;
case R_CONSOLE:
+#ifdef HAVE_TLS
sendit(sock, _("Console: name=%s SSL=%d\n"),
res->res_con.hdr.name, res->res_con.tls_enable);
+#else
+ sendit(sock, _("Console: name=%s SSL=%d\n"),
+ res->res_con.hdr.name, BNET_TLS_NONE);
+#endif
break;
case R_COUNTER:
if (res->res_counter.WrapCounter) {
res->res_pool.recycle_oldest_volume,
res->res_pool.purge_oldest_volume,
res->res_pool.MaxVolJobs, res->res_pool.MaxVolFiles);
- sendit(sock, _(" MigTime=%s MigHiBytes=%s MigLoBytes=%s\n"),
- edit_utime(res->res_pool.MigrationTime, ed1, sizeof(ed1)),
- edit_uint64(res->res_pool.MigrationHighBytes, ed2),
- edit_uint64(res->res_pool.MigrationLowBytes, ed3));
- if (res->res_pool.NextPool) {
- sendit(sock, _(" --> "));
- dump_resource(-R_POOL, (RES *)res->res_pool.NextPool, sendit, sock);
- }
break;
case R_MSGS:
sendit(sock, _("Messages: name=%s\n"), res->res_msgs.hdr.name);
switch (type) {
/* Resources not containing a resource */
case R_CATALOG:
+ case R_POOL:
case R_MSGS:
case R_FILESET:
case R_DEVICE:
break;
- /*
- * Resources containing another resource or alist. First
- * look up the resource which contains another resource. It
- * was written during pass 1. Then stuff in the pointers to
- * the resources it contains, which were inserted this pass.
- * Finally, it will all be stored back.
- */
- case R_POOL:
- /* Find resource saved in pass 1 */
- if ((res = (URES *)GetResWithName(R_POOL, res_all.res_con.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Pool resource %s\n"), res_all.res_con.hdr.name);
- }
- /* Update it with pointer to NextPool from this pass (res_all) */
- res->res_pool.NextPool = res_all.res_pool.NextPool;
- break;
+ /* Resources containing another resource or alist */
case R_CONSOLE:
if ((res = (URES *)GetResWithName(R_CONSOLE, res_all.res_con.hdr.name)) == NULL) {
Emsg1(M_ERROR_TERM, 0, _("Cannot find Console resource %s\n"), res_all.res_con.hdr.name);
#include "bacula.h"
#include "dird.h"
-#include "findlib/find.h"
/* Commands sent to File daemon */
static char filesetcmd[] = "fileset%s\n"; /* set full fileset */
jcr->FileIndex = 0;
Dmsg0(120, "bdird: waiting to receive file attributes\n");
- /* Pickup file attributes and digest */
+ /* Pickup file attributes and signature */
while (!fd->errors && (n = bget_dirmsg(fd)) > 0) {
/*****FIXME****** improve error handling to stop only on
long file_index;
int stream, len;
char *attr, *p, *fn;
- char Opts_Digest[MAXSTRING]; /* either Verify opts or MD5/SHA1 digest */
- char digest[CRYPTO_DIGEST_MAX_SIZE];
+ char Opts_SIG[MAXSTRING]; /* either Verify opts or MD5/SHA1 signature */
+ char SIG[MAXSTRING];
jcr->fname = check_pool_memory_size(jcr->fname, fd->msglen);
- if ((len = sscanf(fd->msg, "%ld %d %s", &file_index, &stream, Opts_Digest)) != 3) {
+ if ((len = sscanf(fd->msg, "%ld %d %s", &file_index, &stream, Opts_SIG)) != 3) {
Jmsg(jcr, M_FATAL, 0, _("<filed: bad attributes, expected 3 fields got %d\n"
"msglen=%d msg=%s\n"), len, fd->msglen, fd->msg);
set_jcr_job_status(jcr, JS_ErrorTerminated);
ar.ClientId = jcr->ClientId;
ar.PathId = 0;
ar.FilenameId = 0;
- ar.Digest = NULL;
- ar.DigestType = CRYPTO_DIGEST_NONE;
+ ar.Sig = NULL;
+ ar.SigType = 0;
Dmsg2(111, "dird<filed: stream=%d %s\n", stream, jcr->fname);
Dmsg1(120, "dird<filed: attr=%s\n", attr);
continue;
}
jcr->FileId = ar.FileId;
- } else if (crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) {
+ } else if (stream == STREAM_MD5_SIGNATURE || stream == STREAM_SHA1_SIGNATURE) {
if (jcr->FileIndex != (uint32_t)file_index) {
- Jmsg3(jcr, M_ERROR, 0, _("%s index %d not same as attributes %d\n"),
- stream_to_ascii(stream), file_index, jcr->FileIndex);
+ Jmsg2(jcr, M_ERROR, 0, _("MD5/SHA1 index %d not same as attributes %d\n"),
+ file_index, jcr->FileIndex);
set_jcr_job_status(jcr, JS_Error);
continue;
}
- db_escape_string(digest, Opts_Digest, strlen(Opts_Digest));
- Dmsg2(120, "DigestLen=%d Digest=%s\n", strlen(digest), digest);
- if (!db_add_digest_to_file_record(jcr, jcr->db, jcr->FileId, digest,
- crypto_digest_stream_type(stream))) {
+ db_escape_string(SIG, Opts_SIG, strlen(Opts_SIG));
+ Dmsg2(120, "SIGlen=%d SIG=%s\n", strlen(SIG), SIG);
+ if (!db_add_SIG_to_file_record(jcr, jcr->db, jcr->FileId, SIG,
+ stream==STREAM_MD5_SIGNATURE?MD5_SIG:SHA1_SIG)) {
Jmsg1(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db));
set_jcr_job_status(jcr, JS_Error);
}
enum {
INC_KW_NONE,
INC_KW_COMPRESSION,
- INC_KW_DIGEST,
+ INC_KW_SIGNATURE,
INC_KW_ENCRYPTION,
INC_KW_VERIFY,
INC_KW_ONEFS,
*/
static struct s_kw FS_option_kw[] = {
{"compression", INC_KW_COMPRESSION},
- {"signature", INC_KW_DIGEST},
+ {"signature", INC_KW_SIGNATURE},
{"encryption", INC_KW_ENCRYPTION},
{"verify", INC_KW_VERIFY},
{"onefs", INC_KW_ONEFS},
* included files.
*/
static struct s_fs_opt FS_options[] = {
- {"md5", INC_KW_DIGEST, "M"},
- {"sha1", INC_KW_DIGEST, "S"},
- {"sha256", INC_KW_DIGEST, "S2"},
- {"sha512", INC_KW_DIGEST, "S3"},
+ {"md5", INC_KW_SIGNATURE, "M"},
+ {"sha1", INC_KW_SIGNATURE, "S"},
{"gzip", INC_KW_COMPRESSION, "Z6"},
{"gzip1", INC_KW_COMPRESSION, "Z1"},
{"gzip2", INC_KW_COMPRESSION, "Z2"},
admin_cleanup(jcr, JS_ErrorTerminated);
}
break;
- case JT_MIGRATE:
+ case JT_MIGRATION:
case JT_COPY:
case JT_ARCHIVE:
if (!do_mac_init(jcr)) { /* migration, archive, copy */
admin_cleanup(jcr, JS_ErrorTerminated);
}
break;
- case JT_MIGRATE:
+ case JT_MIGRATION:
case JT_COPY:
case JT_ARCHIVE:
if (do_mac(jcr)) { /* migration, archive, copy */
bstrncpy(fsr.FileSet, jcr->fileset->hdr.name, sizeof(fsr.FileSet));
if (jcr->fileset->have_MD5) {
struct MD5Context md5c;
- unsigned char digest[MD5HashSize];
+ unsigned char signature[16];
memcpy(&md5c, &jcr->fileset->md5c, sizeof(md5c));
- MD5Final(digest, &md5c);
- bin_to_base64(fsr.MD5, (char *)digest, MD5HashSize);
+ MD5Final(signature, &md5c);
+ bin_to_base64(fsr.MD5, (char *)signature, 16); /* encode 16 bytes */
bstrncpy(jcr->fileset->MD5, fsr.MD5, sizeof(jcr->fileset->MD5));
} else {
- Jmsg(jcr, M_WARNING, 0, _("FileSet MD5 digest not found.\n"));
+ Jmsg(jcr, M_WARNING, 0, _("FileSet MD5 signature not found.\n"));
}
if (!jcr->fileset->ignore_fs_changes ||
!db_get_fileset_record(jcr, jcr->db, &fsr)) {
AND Media.MediaId=JobMedia.MediaId
AND JobMedia.JobId=Job.JobId
ORDER by Job.StartTime;
-# 16
-:List File record for given Job and File
-*Enter JobId:
-*Enter Full path (no filename) with trailing slash:
-*Enter Filename:
-SELECT File.JobId AS JobId,FileIndex FROM File,Path,Filename
- WHERE File.JobId=%1 AND
- Path.Path='%2' AND Filename.Name='%3' AND
- File.PathId=Path.PathId AND File.FilenameId=Filename.FilenameId;
-SELECT JobId,Name,VolSessionId,VolsessionTime,JobFiles FROM Job WHERE JobId=%1;
-SELECT JobId,MediaId,FirstIndex,LastIndex,StartFile,EndFile,StartBlock,EndBlock,
- VolIndex FROM JobMedia WHERE JobId=%1;
-SELECT VolumeName FROM Media,JobMedia WHERE JobMedia.JobId=%1 AND
- Media.MediaId=JobMedia.MediaId;
static int defaultscmd(UAContext *ua, const char *cmd)
{
JOB *job;
- CLIENT *client;
- STORE *storage;
- POOL *pool;
-
- /* Job defaults */
if (ua->argc == 2 && strcmp(ua->argk[1], "job") == 0) {
job = (JOB *)GetResWithName(R_JOB, ua->argv[1]);
if (job) {
bsendmsg(ua, "type=%s", job_type_to_str(job->JobType));
bsendmsg(ua, "fileset=%s", job->fileset->hdr.name);
}
- }
- /* Client defaults */
- else if(ua->argc == 2 && strcmp(ua->argk[1], "client") == 0) {
- client = (CLIENT *)GetResWithName(R_CLIENT, ua->argv[1]);
- if (client) {
- bsendmsg(ua, "client=%s", client->hdr.name);
- bsendmsg(ua, "address=%s", client->address);
- bsendmsg(ua, "fdport=%d", client->FDport);
- bsendmsg(ua, "file_retention=%d", client->FileRetention);
- bsendmsg(ua, "job_retention=%d", client->JobRetention);
- bsendmsg(ua, "autoprune=%d", client->AutoPrune);
- }
- }
- /* Storage defaults */
- else if(ua->argc == 2 && strcmp(ua->argk[1], "storage") == 0) {
- storage = (STORE *)GetResWithName(R_STORAGE, ua->argv[1]);
- DEVICE *device = (DEVICE *)storage->device->first();
- if (storage) {
- bsendmsg(ua, "storage=%s", storage->hdr.name);
- bsendmsg(ua, "address=%s", storage->address);
- bsendmsg(ua, "media_type=%s", storage->media_type);
- bsendmsg(ua, "sdport=%d", storage->SDport);
- bsendmsg(ua, "name=%s", storage->hdr.name);
- bsendmsg(ua, "device=%s", device->hdr.name);
- if (storage->device->size() > 1)
- while ((device = (DEVICE *)storage->device->next()))
- bsendmsg(ua, ",%s", device->hdr.name);
- }
- }
- /* Pool defaults */
- else if(ua->argc == 2 && strcmp(ua->argk[1], "pool") == 0) {
- pool = (POOL *)GetResWithName(R_POOL, ua->argv[1]);
- if (pool) {
- bsendmsg(ua, "pool=%s", pool->hdr.name);
- bsendmsg(ua, "pool_type=%s", pool->pool_type);
- bsendmsg(ua, "label_format=%s", pool->label_format);
- bsendmsg(ua, "use_volume_once=%d", pool->use_volume_once);
- bsendmsg(ua, "accept_any_volume=%d", pool->accept_any_volume);
- bsendmsg(ua, "purge_oldest_volume=%d", pool->purge_oldest_volume);
- bsendmsg(ua, "recycle_oldest_volume=%d", pool->recycle_oldest_volume);
- bsendmsg(ua, "recycle_current_volume=%d", pool->recycle_current_volume);
- bsendmsg(ua, "max_volumes=%d", pool->max_volumes);
- bsendmsg(ua, "vol_retention=%d", pool->VolRetention);
- bsendmsg(ua, "vol_use_duration=%d", pool->VolUseDuration);
- bsendmsg(ua, "max_vol_jobs=%d", pool->MaxVolJobs);
- bsendmsg(ua, "max_vol_files=%d", pool->MaxVolFiles);
- bsendmsg(ua, "max_vol_bytes=%d", pool->MaxVolBytes);
- bsendmsg(ua, "auto_prune=%d", pool->AutoPrune);
- bsendmsg(ua, "recycle=%d", pool->Recycle);
- }
}
return 1;
}
item = -1;
goto done;
}
-// bnet_sig(ua->UA_sock, BNET_START_SELECT);
bsendmsg(ua, ua->prompt[0]);
for (i=1; i < ua->num_prompts; i++) {
bsendmsg(ua, "%6d: %s\n", i, ua->prompt[i]);
}
-// bnet_sig(ua->UA_sock, BNET_END_SELECT);
for ( ;; ) {
/* First item is the prompt string, not the items */
int stat = JS_Terminated;
char buf[MAXSTRING];
POOLMEM *fname = get_pool_memory(PM_MESSAGE);
- int do_Digest = CRYPTO_DIGEST_NONE;
+ int do_SIG = NO_SIG;
int32_t file_index = 0;
memset(&fdbr, 0, sizeof(FILE_DBR));
* We expect:
* FileIndex
* Stream
- * Options or Digest (MD5/SHA1)
+ * Options or SIG (MD5/SHA1)
* Filename
* Attributes
* Link name ???
while ((n=bget_dirmsg(fd)) >= 0 && !job_canceled(jcr)) {
int stream;
char *attr, *p, *fn;
- char Opts_Digest[MAXSTRING]; /* Verify Opts or MD5/SHA1 digest */
+ char Opts_SIG[MAXSTRING]; /* Verify Opts or MD5/SHA1 signature */
fname = check_pool_memory_size(fname, fd->msglen);
jcr->fname = check_pool_memory_size(jcr->fname, fd->msglen);
- Dmsg1(200, "Atts+Digest=%s\n", fd->msg);
+ Dmsg1(200, "Atts+SIG=%s\n", fd->msg);
if ((len = sscanf(fd->msg, "%ld %d %100s", &file_index, &stream,
fname)) != 3) {
Jmsg3(jcr, M_FATAL, 0, _("bird<filed: bad attributes, expected 3 fields got %d\n"
* We read the Options or Signature into fname
* to prevent overrun, now copy it to proper location.
*/
- bstrncpy(Opts_Digest, fname, sizeof(Opts_Digest));
+ bstrncpy(Opts_SIG, fname, sizeof(Opts_SIG));
p = fd->msg;
skip_nonspaces(&p); /* skip FileIndex */
skip_spaces(&p);
skip_nonspaces(&p); /* skip Stream */
skip_spaces(&p);
- skip_nonspaces(&p); /* skip Opts_Digest */
+ skip_nonspaces(&p); /* skip Opts_SIG */
p++; /* skip space */
fn = fname;
while (*p != 0) {
jcr->JobFiles++;
jcr->FileIndex = file_index; /* remember attribute file_index */
decode_stat(attr, &statf, &LinkFIf); /* decode file stat packet */
- do_Digest = CRYPTO_DIGEST_NONE;
+ do_SIG = NO_SIG;
jcr->fn_printed = false;
pm_strcpy(jcr->fname, fname); /* move filename into JCR */
}
Dmsg3(400, "Found %s in catalog. inx=%d Opts=%s\n", jcr->fname,
- file_index, Opts_Digest);
+ file_index, Opts_SIG);
decode_stat(fdbr.LStat, &statc, &LinkFIc); /* decode catalog stat */
/*
* Loop over options supplied by user and verify the
* fields he requests.
*/
- for (p=Opts_Digest; *p; p++) {
+ for (p=Opts_SIG; *p; p++) {
char ed1[30], ed2[30];
switch (*p) {
case 'i': /* compare INODEs */
break;
case '5': /* compare MD5 */
Dmsg1(500, "set Do_MD5 for %s\n", jcr->fname);
- do_Digest = CRYPTO_DIGEST_MD5;
+ do_SIG = MD5_SIG;
break;
case '1': /* compare SHA1 */
- do_Digest = CRYPTO_DIGEST_SHA1;
+ do_SIG = SHA1_SIG;
break;
case ':':
case 'V':
}
}
/*
- * Got Digest Signature from Storage daemon
- * It came across in the Opts_Digest field.
+ * Got SIG Signature from Storage daemon
+ * It came across in the Opts_SIG field.
*/
- } else if (crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) {
- Dmsg2(400, "stream=Digest inx=%d Digest=%s\n", file_index, Opts_Digest);
+ } else if (stream == STREAM_MD5_SIGNATURE || stream == STREAM_SHA1_SIGNATURE) {
+ Dmsg2(400, "stream=SIG inx=%d SIG=%s\n", file_index, Opts_SIG);
/*
- * When ever we get a digest is MUST have been
+ * Whenever we get a signature it MUST have been
* preceded by an attributes record, which sets attr_file_index
*/
if (jcr->FileIndex != (uint32_t)file_index) {
file_index, jcr->FileIndex);
return false;
}
- if (do_Digest != CRYPTO_DIGEST_NONE) {
- db_escape_string(buf, Opts_Digest, strlen(Opts_Digest));
- if (strcmp(buf, fdbr.Digest) != 0) {
+ if (do_SIG) {
+ db_escape_string(buf, Opts_SIG, strlen(Opts_SIG));
+ if (strcmp(buf, fdbr.SIG) != 0) {
prt_fname(jcr);
if (debug_level >= 10) {
Jmsg(jcr, M_INFO, 0, _(" %s not same. File=%s Cat=%s\n"),
- stream_to_ascii(stream), buf, fdbr.Digest);
+ stream==STREAM_MD5_SIGNATURE?"MD5":"SHA1", buf, fdbr.SIG);
} else {
Jmsg(jcr, M_INFO, 0, _(" %s differs.\n"),
- stream_to_ascii(stream));
+ stream==STREAM_MD5_SIGNATURE?"MD5":"SHA1");
}
stat = JS_Differences;
}
- do_Digest = CRYPTO_DIGEST_NONE;
+ do_SIG = FALSE;
}
}
jcr->JobFiles = file_index;
dummy:
#
-SVRSRCS = filed.c authenticate.c acl.c backup.c estimate.c \
+SVRSRCS = filed.c authenticate.c acl.c backup.c chksum.c estimate.c \
filed_conf.c heartbeat.c job.c pythonfd.c \
restore.c status.c verify.c verify_vol.c
-SVROBJS = filed.o authenticate.o acl.o backup.o estimate.o \
+SVROBJS = filed.o authenticate.o acl.o backup.o chksum.o estimate.o \
filed_conf.o heartbeat.o job.o pythonfd.o \
restore.o status.o verify.o verify_vol.o
/* Forward referenced functions */
static int save_file(FF_PKT *ff_pkt, void *pkt, bool top_level);
-static int send_data(JCR *jcr, int stream, FF_PKT *ff_pkt, DIGEST *digest, DIGEST *signature_digest);
+static int send_data(JCR *jcr, int stream, FF_PKT *ff_pkt, struct CHKSUM *chksum);
static bool encode_and_send_attributes(JCR *jcr, FF_PKT *ff_pkt, int &data_stream);
static bool read_and_send_acl(JCR *jcr, int acltype, int stream);
{
BSOCK *sd;
bool ok = true;
- // TODO landonf: Allow user to specify encryption algorithm
- crypto_cipher_t cipher = CRYPTO_CIPHER_AES_128_CBC;
sd = jcr->store_bsock;
jcr->compress_buf_size = jcr->buf_size + ((jcr->buf_size+999) / 1000) + 30;
jcr->compress_buf = get_memory(jcr->compress_buf_size);
- if (jcr->pki_encrypt) {
- /* Create per-job session encryption context */
- jcr->pki_recipients = crypto_recipients_new(cipher, jcr->pki_readers);
- }
-
Dmsg1(300, "set_find_options ff=%p\n", jcr->ff);
set_find_options((FF_PKT *)jcr->ff, jcr->incremental, jcr->mtime);
Dmsg0(300, "start find files\n");
free_pool_memory(jcr->compress_buf);
jcr->compress_buf = NULL;
}
-
- if (jcr->pki_recipients) {
- crypto_recipients_free(jcr->pki_recipients);
- }
-
Dmsg1(100, "end blast_data ok=%d\n", ok);
return ok;
}
static int save_file(FF_PKT *ff_pkt, void *vjcr, bool top_level)
{
int stat, data_stream;
- DIGEST *digest = NULL;
- DIGEST *signing_digest = NULL;
- int digest_stream = STREAM_NONE;
- // TODO landonf: Allow the user to specify the digest algorithm
-#ifdef HAVE_SHA2
- crypto_digest_t signing_algorithm = CRYPTO_DIGEST_SHA256;
-#else
- crypto_digest_t signing_algorithm = CRYPTO_DIGEST_SHA1;
-#endif
+ struct CHKSUM chksum;
BSOCK *sd;
JCR *jcr = (JCR *)vjcr;
Dmsg1(130, "bfiled: sending %s to stored\n", ff_pkt->fname);
- /*
- * Setup for digest handling. If this fails, the digest will be set to NULL
- * and not used.
- */
- if (ff_pkt->flags & FO_MD5) {
- digest = crypto_digest_new(CRYPTO_DIGEST_MD5);
- digest_stream = STREAM_MD5_DIGEST;
-
- } else if (ff_pkt->flags & FO_SHA1) {
- digest = crypto_digest_new(CRYPTO_DIGEST_SHA1);
- digest_stream = STREAM_SHA1_DIGEST;
-
- } else if (ff_pkt->flags & FO_SHA256) {
- digest = crypto_digest_new(CRYPTO_DIGEST_SHA256);
- digest_stream = STREAM_SHA256_DIGEST;
-
- } else if (ff_pkt->flags & FO_SHA512) {
- digest = crypto_digest_new(CRYPTO_DIGEST_SHA512);
- digest_stream = STREAM_SHA512_DIGEST;
- }
-
- /* Did digest initialization fail? */
- if (digest_stream != STREAM_NONE && digest == NULL) {
- Jmsg(jcr, M_WARNING, 0, _("%s digest initialization failed\n"),
- stream_to_ascii(digest_stream));
- }
/*
- * Set up signature digest handling. If this fails, the signature digest will be set to
- * NULL and not used.
+ * Setup for signature handling.
+ * Then initialise the file descriptor we use for data and other streams.
*/
- // TODO landonf: We should really only calculate the digest once, for both verification and signing.
- if (jcr->pki_sign) {
- signing_digest = crypto_digest_new(signing_algorithm);
- }
+ chksum_init(&chksum, ff_pkt->flags);
- /* Full-stop if a failure occured initializing the signature digest */
- if (jcr->pki_sign && signing_digest == NULL) {
- Jmsg(jcr, M_NOTSAVED, 0, _("%s signature digest initialization failed\n"),
- stream_to_ascii(signing_algorithm));
- jcr->Errors++;
- return 1;
- }
-
- /* Initialise the file descriptor we use for data and other streams. */
binit(&ff_pkt->bfd);
if (ff_pkt->flags & FO_PORTABLE) {
set_portable_backup(&ff_pkt->bfd); /* disable Win32 BackupRead() */
}
}
- /* Send attributes -- must be done after binit() */
if (!encode_and_send_attributes(jcr, ff_pkt, data_stream)) {
return 0;
}
stop_thread_timer(tid);
tid = NULL;
}
- stat = send_data(jcr, data_stream, ff_pkt, digest, signing_digest);
+ stat = send_data(jcr, data_stream, ff_pkt, &chksum);
bclose(&ff_pkt->bfd);
if (!stat) {
return 0;
}
flags = ff_pkt->flags;
ff_pkt->flags &= ~(FO_GZIP|FO_SPARSE);
- stat = send_data(jcr, STREAM_MACOS_FORK_DATA, ff_pkt, digest);
+ stat = send_data(jcr, STREAM_MACOS_FORK_DATA, ff_pkt, &chksum);
ff_pkt->flags = flags;
bclose(&ff_pkt->bfd);
if (!stat) {
Dmsg1(300, "bfiled>stored:header %s\n", sd->msg);
memcpy(sd->msg, ff_pkt->hfsinfo.fndrinfo, 32);
sd->msglen = 32;
- if (digest) {
- crypto_digest_update(digest, sd->msg, sd->msglen);
- }
- if (signature_digest) {
- crypto_digest_update(signature_digest, sd->msg, sd->msglen);
- }
+ chksum_update(&chksum, (unsigned char *)sd->msg, sd->msglen);
bnet_send(sd);
bnet_sig(sd, BNET_EOD);
}
}
}
- /* Terminate the signing digest and send it to the Storage daemon */
- if (signing_digest) {
- SIGNATURE *sig;
- size_t size = 0;
- void *buf;
-
- if ((sig = crypto_sign_new()) == NULL) {
- Jmsg(jcr, M_FATAL, 0, _("Failed to allocate memory for stream signature.\n"));
- return 0;
- }
-
- if (crypto_sign_add_signer(sig, signing_digest, jcr->pki_keypair) == false) {
- Jmsg(jcr, M_FATAL, 0, _("An error occured while signing the stream.\n"));
- return 0;
- }
-
- /* Get signature size */
- if (crypto_sign_encode(sig, NULL, &size) == false) {
- Jmsg(jcr, M_FATAL, 0, _("An error occured while signing the stream.\n"));
- return 0;
- }
-
- /* Allocate signature data buffer */
- buf = malloc(size);
- if (!buf) {
- free(buf);
- return 0;
- }
-
- /* Encode signature data */
- if (crypto_sign_encode(sig, buf, &size) == false) {
- Jmsg(jcr, M_FATAL, 0, _("An error occured while signing the stream.\n"));
- return 0;
- }
-
- /* Send our header */
- bnet_fsend(sd, "%ld %d 0", jcr->JobFiles, STREAM_SIGNED_DIGEST);
- Dmsg1(300, "bfiled>stored:header %s\n", sd->msg);
-
- /* Grow the bsock buffer to fit our message if necessary */
- if ((size_t) sizeof_pool_memory(sd->msg) < size) {
- sd->msg = realloc_pool_memory(sd->msg, size);
+ /* Terminate any signature and send it to Storage daemon and the Director */
+ if (chksum.updated) {
+ int stream = 0;
+ chksum_final(&chksum);
+ if (chksum.type == CHKSUM_MD5) {
+ stream = STREAM_MD5_SIGNATURE;
+ } else if (chksum.type == CHKSUM_SHA1) {
+ stream = STREAM_SHA1_SIGNATURE;
+ } else {
+ Jmsg1(jcr, M_WARNING, 0, _("Unknown signature type %i.\n"), chksum.type);
}
-
- /* Copy our message over and send it */
- memcpy(sd->msg, buf, size);
- sd->msglen = size;
- bnet_send(sd);
- bnet_sig(sd, BNET_EOD); /* end of checksum */
-
- crypto_digest_free(signing_digest);
- crypto_sign_free(sig);
- free(buf);
- }
-
- /* Terminate any digest and send it to Storage daemon and the Director */
- if (digest) {
- char md[CRYPTO_DIGEST_MAX_SIZE];
- size_t size;
-
- size = sizeof(md);
-
- if (crypto_digest_finalize(digest, &md, &size)) {
- bnet_fsend(sd, "%ld %d 0", jcr->JobFiles, digest_stream);
+ if (stream != 0) {
+ bnet_fsend(sd, "%ld %d 0", jcr->JobFiles, stream);
Dmsg1(300, "bfiled>stored:header %s\n", sd->msg);
- memcpy(sd->msg, md, size);
- sd->msglen = size;
+ memcpy(sd->msg, chksum.signature, chksum.length);
+ sd->msglen = chksum.length;
bnet_send(sd);
bnet_sig(sd, BNET_EOD); /* end of checksum */
}
-
- crypto_digest_free(digest);
}
return 1;
* Currently this is not a problem as the only other stream, resource forks,
* are not handled as sparse files.
*/
-int send_data(JCR *jcr, int stream, FF_PKT *ff_pkt, DIGEST *digest, DIGEST *signing_digest)
+static int send_data(JCR *jcr, int stream, FF_PKT *ff_pkt, struct CHKSUM *chksum)
{
BSOCK *sd = jcr->store_bsock;
uint64_t fileAddr = 0; /* file address */
fileAddr += sd->msglen;
/* Update checksum if requested */
- if (digest) {
- crypto_digest_update(digest, rbuf, sd->msglen);
- }
-
- /* Update signing digest if requested */
- if (signing_digest) {
- crypto_digest_update(signing_digest, rbuf, sd->msglen);
- }
+ chksum_update(chksum, (unsigned char *)rbuf, sd->msglen);
#ifdef HAVE_LIBZ
/* Do compression if turned on */
--- /dev/null
+/*
+ * General routines for handling the various checksums supported.
+ *
+ * Written by Preben 'Peppe' Guldberg, December MMIV
+ */
+/*
+ Copyright (C) 2004-2005 Kern Sibbald
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ version 2 as amended with additional clauses defined in the
+ file LICENSE in the main source directory.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ the file LICENSE for additional details.
+
+ */
+
+#include "bacula.h"
+#include "filed.h"
+
+/* return 0 on success, otherwise some handler specific error code. */
+int chksum_init(CHKSUM *chksum, int flags)
+{
+ int status = 0;
+
+ chksum->type = CHKSUM_NONE;
+ bstrncpy(chksum->name, "NONE", sizeof(chksum->name));
+ chksum->updated = false;
+ if (flags & CHKSUM_MD5) {
+ chksum->length = 16;
+ MD5Init(&chksum->context.md5);
+ chksum->type = CHKSUM_MD5;
+ bstrncpy(chksum->name, "MD5", sizeof(chksum->name));
+ } else if (flags & CHKSUM_SHA1) {
+ chksum->length = 20;
+ status = SHA1Init(&chksum->context.sha1);
+ if (status == 0) {
+ chksum->type = CHKSUM_SHA1;
+ bstrncpy(chksum->name, "SHA1", sizeof(chksum->name));
+ }
+ }
+ return status;
+}
+
+/* return 0 on success, otherwise some handler specific error code. */
+int chksum_update(CHKSUM *chksum, void *buf, unsigned len)
+{
+ int status;
+ switch (chksum->type) {
+ case CHKSUM_NONE:
+ return 0;
+ case CHKSUM_MD5:
+ MD5Update(&chksum->context.md5, (unsigned char *)buf, len);
+ chksum->updated = true;
+ return 0;
+ case CHKSUM_SHA1:
+ status = SHA1Update(&chksum->context.sha1, (uint8_t *)buf, len);
+ if (status == 0) {
+ chksum->updated = true;
+ }
+ return status;
+ default:
+ return -1;
+ }
+}
+
+/* return 0 on success, otherwise some handler specific error code. */
+int chksum_final(CHKSUM *chksum)
+{
+ switch (chksum->type) {
+ case CHKSUM_NONE:
+ return 0;
+ case CHKSUM_MD5:
+ MD5Final(chksum->signature, &chksum->context.md5);
+ return 0;
+ case CHKSUM_SHA1:
+ return SHA1Final(&chksum->context.sha1, chksum->signature);
+ default:
+ return -1;
+ }
+}
--- /dev/null
+/*
+ * General routines for handling the various checksums supported.
+ */
+/*
+ Copyright (C) 2000-2005 Kern Sibbald
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ version 2 as amended with additional clauses defined in the
+ file LICENSE in the main source directory.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ the file LICENSE for additional details.
+
+ */
+
+#ifndef _CHKSUM_H_
+#define _CHKSUM_H_
+
+#include "bacula.h"
+
+/*
+ * Link these to findlib options. Doing so allows for simpler handling of
+ * signatures in the callers.
+ * If multiple signatures are specified, the order in chksum_init() matters.
+ * Still, spell out our own names in case we want to change the approach.
+ */
+#define CHKSUM_NONE 0
+#define CHKSUM_MD5 FO_MD5
+#define CHKSUM_SHA1 FO_SHA1
+
+union chksumContext {
+ MD5Context md5;
+ SHA1Context sha1;
+};
+
+struct CHKSUM {
+ int type; /* One of CHKSUM_* above */
+ char name[5]; /* Big enough for NONE, MD5, SHA1, etc. */
+ bool updated; /* True if updated by chksum_update() */
+ chksumContext context; /* Context for the algorithm at hand */
+ int length; /* Length of signature */
+ unsigned char signature[30]; /* Large enough for either signature */
+};
+
+int chksum_init(CHKSUM *chksum, int flags);
+int chksum_update(CHKSUM *chksum, void *buf, unsigned len);
+int chksum_final(CHKSUM *chksum);
+
+#endif
parse_config(configfile);
- if (init_crypto() != 0) {
- Emsg0(M_ERROR, 0, _("Cryptography library initialization failed.\n"));
+ if (init_tls() != 0) {
+ Emsg0(M_ERROR, 0, _("TLS library initialization failed.\n"));
terminate_filed(1);
}
free_config_resources();
term_msg();
stop_watchdog();
- cleanup_crypto();
+ cleanup_tls();
close_memory_pool(); /* release free memory in pool */
sm_dump(false); /* dump orphaned buffers */
exit(sig);
OK = false;
}
}
-
- if (me->pki_encrypt || me->pki_sign) {
-#ifndef HAVE_CRYPTO
- Jmsg(NULL, M_FATAL, 0, _("PKI encryption/signing enabled but not compiled into Bacula.\n"));
- OK = false;
-#endif
- }
-
- /* pki_encrypt implies pki_sign */
- if (me->pki_encrypt) {
- me->pki_sign = true;
- }
-
- if ((me->pki_encrypt || me->pki_sign) && !me->pki_keypairfile) {
- Emsg2(M_FATAL, 0, _("\"PKI Key Pair\" must be defined for File"
- " daemon \"%s\" in %s if either \"PKI Sign\" or"
- " \"PKI Encrypt\" are enabled.\n"), me->hdr.name, configfile);
- OK = false;
- }
-
- /* If everything is well, attempt to initialize our public/private keys */
- if (OK && (me->pki_encrypt || me->pki_sign)) {
- char *filepath;
-
- /* Load our keypair */
- me->pki_keypair = crypto_keypair_new();
- if (!me->pki_keypair) {
- Emsg0(M_FATAL, 0, _("Failed to allocate a new keypair object.\n"));
- OK = false;
- } else {
- if (!crypto_keypair_load_cert(me->pki_keypair, me->pki_keypairfile)) {
- Emsg2(M_FATAL, 0, _("Failed to load public certificate for File"
- " daemon \"%s\" in %s.\n"), me->hdr.name, configfile);
- OK = false;
- }
-
- if (!crypto_keypair_load_key(me->pki_keypair, me->pki_keypairfile, NULL, NULL)) {
- Emsg2(M_FATAL, 0, _("Failed to load private key for File"
- " daemon \"%s\" in %s.\n"), me->hdr.name, configfile);
- OK = false;
- }
- }
-
- /*
- * Trusted Signers. We're always trusted.
- */
- me->pki_signers = New(alist(10, not_owned_by_alist));
- me->pki_signers->append(crypto_keypair_dup(me->pki_keypair));
-
- /* If additional trusted keys have been specified, load them up */
- if (me->pki_trustedkeys) {
- foreach_alist(filepath, me->pki_trustedkeys) {
- X509_KEYPAIR *keypair;
-
- keypair = crypto_keypair_new();
- if (!keypair) {
- Emsg0(M_FATAL, 0, _("Failed to allocate a new keypair object.\n"));
- OK = false;
- } else {
- if (crypto_keypair_load_cert(keypair, filepath)) {
- me->pki_signers->append(keypair);
- } else {
- Emsg3(M_FATAL, 0, _("Failed to load trusted signer certificate"
- " from file %s for File daemon \"%s\" in %s.\n"), filepath, me->hdr.name, configfile);
- OK = false;
- }
- }
- }
- }
-
- if (me->pki_encrypt) {
- /*
- * Trusted readers. We're always trusted.
- * The symmetric session key will be encrypted for each of these readers.
- */
- me->pki_readers = New(alist(10, not_owned_by_alist));
- me->pki_readers->append(crypto_keypair_dup(me->pki_keypair));
-
-
- /* If additional keys have been specified, load them up */
- if (me->pki_masterkeys) {
- foreach_alist(filepath, me->pki_masterkeys) {
- X509_KEYPAIR *keypair;
-
- keypair = crypto_keypair_new();
- if (!keypair) {
- Emsg0(M_FATAL, 0, _("Failed to allocate a new keypair object.\n"));
- OK = false;
- } else {
- if (crypto_keypair_load_cert(keypair, filepath)) {
- me->pki_signers->append(keypair);
- } else {
- Emsg3(M_FATAL, 0, _("Failed to load master key certificate"
- " from file %s for File daemon \"%s\" in %s.\n"), filepath, me->hdr.name, configfile);
- OK = false;
- }
- }
- }
- }
- }
- }
}
#define FILE_DAEMON 1
#include "filed_conf.h"
+#include "chksum.h"
#include "findlib/find.h"
#include "jcr.h"
#include "acl.h"
{"heartbeatinterval", store_time, ITEM(res_client.heartbeat_interval), 0, ITEM_DEFAULT, 0},
{"sdconnecttimeout", store_time,ITEM(res_client.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
{"maximumnetworkbuffersize", store_pint, ITEM(res_client.max_network_buffer_size), 0, 0, 0},
- {"pkisignatures", store_yesno, ITEM(res_client.pki_sign), 1, ITEM_DEFAULT, 0},
- {"pkiencryption", store_yesno, ITEM(res_client.pki_encrypt), 1, ITEM_DEFAULT, 0},
- {"pkikeypair", store_dir, ITEM(res_client.pki_keypairfile), 0, 0, 0},
- {"pkitrustedsigner", store_alist_str, ITEM(res_client.pki_trustedkeys), 0, 0, 0},
- {"pkimasterkey", store_alist_str, ITEM(res_client.pki_masterkeys), 0, 0, 0},
- {"tlsenable", store_yesno, ITEM(res_client.tls_enable), 1, 0, 0},
- {"tlsrequire", store_yesno, ITEM(res_client.tls_require), 1, 0, 0},
- {"tlscacertificatefile", store_dir, ITEM(res_client.tls_ca_certfile), 0, 0, 0},
- {"tlscacertificatedir", store_dir, ITEM(res_client.tls_ca_certdir), 0, 0, 0},
- {"tlscertificate", store_dir, ITEM(res_client.tls_certfile), 0, 0, 0},
- {"tlskey", store_dir, ITEM(res_client.tls_keyfile), 0, 0, 0},
+ {"tlsenable", store_yesno, ITEM(res_client.tls_enable), 1, 0, 0},
+ {"tlsrequire", store_yesno, ITEM(res_client.tls_require), 1, 0, 0},
+ {"tlscacertificatefile", store_dir, ITEM(res_client.tls_ca_certfile), 0, 0, 0},
+ {"tlscacertificatedir", store_dir, ITEM(res_client.tls_ca_certdir), 0, 0, 0},
+ {"tlscertificate", store_dir, ITEM(res_client.tls_certfile), 0, 0, 0},
+ {"tlskey", store_dir, ITEM(res_client.tls_keyfile), 0, 0, 0},
{NULL, NULL, NULL, 0, 0, 0}
};
if (res->res_client.FDaddrs) {
free_addresses(res->res_client.FDaddrs);
}
- if (res->res_client.pki_keypairfile) {
- free(res->res_client.pki_keypairfile);
- }
- if (res->res_client.pki_keypair) {
- crypto_keypair_free(res->res_client.pki_keypair);
- }
- /* Also frees res_client.pki_keypair */
- if (res->res_client.pki_trustedkeys) {
- delete res->res_client.pki_trustedkeys;
- }
- if (res->res_client.pki_signers) {
- X509_KEYPAIR *keypair;
- foreach_alist(keypair, res->res_client.pki_signers) {
- crypto_keypair_free(keypair);
- }
- delete res->res_client.pki_signers;
- }
- if (res->res_client.pki_masterkeys) {
- delete res->res_client.pki_masterkeys;
- }
- if (res->res_client.pki_readers) {
- X509_KEYPAIR *keypair;
- foreach_alist(keypair, res->res_client.pki_readers) {
- crypto_keypair_free(keypair);
- }
- delete res->res_client.pki_signers;
- }
-
if (res->res_client.tls_ctx) {
free_tls_context(res->res_client.tls_ctx);
}
if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_dir.hdr.name)) == NULL) {
Emsg1(M_ABORT, 0, _("Cannot find Client resource %s\n"), res_all.res_dir.hdr.name);
}
- res->res_client.pki_trustedkeys = res_all.res_client.pki_trustedkeys;
- res->res_client.pki_signers = res_all.res_client.pki_signers;
res->res_client.messages = res_all.res_client.messages;
break;
default:
utime_t heartbeat_interval; /* Interval to send heartbeats to Dir */
utime_t SDConnectTimeout; /* timeout in seconds */
uint32_t max_network_buffer_size; /* max network buf size */
- int pki_sign; /* Enable Data Integrity Verification via Digital Signatures */
- int pki_encrypt; /* Enable Data Encryption */
- char *pki_keypairfile; /* PKI Key Pair File */
- alist *pki_trustedkeys; /* PKI Trusted Public Keys */
- alist *pki_masterkeys; /* PKI Master Keys */
int tls_enable; /* Enable TLS */
int tls_require; /* Require TLS */
char *tls_ca_certfile; /* TLS CA Certificate File */
char *tls_certfile; /* TLS Client Certificate File */
char *tls_keyfile; /* TLS Client Key File */
- X509_KEYPAIR *pki_keypair; /* Shared PKI Public/Private Keypair */
- alist *pki_signers; /* Shared PKI Trusted Signers */
- alist *pki_readers; /* Shared PKI Recipients */
TLS_CONTEXT *tls_ctx; /* Shared TLS Context */
};
#include "filed.h"
#ifdef WIN32_VSS
#include "vss.h"
+static pthread_mutex_t vss_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
extern char my_name[];
jcr->last_fname[0] = 0;
jcr->client_name = get_memory(strlen(my_name) + 1);
pm_strcpy(jcr->client_name, my_name);
- jcr->pki_sign = me->pki_sign;
- jcr->pki_encrypt = me->pki_encrypt;
- jcr->pki_keypair = me->pki_keypair;
- jcr->pki_signers = me->pki_signers;
- jcr->pki_readers = me->pki_readers;
dir->jcr = jcr;
enable_backup_privileges(NULL, 1 /* ignore_errors */);
fo->flags |= FO_READFIFO;
break;
case 'S':
- switch(*(p + 1)) {
- case ' ':
- /* Old director did not specify SHA variant */
- fo->flags |= FO_SHA1;
- break;
- case '1':
- fo->flags |= FO_SHA1;
- p++;
- break;
-#ifdef HAVE_SHA2
- case '2':
- fo->flags |= FO_SHA256;
- p++;
- break;
- case '3':
- fo->flags |= FO_SHA512;
- p++;
- break;
-#endif
- default:
- /* Automatically downgrade to SHA-1 if an unsupported
- * SHA variant is specified */
- fo->flags |= FO_SHA1;
- p++;
- break;
- }
+ fo->flags |= FO_SHA1;
break;
case 's':
fo->flags |= FO_SPARSE;
#ifdef WIN32_VSS
/* START VSS ON WIN 32 */
if (g_pVSSClient && enable_vss) {
+ /* Run only one at a time */
+ P(vss_mutex);
if (g_pVSSClient->InitializeForBackup()) {
/* tell vss which drives to snapshot */
char szWinDriveLetters[27];
#ifdef WIN32_VSS
/* STOP VSS ON WIN 32 */
/* tell vss to close the backup session */
- if (g_pVSSClient && enable_vss == 1)
+ if (g_pVSSClient && enable_vss) {
g_pVSSClient->CloseBackup();
+ V(vss_mutex);
+ }
#endif
bnet_fsend(dir, EndJob, jcr->JobStatus, jcr->JobFiles,
/*
* Open Read Session with Storage daemon
*/
- bnet_fsend(sd, read_open, "DummyVolume",
+ bnet_fsend(sd, read_open, jcr->VolumeName,
jcr->VolSessionId, jcr->VolSessionTime, jcr->StartFile, jcr->EndFile,
jcr->StartBlock, jcr->EndBlock);
Dmsg1(110, ">stored: %s", sd->msg);
*/
extern bool blast_data_to_storage_daemon(JCR *jcr, char *addr);
+extern void do_verify(JCR *jcr);
extern void do_verify_volume(JCR *jcr);
extern void do_restore(JCR *jcr);
extern int authenticate_director(JCR *jcr);
extern int authenticate_storagedaemon(JCR *jcr);
extern int make_estimate(JCR *jcr);
-/* From verify.c */
-int digest_file(JCR *jcr, FF_PKT *ff_pkt, DIGEST *digest);
-void do_verify(JCR *jcr);
-
/* From heartbeat.c */
void start_heartbeat_monitor(JCR *jcr);
void stop_heartbeat_monitor(JCR *jcr);
#ifdef HAVE_LIBZ
static const char *zlib_strerror(int stat);
#endif
-
-int verify_signature(JCR *jcr, SIGNATURE *sig);
int32_t extract_data(JCR *jcr, BFILE *bfd, POOLMEM *buf, int32_t buflen,
uint64_t *addr, int flags);
BFILE altbfd; /* Alternative data stream */
uint64_t alt_addr = 0; /* Write address for alternative stream */
intmax_t alt_size = 0; /* Size of alternate stream */
- SIGNATURE *sig = NULL; /* Cryptographic signature (if any) for file */
int flags; /* Options for extract_data() */
int stat;
ATTR *attr;
* or c. Alternate data stream (e.g. Resource Fork)
* or d. Finder info
* or e. ACLs
- * or f. Possibly a cryptographic signature
- * or g. Possibly MD5 or SHA1 record
+ * or f. Possibly MD5 or SHA1 record
* 3. Repeat step 1
*
* NOTE: We keep track of two bacula file descriptors:
Dmsg1(30, "Stream=Unix Attributes. extract=%d\n", extract);
/*
* If extracting, it was from previous stream, so
- * close the output file and validate the signature.
+ * close the output file.
*/
if (extract) {
if (size > 0 && !is_bopen(&bfd)) {
}
set_attributes(jcr, attr, &bfd);
extract = false;
-
- /* Verify the cryptographic signature, if any */
- if (jcr->pki_sign) {
- if (sig) {
- if (!verify_signature(jcr, sig)) {
- // TODO landonf: Better signature failure handling.
- // The failure is reported to the director in verify_signature() ...
- Dmsg1(100, "Bad signature on %s\n", jcr->last_fname);
- } else {
- Dmsg1(100, "Signature good on %s\n", jcr->last_fname);
- }
- crypto_sign_free(sig);
- sig = NULL;
- } else {
- Jmsg1(jcr, M_ERROR, 0, _("Missing cryptographic signature for %s\n"), jcr->last_fname);
- }
- }
Dmsg0(30, "Stop extracting.\n");
} else if (is_bopen(&bfd)) {
Jmsg0(jcr, M_ERROR, 0, _("Logic error: output file should not be open\n"));
|| stream == STREAM_WIN32_GZIP_DATA) {
flags |= FO_GZIP;
}
-
- if (is_win32_stream(stream) && !have_win32_api()) {
- set_portable_backup(&bfd);
- flags |= FO_WIN32DECOMP; /* "decompose" BackupWrite data */
- }
-
if (extract_data(jcr, &bfd, sd->msg, sd->msglen, &fileAddr, flags) < 0) {
extract = false;
bclose(&bfd);
pm_strcpy(jcr->acl_text, sd->msg);
Dmsg2(400, "Restoring ACL type 0x%2x <%s>\n", BACL_TYPE_ACCESS, jcr->acl_text);
if (bacl_set(jcr, BACL_TYPE_ACCESS) != 0) {
- Qmsg1(jcr, M_WARNING, 0, _("Can't restore ACL of %s\n"), jcr->last_fname);
+ Jmsg1(jcr, M_WARNING, 0, _("Can't restore ACL of %s\n"), jcr->last_fname);
}
#else
non_support_acl++;
pm_strcpy(jcr->acl_text, sd->msg);
Dmsg2(400, "Restoring ACL type 0x%2x <%s>\n", BACL_TYPE_DEFAULT, jcr->acl_text);
if (bacl_set(jcr, BACL_TYPE_DEFAULT) != 0) {
- Qmsg1(jcr, M_WARNING, 0, _("Can't restore default ACL of %s\n"), jcr->last_fname);
+ Jmsg1(jcr, M_WARNING, 0, _("Can't restore default ACL of %s\n"), jcr->last_fname);
}
#else
non_support_acl++;
#endif
break;
- case STREAM_SIGNED_DIGEST:
- /* Save signature. */
- sig = crypto_sign_decode(sd->msg, (size_t) sd->msglen);
- break;
-
- case STREAM_MD5_DIGEST:
- case STREAM_SHA1_DIGEST:
- case STREAM_SHA256_DIGEST:
- case STREAM_SHA512_DIGEST:
+ case STREAM_MD5_SIGNATURE:
+ case STREAM_SHA1_SIGNATURE:
break;
case STREAM_PROGRAM_NAMES:
}
#endif
-static int do_file_digest(FF_PKT *ff_pkt, void *pkt, bool top_level) {
- JCR *jcr = (JCR *) pkt;
- return (digest_file(jcr, ff_pkt, jcr->digest));
-}
-
-/*
- * Verify the signature for the last restored file
- * Return value is either true (signature correct)
- * or false (signature could not be verified).
- */
-int verify_signature(JCR *jcr, SIGNATURE *sig)
-{
- X509_KEYPAIR *keypair;
- DIGEST *digest = NULL;
- crypto_error_t err;
-
-
- /* Iterate through the trusted signers */
- foreach_alist(keypair, jcr->pki_signers) {
- err = crypto_sign_get_digest(sig, jcr->pki_keypair, &digest);
-
- switch (err) {
- case CRYPTO_ERROR_NONE:
- /* Signature found, digest allocated */
- jcr->digest = digest;
-
- /* Checksum the entire file */
- if (find_one_file(jcr, jcr->ff, do_file_digest, jcr, jcr->last_fname, (dev_t)-1, 1) != 0) {
- Qmsg(jcr, M_ERROR, 0, _("Signature validation failed for %s: \n"), jcr->last_fname);
- return false;
- }
-
- /* Verify the signature */
- if ((err = crypto_sign_verify(sig, keypair, digest)) != CRYPTO_ERROR_NONE) {
- Qmsg2(jcr, M_ERROR, 0, _("Signature validation failed for %s: %s\n"), jcr->last_fname, crypto_strerror(err));
- crypto_digest_free(digest);
- return false;
- }
-
- /* Valid signature */
- crypto_digest_free(digest);
- return true;
-
- case CRYPTO_ERROR_NOSIGNER:
- /* Signature not found, try again */
- continue;
- default:
- /* Something strange happened (that shouldn't happen!)... */
- Qmsg2(jcr, M_ERROR, 0, _("Signature validation failed for %s: %s\n"), jcr->last_fname, crypto_strerror(err));
- if (digest) {
- crypto_digest_free(digest);
- }
- return false;
- }
- }
-
- /* Unreachable */
- return false;
-}
-
/*
* In the context of jcr, write data to bfd.
* We write buflen bytes in buf at addr. addr is updated in place.
Dmsg2(100, "Comp_len=%d msglen=%d\n", compress_len, wsize);
if ((stat=uncompress((Byte *)jcr->compress_buf, &compress_len,
(const Byte *)wbuf, (uLong)rsize)) != Z_OK) {
- Qmsg(jcr, M_ERROR, 0, _("Uncompression error on file %s. ERR=%s\n"),
+ Jmsg(jcr, M_ERROR, 0, _("Uncompression error on file %s. ERR=%s\n"),
jcr->last_fname, zlib_strerror(stat));
return -1;
}
wsize = compress_len;
Dmsg2(100, "Write uncompressed %d bytes, total before write=%s\n", compress_len, edit_uint64(jcr->JobBytes, ec1));
#else
- Qmsg(jcr, M_ERROR, 0, _("GZIP data stream found, but GZIP not configured!\n"));
+ Jmsg(jcr, M_ERROR, 0, _("GZIP data stream found, but GZIP not configured!\n"));
return -1;
#endif
} else {
Dmsg2(30, "Write %u bytes, total before write=%s\n", wsize, edit_uint64(jcr->JobBytes, ec1));
}
- if (flags & FO_WIN32DECOMP) {
- if (!processWin32BackupAPIBlock(bfd, wbuf, wsize)) {
- berrno be;
- Jmsg2(jcr, M_ERROR, 0, _("Write error in Win32 Block Decomposition on %s: %s\n"),
- jcr->last_fname, be.strerror(bfd->berrno));
- return -1;
- }
- } else if (bwrite(bfd, wbuf, wsize) != (ssize_t)wsize) {
+ if (bwrite(bfd, wbuf, wsize) != (ssize_t)wsize) {
berrno be;
Jmsg2(jcr, M_ERROR, 0, _("Write error on %s: %s\n"),
jcr->last_fname, be.strerror(bfd->berrno));
/*
- * Bacula File Daemon verify.c Verify files.
+ * Bacula File Daemon verify.c Verify files.
*
* Kern Sibbald, October MM
*
#include "filed.h"
static int verify_file(FF_PKT *ff_pkt, void *my_pkt, bool);
-static int read_digest(BFILE *bfd, DIGEST *digest, JCR *jcr);
+static int read_chksum(BFILE *bfd, CHKSUM *chksum, JCR *jcr);
/*
* Find all the requested files and send attributes
jcr->buf_size = DEFAULT_NETWORK_BUFFER_SIZE;
if ((jcr->big_buf = (char *) malloc(jcr->buf_size)) == NULL) {
Jmsg1(jcr, M_ABORT, 0, _("Cannot malloc %d network read buffer\n"),
- DEFAULT_NETWORK_BUFFER_SIZE);
+ DEFAULT_NETWORK_BUFFER_SIZE);
}
set_find_options((FF_PKT *)jcr->ff, jcr->incremental, jcr->mtime);
Dmsg0(10, "Start find files\n");
{
char attribs[MAXSTRING];
char attribsEx[MAXSTRING];
- int digest_stream = STREAM_NONE;
int stat;
- DIGEST *digest = NULL;
+ BFILE bfd;
+ struct CHKSUM chksum;
BSOCK *dir;
JCR *jcr = (JCR *)pkt;
}
dir = jcr->dir_bsock;
- jcr->num_files_examined++; /* bump total file count */
+ jcr->num_files_examined++; /* bump total file count */
switch (ff_pkt->type) {
- case FT_LNKSAVED: /* Hard linked, file already saved */
+ case FT_LNKSAVED: /* Hard linked, file already saved */
Dmsg2(30, "FT_LNKSAVED saving: %s => %s\n", ff_pkt->fname, ff_pkt->link);
break;
case FT_REGE:
Dmsg2(30, "FT_LNK saving: %s -> %s\n", ff_pkt->fname, ff_pkt->link);
break;
case FT_DIRBEGIN:
- return 1; /* ignored */
+ return 1; /* ignored */
case FT_DIREND:
Dmsg1(30, "FT_DIR saving: %s\n", ff_pkt->fname);
break;
encode_attribsEx(jcr, attribsEx, ff_pkt);
P(jcr->mutex);
- jcr->JobFiles++; /* increment number of files sent */
+ jcr->JobFiles++; /* increment number of files sent */
pm_strcpy(jcr->last_fname, ff_pkt->fname);
V(jcr->mutex);
/*
* Send file attributes to Director
- * File_index
- * Stream
- * Verify Options
- * Filename (full path)
- * Encoded attributes
- * Link name (if type==FT_LNK)
+ * File_index
+ * Stream
+ * Verify Options
+ * Filename (full path)
+ * Encoded attributes
+ * Link name (if type==FT_LNK)
* For a directory, link is the same as fname, but with trailing
* slash. For a linked file, link is the link.
*/
Dmsg2(400, "send ATTR inx=%d fname=%s\n", jcr->JobFiles, ff_pkt->fname);
if (ff_pkt->type == FT_LNK || ff_pkt->type == FT_LNKSAVED) {
stat = bnet_fsend(dir, "%d %d %s %s%c%s%c%s%c", jcr->JobFiles,
- STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->fname,
- 0, attribs, 0, ff_pkt->link, 0);
+ STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->fname,
+ 0, attribs, 0, ff_pkt->link, 0);
} else if (ff_pkt->type == FT_DIREND) {
- /* Here link is the canonical filename (i.e. with trailing slash) */
+ /* Here link is the canonical filename (i.e. with trailing slash) */
stat = bnet_fsend(dir,"%d %d %s %s%c%s%c%c", jcr->JobFiles,
- STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->link,
- 0, attribs, 0, 0);
+ STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->link,
+ 0, attribs, 0, 0);
} else {
stat = bnet_fsend(dir,"%d %d %s %s%c%s%c%c", jcr->JobFiles,
- STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->fname,
- 0, attribs, 0, 0);
+ STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->fname,
+ 0, attribs, 0, 0);
}
Dmsg2(20, "bfiled>bdird: attribs len=%d: msg=%s\n", dir->msglen, dir->msg);
if (!stat) {
* First we initialise, then we read files, other streams and Finder Info.
*/
if (ff_pkt->type != FT_LNKSAVED && (S_ISREG(ff_pkt->statp.st_mode) &&
- ff_pkt->flags & (FO_MD5|FO_SHA1|FO_SHA256|FO_SHA512))) {
- /*
- * Create our digest context. If this fails, the digest will be set to NULL
- * and not used.
- */
- if (ff_pkt->flags & FO_MD5) {
- digest = crypto_digest_new(CRYPTO_DIGEST_MD5);
- digest_stream = STREAM_MD5_DIGEST;
-
- } else if (ff_pkt->flags & FO_SHA1) {
- digest = crypto_digest_new(CRYPTO_DIGEST_SHA1);
- digest_stream = STREAM_SHA1_DIGEST;
-
- } else if (ff_pkt->flags & FO_SHA256) {
- digest = crypto_digest_new(CRYPTO_DIGEST_SHA256);
- digest_stream = STREAM_SHA256_DIGEST;
-
- } else if (ff_pkt->flags & FO_SHA512) {
- digest = crypto_digest_new(CRYPTO_DIGEST_SHA512);
- digest_stream = STREAM_SHA512_DIGEST;
+ ff_pkt->flags & (FO_MD5|FO_SHA1))) {
+ chksum_init(&chksum, ff_pkt->flags);
+ binit(&bfd);
+
+ if (ff_pkt->statp.st_size > 0 || ff_pkt->type == FT_RAW
+ || ff_pkt->type == FT_FIFO) {
+ if ((bopen(&bfd, ff_pkt->fname, O_RDONLY | O_BINARY, 0)) < 0) {
+ ff_pkt->ff_errno = errno;
+ berrno be;
+ be.set_errno(bfd.berrno);
+ Jmsg(jcr, M_NOTSAVED, 1, _(" Cannot open %s: ERR=%s.\n"),
+ ff_pkt->fname, be.strerror());
+ jcr->Errors++;
+ return 1;
+ }
+ read_chksum(&bfd, &chksum, jcr);
+ bclose(&bfd);
}
- /* Did digest initialization fail? */
- if (digest_stream != STREAM_NONE && digest == NULL) {
- Jmsg(jcr, M_WARNING, 0, _("%s digest initialization failed\n"),
- stream_to_ascii(digest_stream));
+#ifdef HAVE_DARWIN_OS
+ /* Open resource fork if necessary */
+ if (ff_pkt->flags & FO_HFSPLUS && ff_pkt->hfsinfo.rsrclength > 0) {
+ if (bopen_rsrc(&bfd, ff_pkt->fname, O_RDONLY | O_BINARY, 0) < 0) {
+ ff_pkt->ff_errno = errno;
+ berrno be;
+ Jmsg(jcr, M_NOTSAVED, -1, _(" Cannot open resource fork for %s: ERR=%s.\n"),
+ ff_pkt->fname, be.strerror());
+ jcr->Errors++;
+ if (is_bopen(&ff_pkt->bfd)) {
+ bclose(&ff_pkt->bfd);
+ }
+ return 1;
+ }
+ read_chksum(&bfd, &chksum, jcr);
+ bclose(&bfd);
+ }
+ if (ff_pkt->flags & FO_HFSPLUS) {
+ chksum_update(&chksum, ((unsigned char *)ff_pkt->hfsinfo.fndrinfo), 32);
}
+#endif
/* compute MD5 or SHA1 hash */
- if (digest) {
- char md[CRYPTO_DIGEST_MAX_SIZE];
- size_t size;
-
- size = sizeof(md);
-
- if (digest_file(jcr, ff_pkt, digest) != 0) {
- jcr->Errors++;
- return 1;
- }
-
- if (crypto_digest_finalize(digest, &md, &size)) {
- char *digest_buf;
- const char *digest_name;
-
- digest_buf = (char *) malloc(BASE64_SIZE(size));
- digest_name = crypto_digest_name(digest);
-
- bin_to_base64(digest_buf, (char *) md, size);
- Dmsg3(400, "send inx=%d %s=%s\n", jcr->JobFiles, digest_name, digest_buf);
- bnet_fsend(dir, "%d %d %s *%s-%d*", jcr->JobFiles, digest_stream, digest_buf,
- digest_name, jcr->JobFiles);
- Dmsg3(20, "bfiled>bdird: %s len=%d: msg=%s\n", digest_name,
- dir->msglen, dir->msg);
-
- free(digest_buf);
- }
-
- crypto_digest_free(digest);
+ if (chksum.updated) {
+ char chksumbuf[40]; /* 24 should do */
+ int stream = 0;
+
+ chksum_final(&chksum);
+ if (chksum.type == CHKSUM_MD5) {
+ stream = STREAM_MD5_SIGNATURE;
+ } else if (chksum.type == CHKSUM_SHA1) {
+ stream = STREAM_SHA1_SIGNATURE;
+ }
+ bin_to_base64(chksumbuf, (char *)chksum.signature, chksum.length);
+ Dmsg3(400, "send inx=%d %s=%s\n", jcr->JobFiles, chksum.name, chksumbuf);
+ bnet_fsend(dir, "%d %d %s *%s-%d*", jcr->JobFiles, stream, chksumbuf,
+ chksum.name, jcr->JobFiles);
+ Dmsg3(20, "bfiled>bdird: %s len=%d: msg=%s\n", chksum.name,
+ dir->msglen, dir->msg);
}
}
}
/*
- * Compute message digest for the file specified by ff_pkt.
- * In case of errors we need the job control record and file name.
- */
-int digest_file(JCR *jcr, FF_PKT *ff_pkt, DIGEST *digest)
-{
- BFILE bfd;
-
- binit(&bfd);
-
- if (ff_pkt->statp.st_size > 0 || ff_pkt->type == FT_RAW
- || ff_pkt->type == FT_FIFO) {
- if ((bopen(&bfd, ff_pkt->fname, O_RDONLY | O_BINARY, 0)) < 0) {
- ff_pkt->ff_errno = errno;
- berrno be;
- be.set_errno(bfd.berrno);
- Jmsg(jcr, M_NOTSAVED, 1, _(" Cannot open %s: ERR=%s.\n"),
- ff_pkt->fname, be.strerror());
- return 1;
- }
- read_digest(&bfd, digest, jcr);
- bclose(&bfd);
- }
-
-#ifdef HAVE_DARWIN_OS
- /* Open resource fork if necessary */
- if (ff_pkt->flags & FO_HFSPLUS && ff_pkt->hfsinfo.rsrclength > 0) {
- if (bopen_rsrc(&bfd, ff_pkt->fname, O_RDONLY | O_BINARY, 0) < 0) {
- ff_pkt->ff_errno = errno;
- berrno be;
- Jmsg(jcr, M_NOTSAVED, -1, _(" Cannot open resource fork for %s: ERR=%s.\n"),
- ff_pkt->fname, be.strerror());
- if (is_bopen(&ff_pkt->bfd)) {
- bclose(&ff_pkt->bfd);
- }
- return 1;
- }
- read_digest(&bfd, digest, jcr);
- bclose(&bfd);
- }
-
- if (digest && ff_pkt->flags & FO_HFSPLUS) {
- crypto_digest_update(digest, ff_pkt->hfsinfo.fndrinfo, 32);
- }
-#endif
-
- return 0;
-}
-
-/*
- * Read message digest of bfd, updating digest
+ * Read checksum of bfd, updating chksum
* In case of errors we need the job control record and file name.
*/
-int read_digest(BFILE *bfd, DIGEST *digest, JCR *jcr)
+int read_chksum(BFILE *bfd, CHKSUM *chksum, JCR *jcr)
{
- char buf[DEFAULT_NETWORK_BUFFER_SIZE];
int64_t n;
- while ((n=bread(bfd, &buf, sizeof(buf))) > 0) {
- crypto_digest_update(digest, &buf, n);
+ while ((n=bread(bfd, jcr->big_buf, jcr->buf_size)) > 0) {
+ chksum_update(chksum, ((unsigned char *)jcr->big_buf), (int)n);
jcr->JobBytes += n;
jcr->ReadBytes += n;
}
berrno be;
be.set_errno(bfd->berrno);
Jmsg(jcr, M_ERROR, 1, _("Error reading file %s: ERR=%s\n"),
- jcr->last_fname, be.strerror());
+ jcr->last_fname, be.strerror());
jcr->Errors++;
return -1;
}
uint32_t size;
uint32_t VolSessionId, VolSessionTime, file_index;
uint32_t record_file_index;
- char digest[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)];
int type, stat;
sd = jcr->store_bsock;
case STREAM_WIN32_GZIP_DATA:
case STREAM_GZIP_DATA:
case STREAM_SPARSE_GZIP_DATA:
- case STREAM_SIGNED_DIGEST:
/* Do nothing */
break;
- case STREAM_MD5_DIGEST:
- bin_to_base64(digest, (char *)sd->msg, CRYPTO_DIGEST_MD5_SIZE);
- Dmsg2(400, "send inx=%d MD5=%s\n", jcr->JobFiles, digest);
- bnet_fsend(dir, "%d %d %s *MD5-%d*", jcr->JobFiles, STREAM_MD5_DIGEST, digest,
- jcr->JobFiles);
+ case STREAM_MD5_SIGNATURE:
+ char MD5buf[30];
+ bin_to_base64(MD5buf, (char *)sd->msg, 16); /* encode 16 bytes */
+ Dmsg2(400, "send inx=%d MD5=%s\n", jcr->JobFiles, MD5buf);
+ bnet_fsend(dir, "%d %d %s *MD5-%d*", jcr->JobFiles, STREAM_MD5_SIGNATURE, MD5buf,
+ jcr->JobFiles);
Dmsg2(20, "bfiled>bdird: MD5 len=%d: msg=%s\n", dir->msglen, dir->msg);
- break;
+ break;
- case STREAM_SHA1_DIGEST:
- bin_to_base64(digest, (char *)sd->msg, CRYPTO_DIGEST_SHA1_SIZE);
- Dmsg2(400, "send inx=%d SHA1=%s\n", jcr->JobFiles, digest);
- bnet_fsend(dir, "%d %d %s *SHA1-%d*", jcr->JobFiles, STREAM_SHA1_DIGEST,
- digest, jcr->JobFiles);
+ case STREAM_SHA1_SIGNATURE:
+ char SHA1buf[30];
+ bin_to_base64(SHA1buf, (char *)sd->msg, 20); /* encode 20 bytes */
+ Dmsg2(400, "send inx=%d SHA1=%s\n", jcr->JobFiles, SHA1buf);
+ bnet_fsend(dir, "%d %d %s *SHA1-%d*", jcr->JobFiles, STREAM_SHA1_SIGNATURE,
+ SHA1buf, jcr->JobFiles);
Dmsg2(20, "bfiled>bdird: SHA1 len=%d: msg=%s\n", dir->msglen, dir->msg);
- break;
-
- case STREAM_SHA256_DIGEST:
- bin_to_base64(digest, (char *)sd->msg, CRYPTO_DIGEST_SHA256_SIZE);
- Dmsg2(400, "send inx=%d SHA256=%s\n", jcr->JobFiles, digest);
- bnet_fsend(dir, "%d %d %s *SHA256-%d*", jcr->JobFiles, STREAM_SHA256_DIGEST,
- digest, jcr->JobFiles);
- Dmsg2(20, "bfiled>bdird: SHA256 len=%d: msg=%s\n", dir->msglen, dir->msg);
- break;
-
- case STREAM_SHA512_DIGEST:
- bin_to_base64(digest, (char *)sd->msg, CRYPTO_DIGEST_SHA512_SIZE);
- Dmsg2(400, "send inx=%d SHA512=%s\n", jcr->JobFiles, digest);
- bnet_fsend(dir, "%d %d %s *SHA512-%d*", jcr->JobFiles, STREAM_SHA512_DIGEST,
- digest, jcr->JobFiles);
- Dmsg2(20, "bfiled>bdird: SHA512 len=%d: msg=%s\n", dir->msglen, dir->msg);
- break;
+ break;
default:
Pmsg2(0, "None of above!!! stream=%d data=%s\n", stream,sd->msg);
Copyright (C) 2003-2005 Kern Sibbald
This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of
+ the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA.
*/
return _("File attributes");
case STREAM_FILE_DATA:
return _("File data");
- case STREAM_MD5_DIGEST:
- return _("MD5 digest");
+ case STREAM_MD5_SIGNATURE:
+ return _("MD5 signature");
case STREAM_UNIX_ATTRIBUTES_EX:
return _("Extended attributes");
case STREAM_SPARSE_DATA:
return _("Program names");
case STREAM_PROGRAM_DATA:
return _("Program data");
- case STREAM_SHA1_DIGEST:
- return _("SHA1 digest");
+ case STREAM_SHA1_SIGNATURE:
+ return _("SHA1 signature");
case STREAM_MACOS_FORK_DATA:
return _("HFS+ resource fork");
case STREAM_HFSPLUS_ATTRIBUTES:
return _("HFS+ Finder Info");
- case STREAM_SHA256_DIGEST:
- return _("SHA256 digest");
- case STREAM_SHA512_DIGEST:
- return _("SHA512 digest");
- case STREAM_SIGNED_DIGEST:
- return _("Signed digest");
default:
sprintf(buf, "%d", stream);
return (const char *)buf;
}
}
-
-void int64_LE2BE(int64_t* pBE, const int64_t v)
-{
- /* convert little endian to big endian */
- if (htonl(1) != 1L) { /* no work if on little endian machine */
- memcpy(pBE, &v, sizeof(int64_t));
- } else {
- int i;
- uint8_t rv[sizeof(int64_t)];
- uint8_t *pv = (uint8_t *) &v;
-
- for (i = 0; i < 8; i++) {
- rv[i] = pv[7 - i];
- }
- memcpy(pBE, &rv, sizeof(int64_t));
- }
-}
-
-
-void int32_LE2BE(int32_t* pBE, const int32_t v)
-{
- /* convert little endian to big endian */
- if (htonl(1) != 1L) { /* no work if on little endian machine */
- memcpy(pBE, &v, sizeof(int32_t));
- } else {
- int i;
- uint8_t rv[sizeof(int32_t)];
- uint8_t *pv = (uint8_t *) &v;
-
- for (i = 0; i < 4; i++) {
- rv[i] = pv[3 - i];
- }
- memcpy(pBE, &rv, sizeof(int32_t));
- }
-}
-
-
-bool processWin32BackupAPIBlock (BFILE *bfd, void *pBuffer, ssize_t dwSize)
-{
- /* pByte contains the buffer
- dwSize the len to be processed. function assumes to be
- called in successive incremental order over the complete
- BackupRead stream beginning at pos 0 and ending at the end.
- */
-
- PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT* pContext = &(bfd->win32DecompContext);
- bool bContinue = false;
- int64_t dwDataOffset = 0;
- int64_t dwDataLen;
-
- /* Win32 Stream Header size without name of stream.
- * = sizeof (WIN32_STREAM_ID)- sizeof(WCHAR*);
- */
- int32_t dwSizeHeader = 20;
-
- do {
- if (pContext->liNextHeader >= dwSize) {
- dwDataLen = dwSize-dwDataOffset;
- bContinue = false; /* 1 iteration is enough */
- }
- else {
- dwDataLen = pContext->liNextHeader-dwDataOffset;
- bContinue = true; /* multiple iterations may be necessary */
- }
-
- /* flush */
- /* copy block of real DATA */
- if (pContext->bIsInData) {
- if (bwrite(bfd, ((char *)pBuffer)+dwDataOffset, dwDataLen) != (ssize_t)dwDataLen)
- return false;
- }
-
- if (pContext->liNextHeader < dwSize) {/* is a header in this block ? */
- int32_t dwOffsetTarget;
- int32_t dwOffsetSource;
-
- if (pContext->liNextHeader < 0) {
- /* start of header was before this block, so we
- * continue with the part in the current block
- */
- dwOffsetTarget = -pContext->liNextHeader;
- dwOffsetSource = 0;
- } else {
- /* start of header is inside of this block */
- dwOffsetTarget = 0;
- dwOffsetSource = pContext->liNextHeader;
- }
-
- int32_t dwHeaderPartLen = dwSizeHeader-dwOffsetTarget;
- bool bHeaderIsComplete;
-
- if (dwHeaderPartLen <= dwSize-dwOffsetSource)
- /* header (or rest of header) is completely available
- in current block
- */
- bHeaderIsComplete = true;
- else {
- /* header will continue in next block */
- bHeaderIsComplete = false;
- dwHeaderPartLen = dwSize-dwOffsetSource;
- }
-
- /* copy the available portion of header to persistent copy */
- memcpy(((char *)&pContext->header_stream)+dwOffsetTarget, ((char *)pBuffer)+dwOffsetSource, dwHeaderPartLen);
-
- /* recalculate position of next header */
- if (bHeaderIsComplete) {
- /* convert stream name size (32 bit little endian) to machine type */
- int32_t dwNameSize;
- int32_LE2BE (&dwNameSize, pContext->header_stream.dwStreamNameSize);
- dwDataOffset = dwNameSize+pContext->liNextHeader+dwSizeHeader;
-
- /* convert stream size (64 bit little endian) to machine type */
- int64_LE2BE (&(pContext->liNextHeader), pContext->header_stream.Size);
- pContext->liNextHeader += dwDataOffset;
-
- pContext->bIsInData = pContext->header_stream.dwStreamId == WIN32_BACKUP_DATA;
- if (dwDataOffset == dwSize)
- bContinue = false;
- }
- else {
- /* stop and continue with next block */
- bContinue = false;
- pContext->bIsInData = false;
- }
- }
- } while (bContinue);
-
- /* set "NextHeader" relative to the beginning of the next block */
- pContext->liNextHeader-= dwSize;
-
- return TRUE;
-}
-
/* ===============================================================
/*
- * Return true if we support the stream
- * false if we do not support the stream
- *
- * This code is running under Win32, so we
- * do not need #ifdef on MACOS ...
+ * Return 1 if we support the stream
+ * 0 if we do not support the stream
*/
bool is_restore_stream_supported(int stream)
{
+ /* No Win32 backup on this machine */
switch (stream) {
-
-/* Streams known not to be supported */
#ifndef HAVE_LIBZ
case STREAM_GZIP_DATA:
case STREAM_SPARSE_GZIP_DATA:
- case STREAM_WIN32_GZIP_DATA:
+ return 0;
#endif
+ case STREAM_WIN32_DATA:
+ case STREAM_WIN32_GZIP_DATA:
+ return have_win32_api();
+
case STREAM_MACOS_FORK_DATA:
case STREAM_HFSPLUS_ATTRIBUTES:
return false;
#ifdef HAVE_LIBZ
case STREAM_GZIP_DATA:
case STREAM_SPARSE_GZIP_DATA:
- case STREAM_WIN32_GZIP_DATA:
#endif
- case STREAM_WIN32_DATA:
case STREAM_UNIX_ATTRIBUTES:
case STREAM_FILE_DATA:
- case STREAM_MD5_DIGEST:
+ case STREAM_MD5_SIGNATURE:
case STREAM_UNIX_ATTRIBUTES_EX:
case STREAM_SPARSE_DATA:
case STREAM_PROGRAM_NAMES:
case STREAM_PROGRAM_DATA:
- case STREAM_SHA1_DIGEST:
-#ifdef HAVE_SHA2
- case STREAM_SHA256_DIGEST:
- case STREAM_SHA512_DIGEST:
-#endif
-#ifdef HAVE_CRYPTO
- case STREAM_SIGNED_DIGEST:
-#endif
+ case STREAM_SHA1_SIGNATURE:
case 0: /* compatibility with old tapes */
return true;
}
POOLMEM *win32_fname_wchar;
DWORD dwaccess, dwflags, dwshare;
-
+
/* Convert to Windows path format */
win32_fname = get_pool_memory(PM_FNAME);
win32_fname_wchar = get_pool_memory(PM_FNAME);
}
bfd->errmsg = NULL;
bfd->lpContext = NULL;
- bfd->win32DecompContext.bIsInData = false;
- bfd->win32DecompContext.liNextHeader = 0;
free_pool_memory(win32_fname_wchar);
free_pool_memory(win32_fname);
return bfd->mode == BF_CLOSED ? -1 : 1;
}
-/*
- * This code is running on a non-Win32 machine
- */
+
bool is_restore_stream_supported(int stream)
{
/* No Win32 backup on this machine */
- switch (stream) {
+ switch (stream) {
#ifndef HAVE_LIBZ
case STREAM_GZIP_DATA:
case STREAM_SPARSE_GZIP_DATA:
- case STREAM_WIN32_GZIP_DATA:
#endif
+ case STREAM_WIN32_DATA:
+ case STREAM_WIN32_GZIP_DATA:
#ifndef HAVE_DARWIN_OS
case STREAM_MACOS_FORK_DATA:
case STREAM_HFSPLUS_ATTRIBUTES:
#ifdef HAVE_LIBZ
case STREAM_GZIP_DATA:
case STREAM_SPARSE_GZIP_DATA:
- case STREAM_WIN32_GZIP_DATA:
#endif
- case STREAM_WIN32_DATA:
case STREAM_UNIX_ATTRIBUTES:
case STREAM_FILE_DATA:
- case STREAM_MD5_DIGEST:
+ case STREAM_MD5_SIGNATURE:
case STREAM_UNIX_ATTRIBUTES_EX:
case STREAM_SPARSE_DATA:
case STREAM_PROGRAM_NAMES:
case STREAM_PROGRAM_DATA:
- case STREAM_SHA1_DIGEST:
-#ifdef HAVE_SHA2
- case STREAM_SHA256_DIGEST:
- case STREAM_SHA512_DIGEST:
-#endif
+ case STREAM_SHA1_SIGNATURE:
#ifdef HAVE_DARWIN_OS
case STREAM_MACOS_FORK_DATA:
case STREAM_HFSPLUS_ATTRIBUTES:
#endif
- case 0: /* compatibility with old tapes */
+ case 0: /* compatibility with old tapes */
return true;
}
- return false;
+ return 0;
}
+/* Old file reader code */
+#ifdef xxx
+ if (bfd->prog) {
+ POOLMEM *ecmd = get_pool_memory(PM_FNAME);
+ ecmd = edit_job_codes(bfd->jcr, ecmd, bfd->prog, fname);
+ const char *pmode;
+ if (flags & O_RDONLY) {
+ pmode = "r";
+ } else {
+ pmode = "w";
+ }
+ bfd->bpipe = open_bpipe(ecmd, 0, pmode);
+ if (bfd->bpipe == NULL) {
+ bfd->berrno = errno;
+ bfd->fid = -1;
+ free_pool_memory(ecmd);
+ return -1;
+ }
+ free_pool_memory(ecmd);
+ if (flags & O_RDONLY) {
+ bfd->fid = fileno(bfd->bpipe->rfd);
+ } else {
+ bfd->fid = fileno(bfd->bpipe->wfd);
+ }
+ errno = 0;
+ return bfd->fid;
+ }
+#endif
+
+
int bopen(BFILE *bfd, const char *fname, int flags, mode_t mode)
{
/* Open reader/writer program */
bfd->berrno = errno;
Dmsg1(400, "Open file %d\n", bfd->fid);
errno = bfd->berrno;
-
- bfd->win32DecompContext.bIsInData = false;
- bfd->win32DecompContext.liNextHeader = 0;
-
return bfd->fid;
}
* Kern Sibbald May MMIII
*/
/*
- Copyright (C) 2003-2005 Kern Sibbald
+ Copyright (C) 2000-2005 Kern Sibbald
This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of
+ the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA.
*/
};
#endif
-
-/* this should physically correspond to WIN32_STREAM_ID
- * from winbase.h on Win32. We didn't inlcude cStreamName
- * as we don't use it and don't need it for a correct struct size.
- */
-
-#define WIN32_BACKUP_DATA 1
-
-typedef struct _BWIN32_STREAM_ID {
- int32_t dwStreamId;
- int32_t dwStreamAttributes;
- int64_t Size;
- int32_t dwStreamNameSize;
-} BWIN32_STREAM_ID, *LPBWIN32_STREAM_ID ;
-
-
-typedef struct _PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT {
- int64_t liNextHeader;
- bool bIsInData;
- BWIN32_STREAM_ID header_stream;
-} PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT;
-
/* =======================================================
*
* W I N D O W S
char *prog; /* reader/writer program if any */
JCR *jcr; /* jcr for editing job codes */
Python_IO pio; /* Python I/O routines */
- PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT win32DecompContext; /* context for decomposition of win32 backup streams */
- int use_backup_decomp; /* set if using BackupRead Stream Decomposition */
};
HANDLE bget_handle(BFILE *bfd);
char *prog; /* reader/writer program if any */
JCR *jcr; /* jcr for editing job codes */
Python_IO pio; /* Python I/O routines */
- PROCESS_WIN32_BACKUPAPIBLOCK_CONTEXT win32DecompContext; /* context for decomposition of win32 backup streams */
- int use_backup_decomp; /* set if using BackupRead Stream Decomposition */
};
#endif
off_t blseek(BFILE *bfd, off_t offset, int whence);
const char *stream_to_ascii(int stream);
-bool processWin32BackupAPIBlock (BFILE *bfd, void *pBuffer, ssize_t dwSize);
-
#endif /* __BFILE_H */
switch (replace) {
case REPLACE_IFNEWER:
if (attr->statp.st_mtime <= mstatp.st_mtime) {
- Qmsg(jcr, M_SKIPPED, 0, _("File skipped. Not newer: %s\n"), attr->ofname);
+ Jmsg(jcr, M_SKIPPED, 0, _("File skipped. Not newer: %s\n"), attr->ofname);
return CF_SKIP;
}
break;
case REPLACE_IFOLDER:
if (attr->statp.st_mtime >= mstatp.st_mtime) {
- Qmsg(jcr, M_SKIPPED, 0, _("File skipped. Not older: %s\n"), attr->ofname);
+ Jmsg(jcr, M_SKIPPED, 0, _("File skipped. Not older: %s\n"), attr->ofname);
return CF_SKIP;
}
break;
case REPLACE_NEVER:
- Qmsg(jcr, M_SKIPPED, 0, _("File skipped. Already exists: %s\n"), attr->ofname);
+ Jmsg(jcr, M_SKIPPED, 0, _("File skipped. Already exists: %s\n"), attr->ofname);
return CF_SKIP;
case REPLACE_ALWAYS:
/* Get rid of old copy */
if (unlink(attr->ofname) == -1) {
berrno be;
- Qmsg(jcr, M_ERROR, 0, _("File %s already exists and could not be replaced. ERR=%s.\n"),
+ Jmsg(jcr, M_ERROR, 0, _("File %s already exists and could not be replaced. ERR=%s.\n"),
attr->ofname, be.strerror());
/* Continue despite error */
}
}
Dmsg1(50, "Create file: %s\n", attr->ofname);
if (is_bopen(bfd)) {
- Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid);
+ Jmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid);
bclose(bfd);
}
/*
attr->ofname[pnl] = 0; /* terminate path */
Dmsg1(000, "Do chdir %s\n", attr->ofname);
if (save_cwd(&cwd) != 0) {
- Qmsg0(jcr, M_ERROR, 0, _("Could not save_dirn"));
+ Jmsg0(jcr, M_ERROR, 0, _("Could not save_dirn"));
attr->ofname[pnl] = savechr;
return CF_ERROR;
}
*e = 0;
if (chdir(p) < 0) {
berrno be;
- Qmsg2(jcr, M_ERROR, 0, _("Could not chdir to %s: ERR=%s\n"),
+ Jmsg2(jcr, M_ERROR, 0, _("Could not chdir to %s: ERR=%s\n"),
attr->ofname, be.strerror());
restore_cwd(&cwd, NULL, NULL);
free_cwd(&cwd);
}
if (chdir(p) < 0) {
berrno be;
- Qmsg2(jcr, M_ERROR, 0, _("Could not chdir to %s: ERR=%s\n"),
+ Jmsg2(jcr, M_ERROR, 0, _("Could not chdir to %s: ERR=%s\n"),
attr->ofname, be.strerror());
restore_cwd(&cwd, NULL, NULL);
free_cwd(&cwd);
return CF_EXTRACT;
}
}
- Qmsg2(jcr, M_ERROR, 0, _("Could not create %s: ERR=%s\n"),
+ Jmsg2(jcr, M_ERROR, 0, _("Could not create %s: ERR=%s\n"),
attr->ofname, be.strerror(bfd->berrno));
return CF_ERROR;
}
Dmsg1(200, "Restore fifo: %s\n", attr->ofname);
if (mkfifo(attr->ofname, attr->statp.st_mode) != 0 && errno != EEXIST) {
berrno be;
- Qmsg2(jcr, M_ERROR, 0, _("Cannot make fifo %s: ERR=%s\n"),
+ Jmsg2(jcr, M_ERROR, 0, _("Cannot make fifo %s: ERR=%s\n"),
attr->ofname, be.strerror());
return CF_ERROR;
}
Dmsg1(200, "Restore node: %s\n", attr->ofname);
if (mknod(attr->ofname, attr->statp.st_mode, attr->statp.st_rdev) != 0 && errno != EEXIST) {
berrno be;
- Qmsg2(jcr, M_ERROR, 0, _("Cannot make node %s: ERR=%s\n"),
+ Jmsg2(jcr, M_ERROR, 0, _("Cannot make node %s: ERR=%s\n"),
attr->ofname, be.strerror());
return CF_ERROR;
}
tid = NULL;
}
if (is_bopen(bfd)) {
- Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid);
+ Jmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid);
}
if ((bopen(bfd, attr->ofname, mode, 0)) < 0) {
berrno be;
be.set_errno(bfd->berrno);
- Qmsg2(jcr, M_ERROR, 0, _("Could not open %s: ERR=%s\n"),
+ Jmsg2(jcr, M_ERROR, 0, _("Could not open %s: ERR=%s\n"),
attr->ofname, be.strerror());
stop_thread_timer(tid);
return CF_ERROR;
Dmsg2(130, "FT_LNK should restore: %s -> %s\n", attr->ofname, attr->olname);
if (symlink(attr->olname, attr->ofname) != 0 && errno != EEXIST) {
berrno be;
- Qmsg3(jcr, M_ERROR, 0, _("Could not symlink %s -> %s: ERR=%s\n"),
+ Jmsg3(jcr, M_ERROR, 0, _("Could not symlink %s -> %s: ERR=%s\n"),
attr->ofname, attr->olname, be.strerror());
return CF_ERROR;
}
Dmsg2(130, "Hard link %s => %s\n", attr->ofname, attr->olname);
if (link(attr->olname, attr->ofname) != 0) {
berrno be;
- Qmsg3(jcr, M_ERROR, 0, _("Could not hard link %s -> %s: ERR=%s\n"),
+ Jmsg3(jcr, M_ERROR, 0, _("Could not hard link %s -> %s: ERR=%s\n"),
attr->ofname, attr->olname, be.strerror());
return CF_ERROR;
}
*/
if (!is_portable_backup(bfd)) {
if (is_bopen(bfd)) {
- Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid);
+ Jmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid);
}
if ((bopen(bfd, attr->ofname, O_WRONLY|O_BINARY, 0)) < 0) {
berrno be;
return CF_SKIP;
}
#endif
- Qmsg2(jcr, M_ERROR, 0, _("Could not open %s: ERR=%s\n"),
+ Jmsg2(jcr, M_ERROR, 0, _("Could not open %s: ERR=%s\n"),
attr->ofname, be.strerror());
return CF_ERROR;
}
case FT_NORECURSE:
case FT_NOFSCHG:
case FT_NOOPEN:
- Qmsg2(jcr, M_ERROR, 0, _("Original file %s not saved: type=%d\n"), attr->fname, attr->type);
+ Jmsg2(jcr, M_ERROR, 0, _("Original file %s not saved: type=%d\n"), attr->fname, attr->type);
break;
default:
- Qmsg2(jcr, M_ERROR, 0, _("Unknown file type %d; not restored: %s\n"), attr->type, attr->fname);
+ Jmsg2(jcr, M_ERROR, 0, _("Unknown file type %d; not restored: %s\n"), attr->type, attr->fname);
break;
}
return CF_ERROR;
/* The filename length must not be zero here because we
* are dealing with a file (i.e. FT_REGE or FT_REG).
*/
- Qmsg1(jcr, M_ERROR, 0, _("Zero length filename: %s\n"), fname);
+ Jmsg1(jcr, M_ERROR, 0, _("Zero length filename: %s\n"), fname);
return -1;
}
pnl = f - ofile - 1;
Copyright (C) 2001-2005 Kern Sibbald
This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of
+ the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA.
*/
#define FO_NO_HARDLINK (1<<15) /* don't handle hard links */
#define FO_IGNORECASE (1<<16) /* Ignore file name case */
#define FO_HFSPLUS (1<<17) /* Resource forks and Finder Info */
-#define FO_WIN32DECOMP (1<<18) /* Use BackupRead decomposition */
-#define FO_SHA256 (1<<19) /* Do SHA256 checksum */
-#define FO_SHA512 (1<<20) /* Do SHA512 checksum */
struct s_included_file {
struct s_included_file *next;
2
3
main.c
-static-gnome-console
CONS_LIBS=$(GNOME_LIBS)
CONS_LDFLAGS=$(GNOME_LIBDIR) $(GNOMEUI_LIBS)
+OPENSSL_INC=@OPENSSL_INC@
+OPENSSL_LIBS=@OPENSSL_LIBS@
.SUFFIXES: .c .o
.PHONY:
# inference rules
.c.o:
- $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) $(CONS_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $<
+ $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) $(CONS_INC) $(OPENSSL_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $<
#-------------------------------------------------------------------------
all: Makefile gnome-console @STATIC_GNOME_CONS@
@echo "==== Make of gnome-console is good ===="
rm -f support.c.orig
mv support.c support.c.orig
sed "s%parent = g_object_get_data%parent = \(GtkWidget \*\)g_object_get_data%" support.c.orig >support.c
- $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) $(CONS_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $<
+ $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) $(CONS_INC) $(OPENSSL_INC) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) $<
gnome-console: $(CONSOBJS) ../lib/libbac.a
$(CXX) $(LDFLAGS) $(CONS_LDFLAGS) -L../lib -o $@ $(CONSOBJS) \
- $(LIBS) $(DLIB) $(CONS_LIBS) -lbac -lm
+ $(LIBS) $(DLIB) $(CONS_LIBS) -lbac -lm $(OPENSSL_LIBS)
static-console: static-gnome-console
static-gnome-console: $(CONSOBJS) ../lib/libbac.a
$(CXX) $(LDFLAGS) $(CONS_LDFLAGS) -L../lib -o $@ $(CONSOBJS) \
- $(LIBS) $(DLIB) $(CONS_LIBS) -lbac -lm
+ $(LIBS) $(DLIB) $(CONS_LIBS) -lbac -lm $(OPENSSL_LIBS)
strip $@
Makefile: $(srcdir)/Makefile.in $(topdir)/config.status
@$(MV) Makefile Makefile.bak
@$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile
@$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile
- @$(CXX) -S -M $(CPPFLAGS) $(XINC) -I$(srcdir) -I$(basedir) $(GNOME_INCLUDEDIR) $(SQL_INC) *.c >> Makefile
+ @$(CXX) -S -M $(CPPFLAGS) $(XINC) $(OPENSSL_INC) -I$(srcdir) -I$(basedir) $(GNOME_INCLUDEDIR) $(SQL_INC) *.c >> Makefile
@if test -f Makefile ; then \
$(RMF) Makefile.bak; \
else \
parse_config(configfile);
- if (init_crypto() != 0) {
- Emsg0(M_ERROR_TERM, 0, _("Cryptography library initialization failed.\n"));
+ if (init_tls() != 0) {
+ Emsg0(M_ERROR_TERM, 0, _("TLS library initialization failed.\n"));
}
if (!check_resources()) {
   if (already_here) /* avoid recursive termination problems */
exit(1);
already_here = true;
- cleanup_crypto();
+ cleanup_tls();
disconnect_from_director((gpointer)NULL);
gtk_main_quit();
exit(0);
#define JT_BACKUP 'B' /* Backup Job */
#define JT_VERIFY 'V' /* Verify Job */
#define JT_RESTORE 'R' /* Restore Job */
-#define JT_CONSOLE 'c' /* console program */
+#define JT_CONSOLE 'C' /* console program */
#define JT_SYSTEM 'I' /* internal system "job" */
#define JT_ADMIN 'D' /* admin job */
#define JT_ARCHIVE 'A' /* Archive Job */
-#define JT_COPY 'C' /* Copy Job */
+#define JT_COPY 'Y' /* Copy Job */
+#define JT_MIGRATION 'M' /* Migration Job */
#define JT_MIGRATE 'M' /* Migration Job */
#define JT_SCAN 'S' /* Scan Job */
volatile BSOCK *hb_bsock; /* duped SD socket */
volatile BSOCK *hb_dir_bsock; /* duped DIR socket */
POOLMEM *RunAfterJob; /* Command to run after job */
- bool pki_sign; /* Enable PKI Signatures? */
- bool pki_encrypt; /* Enable PKI Encryption? */
- DIGEST *digest; /* Last file's digest context */
- X509_KEYPAIR *pki_keypair; /* Encryption key pair */
- alist *pki_signers; /* Trusted Signers */
- alist *pki_readers; /* Trusted Readers */
- CRYPTO_RECIPIENTS *pki_recipients; /* PKE Public Keys + Symmetric Session Keys */
DIRRES* director; /* Director resource */
#endif /* FILE_DAEMON */
# this dir relative to top dir
thisdir = src/lib
-OPENSSL_LIBS = @OPENSSL_LIBS@
-OPENSSL_INC = @OPENSSL_INC@
DEBUG=@DEBUG@
LIBSRCS = alloc.c attr.c base64.c berrno.c bsys.c bget_msg.c \
bnet.c bnet_server.c \
- bpipe.c bshm.c bsnprintf.c btime.c \
- cram-md5.c crc32.c crypto.c daemon.c edit.c fnmatch.c \
+ bpipe.c bshm.c btime.c \
+ cram-md5.c crc32.c daemon.c edit.c fnmatch.c \
hmac.c idcache.c jcr.c lex.c alist.c dlist.c \
- md5.c message.c mem_pool.c openssl.c parse_conf.c \
+ md5.c message.c mem_pool.c parse_conf.c \
queue.c regex.c \
res.c rwlock.c scan.c serial.c sha1.c \
semlock.c signal.c smartall.c tls.c tree.c \
LIBOBJS = alloc.o attr.o base64.o berrno.o bsys.o bget_msg.o \
bnet.o bnet_server.o \
- bpipe.o bshm.o bsnprintf.o btime.o \
- cram-md5.o crc32.o crypto.o daemon.o edit.o fnmatch.o \
+ bpipe.o bshm.o btime.o \
+ cram-md5.o crc32.o daemon.o edit.o fnmatch.o \
hmac.o idcache.o jcr.o lex.o alist.o dlist.o \
- md5.o message.o mem_pool.o openssl.o parse_conf.o \
+ md5.o message.o mem_pool.o parse_conf.o \
queue.o regex.o \
res.o rwlock.o scan.o serial.o sha1.o \
semlock.o signal.o smartall.o tls.o tree.o \
EXTRAOBJS = @OBJLIST@
+OPENSSL_LIBS=@OPENSSL_LIBS@
+OPENSSL_INC=@OPENSSL_INC@
+
.SUFFIXES: .c .o .ch .dvi .pdf .tex .view .w .1
.PHONY:
$(CXX) -DSHA1_SUM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) sha1.c
$(CXX) $(LDFLAGS) -L. -o $@ sha1.o $(LIBS) $(DLIB) -lbac $(OPENSSL_LIBS) -lm
rm -f sha1.o
- $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) sha1.c
-
-bsnprintf: bsnprintf.o
- rm -f bsnprintf.o
- $(CXX) -DTEST_PROGRAM $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsnprintf.c
- $(CXX) $(LDFLAGS) -L. -o $@ bsnprintf.o $(LIBS) $(DLIB) -lbac $(OPENSSL_LIBS) -lm
- rm -f bsnprintf.o
- $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) bsnprintf.c
-
+ $(CXX) $(DEFS) $(DEBUG) -c $(CPPFLAGS) -I$(srcdir) -I$(basedir) $(DINCLUDE) $(CFLAGS) md5.c
install:
# `semi'-automatic since dependencies are generated at distribution time.
depend:
+ @$(RMF) -f Makefile.bak
@$(MV) Makefile Makefile.bak
@$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile
@$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile
- @$(CXX) -S -M $(CPPFLAGS) $(XINC) $(PYTHON_INC) -I$(srcdir) -I$(basedir) $(SQL_INC) *.c >> Makefile
+ @$(CXX) -S -M $(CPPFLAGS) $(XINC) $(PYTHON_INC) -I$(srcdir) -I$(basedir) $(SQL_INC) *.c >>Makefile
@if test -f Makefile ; then \
$(RMF) Makefile.bak; \
else \
$(MV) Makefile.bak Makefile; \
- echo -e "Something went wrong\n\a"; \
+ echo " ===== Something went wrong in make depend ====="; \
fi
+
# -----------------------------------------------------------------------
-# DO NOT DELETE: nice dependency list follows
+# DO NOT DELETE: nice dependency list follows
+++ /dev/null
-/*
- * Generic base 64 input and output routines
- *
- * Written by Kern E. Sibbald, March MM.
- *
- * Version $Id$
- */
-
-/*
- Copyright (C) 2000-2005 Kern Sibbald and John Walker
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2 of
- the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA.
-
- */
-
-/* Maximum size of len bytes after base64 encoding */
-#define BASE64_SIZE(len) (((len + 3 - (len % 3)) / 3) * 4)
--- /dev/null
+/*
+ * Network Packet Utility Routines
+ *
+ * by Kern Sibbald, July MMII
+ *
+ *
+ * Version $Id$
+ */
+/*
+ Copyright (C) 2002-2004 Kern Sibbald and John Walker
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of
+ the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ MA 02111-1307, USA.
+
+ */
+
+#ifdef implemented
+
+#include "bacula.h"
+
+/*
+ * Receive a message from the other end. Each message consists of
+ * two packets. The first is a header that contains the size
+ * of the data that follows in the second packet.
+ * Returns number of bytes read
+ * Returns 0 on end of file
+ * Returns -1 on hard end of file (i.e. network connection close)
+ * Returns -2 on error
+ */
+int32_t
+bnet_recv_pkt(BSOCK *bsock, BPKT *pkt, int *version)
+{
+ unser_declare;
+ short lversion;
+ int type;
+
+ unser_begin(bsock->msg, 0);
+ unser_uint16(lversion);
+ *version = (int)lversion;
+
+
+ for ( ; pkt->type != BP_EOF; pkt++) {
+ if (pkt->id) {
+ ser_int8(BP_ID);
+ ser_string((char *)pkt->id);
+ }
+ ser_int8(pkt->type);
+ switch (pkt->type) {
+ case BP_CHAR:
+ ser_int8(*(int8_t *)pkt->value);
+ break;
+ case BP_INT32:
+ ser_int32(*(int32_t *)pkt->value);
+ break;
+ case BP_UINT32:
+ break;
+ ser_unit32(*(uint32_t *)pkt->value);
+ break;
+ case BP_INT64:
+ ser_int64(*(int64_t *)pkt->value);
+ break;
+ case BP_BTIME:
+ case BP_UTIME:
+ case BP_UINT64:
+ ser_uint64(*(uint64_t *)pkt->value);
+ break;
+ case BP_POOL:
+ case BP_STRING:
+ case BP_NAME:
+ ser_string((char *)pkt->value);
+ break;
+ case BP_BYTES:
+ ser_uint32(*(uint32_t *)pkt->len);
+ ser_bytes((char *)pkt->value, pkt->len);
+ break;
+ default:
+ Emsg1(M_ABORT, 0, _("Unknown BPKT type: %d\n"), pkt->type);
+ }
+ }
+ unser_end(bsock->msg, 0);
+
+}
+
+/*
+ * Send a message over the network. The send consists of
+ * two network packets. The first is sends a 32 bit integer containing
+ * the length of the data packet which follows.
+ *
+ * Returns: 0 on failure
+ * 1 on success
+ */
+int
+bnet_send_pkt(BSOCK *bsock, BPKT *pkt, int version)
+{
+ ser_declare;
+
+ ser_begin(bsock->msg, 0);
+ ser_uint16(version);
+
+ for ( ; pkt->type != BP_EOF; pkt++) {
+ if (pkt->id) {
+ ser_int8(BP_ID);
+ ser_string((char *)pkt->id);
+ }
+ ser_int8(pkt->type);
+ switch (pkt->type) {
+ case BP_CHAR:
+ ser_int8(*(int8_t *)pkt->value);
+ break;
+ case BP_INT32:
+ ser_int32(*(int32_t *)pkt->value);
+ break;
+ case BP_UINT32:
+ break;
+ ser_unit32(*(uint32_t *)pkt->value);
+ break;
+ case BP_INT64:
+ ser_int64(*(int64_t *)pkt->value);
+ break;
+ case BP_BTIME:
+ case BP_UTIME:
+ case BP_UINT64:
+ ser_uint64(*(uint64_t *)pkt->value);
+ break;
+ case BP_POOL:
+ case BP_STRING:
+ case BP_NAME:
+ ser_string((char *)pkt->value);
+ break;
+ case BP_BYTES:
+ ser_uint32(*(uint32_t *)pkt->len);
+ ser_bytes((char *)pkt->value, pkt->len);
+ break;
+ default:
+ Emsg1(M_ABORT, 0, _("Unknown BPKT type: %d\n"), pkt->type);
+ }
+ }
+ ser_end(bsock->msg, 0);
+}
+
+#endif
+++ /dev/null
-/*
- * Copyright Patrick Powell 1995
- *
- * This code is based on code written by Patrick Powell
- * (papowell@astart.com) It may be used for any purpose as long
- * as this notice remains intact on all source code distributions.
- *
- * Adapted for Bacula -- note there were lots of bugs in
- * the original code: %lld and %s were seriously broken, and
- * with FP turned off %f seg faults.
- * Kern Sibbald, November MMV
- *
- * Version $Id$
- */
-
-
-#include "bacula.h"
-#define FP_OUTPUT 1 /* Bacula uses floating point */
-
-/*
- Temp only for me -- NOT YET READY FOR USE -- seems to work fine
- on Linux, but doesn't build correctly on Win32
- */
-#ifdef USE_BSNPRINTF
-
-#ifdef HAVE_LONG_DOUBLE
-#define LDOUBLE long double
-#else
-#define LDOUBLE double
-#endif
-
-int bvsnprintf(char *buffer, int32_t maxlen, const char *format, va_list args);
-static int32_t fmtstr(char *buffer, int32_t currlen, int32_t maxlen,
- char *value, int flags, int min, int max);
-static int32_t fmtint(char *buffer, int32_t currlen, int32_t maxlen,
- int64_t value, int base, int min, int max, int flags);
-
-#ifdef FP_OUTPUT
-# ifdef HAVE_FCVTL
-# define fcvt fcvtl
-# endif
-static int32_t fmtfp(char *buffer, int32_t currlen, int32_t maxlen,
- LDOUBLE fvalue, int min, int max, int flags);
-#else
-#define fmtfp(b, c, m, f, min, max, fl) currlen
-#endif
-
-#define outch(c) {int len=currlen; if (currlen++ < maxlen) { buffer[len] = (c);}}
-
-
-/* format read states */
-#define DP_S_DEFAULT 0
-#define DP_S_FLAGS 1
-#define DP_S_MIN 2
-#define DP_S_DOT 3
-#define DP_S_MAX 4
-#define DP_S_MOD 5
-#define DP_S_CONV 6
-#define DP_S_DONE 7
-
-/* format flags - Bits */
-#define DP_F_MINUS (1 << 0)
-#define DP_F_PLUS (1 << 1)
-#define DP_F_SPACE (1 << 2)
-#define DP_F_NUM (1 << 3)
-#define DP_F_ZERO (1 << 4)
-#define DP_F_UP (1 << 5)
-#define DP_F_UNSIGNED (1 << 6)
-#define DP_F_DOT (1 << 7)
-
-/* Conversion Flags */
-#define DP_C_INT16 1
-#define DP_C_INT32 2
-#define DP_C_LDOUBLE 3
-#define DP_C_INT64 4
-
-#define char_to_int(p) ((p)- '0')
-#define MAX(p,q) (((p) >= (q)) ? (p) : (q))
-
-/*
- You might ask why does Bacula have it's own printf routine? Well,
- There are two reasons: 1. Here (as opposed to library routines), we
- define %d and %ld to be 32 bit; %lld and %q to be 64 bit. 2. We
- disable %n for security reasons.
- */
-
-int bsnprintf(char *str, int32_t size, const char *fmt, ...)
-{
- va_list arg_ptr;
- int len;
-
- va_start(arg_ptr, fmt);
- len = bvsnprintf(str, size, fmt, arg_ptr);
- va_end(arg_ptr);
- return len;
-}
-
-
-int bvsnprintf(char *buffer, int32_t maxlen, const char *format, va_list args)
-{
- char ch;
- int64_t value;
- char *strvalue;
- int min;
- int max;
- int state;
- int flags;
- int cflags;
- int32_t currlen;
- int base;
- int junk;
-#ifdef FP_OUTPUT
- LDOUBLE fvalue;
-#endif
-
- state = DP_S_DEFAULT;
- currlen = flags = cflags = min = 0;
- max = -1;
- ch = *format++;
- *buffer = 0;
-
- while (state != DP_S_DONE) {
- if ((ch == '\0') || (currlen >= maxlen))
- state = DP_S_DONE;
-
- switch (state) {
- case DP_S_DEFAULT:
- if (ch == '%') {
- state = DP_S_FLAGS;
- } else {
- outch(ch);
- }
- ch = *format++;
- break;
- case DP_S_FLAGS:
- switch (ch) {
- case '-':
- flags |= DP_F_MINUS;
- ch = *format++;
- break;
- case '+':
- flags |= DP_F_PLUS;
- ch = *format++;
- break;
- case ' ':
- flags |= DP_F_SPACE;
- ch = *format++;
- break;
- case '#':
- flags |= DP_F_NUM;
- ch = *format++;
- break;
- case '0':
- flags |= DP_F_ZERO;
- ch = *format++;
- break;
- default:
- state = DP_S_MIN;
- break;
- }
- break;
- case DP_S_MIN:
- if (isdigit((unsigned char)ch)) {
- min = 10 * min + char_to_int(ch);
- ch = *format++;
- } else if (ch == '*') {
-#ifdef SECURITY_PROBLEM
- min = va_arg(args, int);
-#else
- junk = va_arg(args, int);
-#endif
- ch = *format++;
- state = DP_S_DOT;
- } else
- state = DP_S_DOT;
- break;
- case DP_S_DOT:
- if (ch == '.') {
- state = DP_S_MAX;
- flags |= DP_F_DOT;
- ch = *format++;
- } else
- state = DP_S_MOD;
- break;
- case DP_S_MAX:
- if (isdigit((unsigned char)ch)) {
- if (max < 0)
- max = 0;
- max = 10 * max + char_to_int(ch);
- ch = *format++;
- } else if (ch == '*') {
-#ifdef SECURITY_PROBLEM
- max = va_arg(args, int);
-#else
- junk = va_arg(args, int);
-#endif
- ch = *format++;
- state = DP_S_MOD;
- } else
- state = DP_S_MOD;
- break;
- case DP_S_MOD:
- switch (ch) {
- case 'h':
- cflags = DP_C_INT16;
- ch = *format++;
- break;
- case 'l':
- cflags = DP_C_INT32;
- ch = *format++;
- if (ch == 'l') { /* It's a long long */
- cflags = DP_C_INT64;
- ch = *format++;
- }
- break;
- case 'L':
- cflags = DP_C_LDOUBLE;
- ch = *format++;
- break;
- default:
- break;
- }
- state = DP_S_CONV;
- break;
- case DP_S_CONV:
- switch (ch) {
- case 'd':
- case 'i':
- if (cflags == DP_C_INT16) {
- value = va_arg(args, int32_t);
- } else if (cflags == DP_C_INT32) {
- value = va_arg(args, int32_t);
- } else if (cflags == DP_C_INT64) {
- value = va_arg(args, int64_t);
- } else {
- value = va_arg(args, int);
- }
- currlen = fmtint(buffer, currlen, maxlen, value, 10, min, max, flags);
- break;
- case 'X':
- case 'x':
- case 'o':
- case 'u':
- if (ch == 'o') {
- base = 8;
- } else if (ch == 'x') {
- base = 16;
- } else if (ch == 'X') {
- base = 16;
- flags |= DP_F_UP;
- } else {
- base = 10;
- }
- flags |= DP_F_UNSIGNED;
- if (cflags == DP_C_INT16) {
- value = va_arg(args, uint32_t);
- } else if (cflags == DP_C_INT32) {
- value = (long)va_arg(args, uint32_t);
- } else if (cflags == DP_C_INT64) {
- value = (int64_t) va_arg(args, uint64_t);
- } else {
- value = (long)va_arg(args, unsigned int);
- }
- currlen = fmtint(buffer, currlen, maxlen, value, base, min, max, flags);
- break;
- case 'f':
- if (cflags == DP_C_LDOUBLE) {
- fvalue = va_arg(args, LDOUBLE);
- } else {
- fvalue = va_arg(args, double);
- }
- currlen = fmtfp(buffer, currlen, maxlen, fvalue, min, max, flags);
- break;
- case 'E':
- flags |= DP_F_UP;
- case 'e':
- if (cflags == DP_C_LDOUBLE) {
- fvalue = va_arg(args, LDOUBLE);
- } else {
- fvalue = va_arg(args, double);
- }
- currlen = fmtfp(buffer, currlen, maxlen, fvalue, min, max, flags);
- break;
- case 'G':
- flags |= DP_F_UP;
- case 'g':
- if (cflags == DP_C_LDOUBLE) {
- fvalue = va_arg(args, LDOUBLE);
- } else {
- fvalue = va_arg(args, double);
- }
- currlen = fmtfp(buffer, currlen, maxlen, fvalue, min, max, flags);
- break;
- case 'c':
- outch(va_arg(args, int));
- break;
- case 's':
- strvalue = va_arg(args, char *);
- currlen = fmtstr(buffer, currlen, maxlen, strvalue, flags, min, max);
- break;
- case 'p':
- strvalue = va_arg(args, char *);
- currlen = fmtint(buffer, currlen, maxlen, (long)strvalue, 16, min, max, flags);
- break;
- case 'n':
- if (cflags == DP_C_INT16) {
- int16_t *num;
- num = va_arg(args, int16_t *);
-#ifdef SECURITY_PROBLEM
- *num = currlen;
-#endif
- } else if (cflags == DP_C_INT32) {
- int32_t *num;
- num = va_arg(args, int32_t *);
-#ifdef SECURITY_PROBLEM
- *num = (int32_t)currlen;
-#endif
- } else if (cflags == DP_C_INT64) {
- int64_t *num;
- num = va_arg(args, int64_t *);
-#ifdef SECURITY_PROBLEM
- *num = (int64_t)currlen;
-#endif
- } else {
- int32_t *num;
- num = va_arg(args, int32_t *);
-#ifdef SECURITY_PROBLEM
- *num = (int32_t)currlen;
-#endif
- }
- break;
- case '%':
- outch(ch);
- break;
- case 'w':
- /* not supported yet, treat as next char */
- ch = *format++;
- break;
- default:
- /* Unknown, skip */
- break;
- }
- ch = *format++;
- state = DP_S_DEFAULT;
- flags = cflags = min = 0;
- max = -1;
- break;
- case DP_S_DONE:
- break;
- default:
- /* hmm? */
- break; /* some picky compilers need this */
- }
- }
- if (currlen < maxlen - 1) {
- buffer[currlen] = '\0';
- } else {
- buffer[maxlen - 1] = '\0';
- }
- return currlen;
-}
-
-static int32_t fmtstr(char *buffer, int32_t currlen, int32_t maxlen,
- char *value, int flags, int min, int max)
-{
- int padlen, strln; /* amount to pad */
- int cnt = 0;
-
- if (value == 0) {
- value = "<NULL>";
- }
-
- if (flags & DP_F_DOT && max < 0) { /* Max not specified */
- max = 0;
- } else if (max < 0) {
- max = maxlen;
- }
- strln = strlen(value);
- if (strln > max) {
- strln = max; /* truncate to max */
- }
- padlen = min - strln;
- if (padlen < 0) {
- padlen = 0;
- }
- if (flags & DP_F_MINUS) {
- padlen = -padlen; /* Left Justify */
- }
-
- while (padlen > 0) {
- outch(' ');
- --padlen;
- }
- while (*value && (cnt < max)) {
- outch(*value++);
- ++cnt;
- }
- while (padlen < 0) {
- outch(' ');
- ++padlen;
- }
- return currlen;
-}
-
-/* Have to handle DP_F_NUM (ie 0x and 0 alternates) */
-
-static int32_t fmtint(char *buffer, int32_t currlen, int32_t maxlen,
- int64_t value, int base, int min, int max, int flags)
-{
- int signvalue = 0;
- uint64_t uvalue;
- char convert[20];
- int place = 0;
- int spadlen = 0; /* amount to space pad */
- int zpadlen = 0; /* amount to zero pad */
- int caps = 0;
-
- if (max < 0) {
- max = 0;
- }
-
- uvalue = value;
-
- if (!(flags & DP_F_UNSIGNED)) {
- if (value < 0) {
- signvalue = '-';
- uvalue = -value;
-      } else if (flags & DP_F_PLUS) { /* Do a sign (+/-) */
- signvalue = '+';
- } else if (flags & DP_F_SPACE) {
- signvalue = ' ';
- }
- }
-
- if (flags & DP_F_UP) {
- caps = 1; /* Should characters be upper case? */
- }
-
- do {
- convert[place++] = (caps ? "0123456789ABCDEF" : "0123456789abcdef")
- [uvalue % (unsigned)base];
- uvalue = (uvalue / (unsigned)base);
- } while (uvalue && (place < 20));
- if (place == 20) {
- place--;
- }
- convert[place] = 0;
-
- zpadlen = max - place;
- spadlen = min - MAX(max, place) - (signvalue ? 1 : 0);
- if (zpadlen < 0)
- zpadlen = 0;
- if (spadlen < 0)
- spadlen = 0;
- if (flags & DP_F_ZERO) {
- zpadlen = MAX(zpadlen, spadlen);
- spadlen = 0;
- }
- if (flags & DP_F_MINUS)
-      spadlen = -spadlen; /* Left Justify */
-
-#ifdef DEBUG_SNPRINTF
- printf("zpad: %d, spad: %d, min: %d, max: %d, place: %d\n",
- zpadlen, spadlen, min, max, place);
-#endif
-
- /* Spaces */
- while (spadlen > 0) {
- outch(' ');
- --spadlen;
- }
-
- /* Sign */
- if (signvalue) {
- outch(signvalue);
- }
-
- /* Zeros */
- if (zpadlen > 0) {
- while (zpadlen > 0) {
- outch('0');
- --zpadlen;
- }
- }
-
- /* Digits */
- while (place > 0) {
- outch(convert[--place]);
- }
-
- /* Left Justified spaces */
- while (spadlen < 0) {
- outch(' ');
- ++spadlen;
- }
- return currlen;
-}
-
-#ifdef FP_OUTPUT
-
-static LDOUBLE abs_val(LDOUBLE value)
-{
- LDOUBLE result = value;
-
- if (value < 0)
- result = -value;
-
- return result;
-}
-
-static LDOUBLE pow10(int exp)
-{
- LDOUBLE result = 1;
-
- while (exp) {
- result *= 10;
- exp--;
- }
-
- return result;
-}
-
-static long round(LDOUBLE value)
-{
- long intpart;
-
- intpart = (long)value;
- value = value - intpart;
- if (value >= 0.5)
- intpart++;
-
- return intpart;
-}
-
-static int32_t fmtfp(char *buffer, int32_t currlen, int32_t maxlen,
- LDOUBLE fvalue, int min, int max, int flags)
-{
- int signvalue = 0;
- LDOUBLE ufvalue;
-#ifndef HAVE_FCVT
- char iconvert[20];
- char fconvert[20];
-#else
- char iconvert[311];
- char fconvert[311];
- char *result;
- int dec_pt, sig;
- int r_length;
- extern char *fcvt(double value, int ndigit, int *decpt, int *sign);
-#endif
- int iplace = 0;
- int fplace = 0;
- int padlen = 0; /* amount to pad */
- int zpadlen = 0;
- int caps = 0;
- int64_t intpart;
- int64_t fracpart;
-
- /*
- * AIX manpage says the default is 0, but Solaris says the default
- * is 6, and sprintf on AIX defaults to 6
- */
- if (max < 0)
- max = 6;
-
- ufvalue = abs_val(fvalue);
-
- if (fvalue < 0)
- signvalue = '-';
-   else if (flags & DP_F_PLUS) /* Do a sign (+/-) */
- signvalue = '+';
- else if (flags & DP_F_SPACE)
- signvalue = ' ';
-
-#if 0
- if (flags & DP_F_UP)
- caps = 1; /* Should characters be upper case? */
-#endif
-
-#ifndef HAVE_FCVT
- intpart = (long)ufvalue;
-
- /*
- * Sorry, we only support 9 digits past the decimal because of our
- * conversion method
- */
- if (max > 9)
- max = 9;
-
- /* We "cheat" by converting the fractional part to integer by
- * multiplying by a factor of 10
- */
- fracpart = round((pow10(max)) * (ufvalue - intpart));
-
- if (fracpart >= pow10(max)) {
- intpart++;
- fracpart -= (int64_t)pow10(max);
- }
-#ifdef DEBUG_SNPRINTF
- printf("fmtfp: %g %d.%d min=%d max=%d\n",
- (double)fvalue, intpart, fracpart, min, max);
-#endif
-
- /* Convert integer part */
- do {
- iconvert[iplace++] =
- (caps ? "0123456789ABCDEF" : "0123456789abcdef")[intpart % 10];
- intpart = (intpart / 10);
- } while (intpart && (iplace < 20));
- if (iplace == 20)
- iplace--;
- iconvert[iplace] = 0;
-
- /* Convert fractional part */
- do {
- fconvert[fplace++] =
- (caps ? "0123456789ABCDEF" : "0123456789abcdef")[fracpart % 10];
- fracpart = (fracpart / 10);
- } while (fracpart && (fplace < 20));
- if (fplace == 20)
- fplace--;
- fconvert[fplace] = 0;
-#else /* use fcvt() */
- if (max > 310)
- max = 310;
-# ifdef HAVE_FCVTL
- result = fcvtl(ufvalue, max, &dec_pt, &sig);
-# else
- result = fcvt(ufvalue, max, &dec_pt, &sig);
-# endif
-
- r_length = strlen(result);
-
- /*
- * Fix broken fcvt implementation returns..
- */
-
- if (r_length == 0) {
- result[0] = '0';
- result[1] = '\0';
- r_length = 1;
- }
-
- if (r_length < dec_pt)
- dec_pt = r_length;
-
- if (dec_pt <= 0) {
- iplace = 1;
- iconvert[0] = '0';
- iconvert[1] = '\0';
-
- fplace = 0;
-
- while (r_length)
- fconvert[fplace++] = result[--r_length];
-
- while ((dec_pt < 0) && (fplace < max)) {
- fconvert[fplace++] = '0';
- dec_pt++;
- }
- } else {
- int c;
-
- iplace = 0;
- for (c = dec_pt; c; iconvert[iplace++] = result[--c]);
- iconvert[iplace] = '\0';
-
- result += dec_pt;
- fplace = 0;
-
- for (c = (r_length - dec_pt); c; fconvert[fplace++] = result[--c]);
- }
-#endif /* HAVE_FCVT */
-
- /* -1 for decimal point, another -1 if we are printing a sign */
- padlen = min - iplace - max - 1 - ((signvalue) ? 1 : 0);
- zpadlen = max - fplace;
- if (zpadlen < 0) {
- zpadlen = 0;
- }
- if (padlen < 0) {
- padlen = 0;
- }
- if (flags & DP_F_MINUS) {
-      padlen = -padlen; /* Left Justify */
- }
-
- if ((flags & DP_F_ZERO) && (padlen > 0)) {
- if (signvalue) {
- outch(signvalue);
- --padlen;
- signvalue = 0;
- }
- while (padlen > 0) {
- outch('0');
- --padlen;
- }
- }
- while (padlen > 0) {
- outch(' ');
- --padlen;
- }
- if (signvalue) {
- outch(signvalue);
- }
-
- while (iplace > 0) {
- outch(iconvert[--iplace]);
- }
-
-
-#ifdef DEBUG_SNPRINTF
- printf("fmtfp: fplace=%d zpadlen=%d\n", fplace, zpadlen);
-#endif
-
- /*
- * Decimal point. This should probably use locale to find the correct
- * char to print out.
- */
- if (max > 0) {
- outch('.');
- while (fplace > 0) {
- outch(fconvert[--fplace]);
- }
- }
-
- while (zpadlen > 0) {
- outch('0');
- --zpadlen;
- }
-
- while (padlen < 0) {
- outch(' ');
- ++padlen;
- }
- return currlen;
-}
-#endif /* FP_OUTPUT */
-
-
-#ifdef TEST_PROGRAM
-
-#ifndef LONG_STRING
-#define LONG_STRING 1024
-#endif
-int main(void)
-{
- char buf1[LONG_STRING];
- char buf2[LONG_STRING];
-
-#ifdef FP_OUTPUT
- char *fp_fmt[] = {
- "%-1.5f",
- "%1.5f",
- "%123.9f",
- "%10.5f",
- "% 10.5f",
- "%+22.9f",
- "%+4.9f",
- "%01.3f",
- "%4f",
- "%3.1f",
- "%3.2f",
- "%.0f",
- "%.1f",
- NULL
- };
- double fp_nums[] = { -1.5, 134.21, 91340.2, 341.1234, 0203.9, 0.96, 0.996,
- 0.9996, 1.996, 4.136, 6442452944.1234, 0
- };
-#endif
- char *int_fmt[] = {
- "%-1.5d",
- "%1.5d",
- "%123.9d",
- "%5.5d",
- "%10.5d",
- "% 10.5d",
- "%+22.33d",
- "%01.3d",
- "%4d",
- "%-1.5ld",
- "%1.5ld",
- "%123.9ld",
- "%5.5ld",
- "%10.5ld",
- "% 10.5ld",
- "%+22.33ld",
- "%01.3ld",
- "%4ld",
- NULL
- };
- long int_nums[] = { -1, 134, 91340, 341, 0203, 0 };
-
- char *ll_fmt[] = {
- "%-1.8lld",
- "%1.8lld",
- "%123.9lld",
- "%5.8lld",
- "%10.5lld",
- "% 10.8lld",
- "%+22.33lld",
- "%01.3lld",
- "%4lld",
- NULL
- };
- int64_t ll_nums[] = { -1976, 789134567890LL, 91340, 34123, 0203, 0 };
-
- char *s_fmt[] = {
- "%-1.8s",
- "%1.8s",
- "%123.9s",
- "%5.8s",
- "%10.5s",
- "% 10.3s",
- "%+22.1s",
- "%01.3s",
- "%s",
- "%10s",
- "%3s",
- "%3.0s",
- "%3.s",
- NULL
- };
- char *s_nums[] = { "abc", "def", "ghi", "123", "4567", "a", "bb", "ccccccc", NULL};
-
-
- int x, y;
- int fail = 0;
- int num = 0;
-
- printf("Testing snprintf format codes against system sprintf...\n");
-
-#ifdef FP_OUTPUT
- for (x = 0; fp_fmt[x] != NULL; x++)
- for (y = 0; fp_nums[y] != 0; y++) {
- bsnprintf(buf1, sizeof(buf1), fp_fmt[x], fp_nums[y]);
- sprintf(buf2, fp_fmt[x], fp_nums[y]);
- if (strcmp(buf1, buf2)) {
- printf
- ("snprintf doesn't match Format: %s\n\tsnprintf = %s\n\tsprintf = %s\n",
- fp_fmt[x], buf1, buf2);
- fail++;
- }
- num++;
- }
-#endif
-
- for (x = 0; int_fmt[x] != NULL; x++)
- for (y = 0; int_nums[y] != 0; y++) {
- int pcount, bcount;
- bcount = bsnprintf(buf1, sizeof(buf1), int_fmt[x], int_nums[y]);
- printf("%s\n", buf1);
- pcount = sprintf(buf2, int_fmt[x], int_nums[y]);
- if (bcount != pcount) {
- printf("bsnprintf count %d doesn't match sprintf count %d\n",
- bcount, pcount);
- }
- if (strcmp(buf1, buf2)) {
- printf
- ("bsnprintf doesn't match Format: %s\n\tsnprintf = %s\n\tsprintf = %s\n",
- int_fmt[x], buf1, buf2);
- fail++;
- }
- num++;
- }
-
- for (x = 0; ll_fmt[x] != NULL; x++) {
- for (y = 0; ll_nums[y] != 0; y++) {
- int pcount, bcount;
- bcount = bsnprintf(buf1, sizeof(buf1), ll_fmt[x], ll_nums[y]);
- printf("%s\n", buf1);
- pcount = sprintf(buf2, ll_fmt[x], ll_nums[y]);
- if (bcount != pcount) {
- printf("bsnprintf count %d doesn't match sprintf count %d\n",
- bcount, pcount);
- }
- if (strcmp(buf1, buf2)) {
- printf
- ("bsnprintf doesn't match Format: %s\n\tsnprintf = %s\n\tsprintf = %s\n",
- ll_fmt[x], buf1, buf2);
- fail++;
- }
- num++;
- }
- }
-
- for (x = 0; s_fmt[x] != NULL; x++) {
- for (y = 0; s_nums[y] != 0; y++) {
- int pcount, bcount;
- bcount = bsnprintf(buf1, sizeof(buf1), s_fmt[x], s_nums[y]);
- printf("%s\n", buf1);
- pcount = sprintf(buf2, s_fmt[x], s_nums[y]);
- if (bcount != pcount) {
- printf("bsnprintf count %d doesn't match sprintf count %d\n",
- bcount, pcount);
- }
- if (strcmp(buf1, buf2)) {
- printf
- ("bsnprintf doesn't match Format: %s\n\tsnprintf = %s\n\tsprintf = %s\n",
- s_fmt[x], buf1, buf2);
- fail++;
- }
- num++;
- }
- }
-
-
- printf("%d tests failed out of %d.\n", fail, num);
-}
-#endif /* TEST_PROGRAM */
-
-#endif /* USE_BSNPRINTF */
+++ /dev/null
-/*
- * Bacula red-black binary tree routines.
- *
- * btree is a binary tree with the links being in the data item.
- *
- * Developped in part from ideas obtained from several online University
- * courses.
- *
- * Kern Sibbald, November MMV
- *
- * Version $Id$
- *
- */
-/*
- Copyright (C) 2005 Kern Sibbald
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
-
- */
-
-#include "bacula.h"
-#include "btree.h"
-
-/* ===================================================================
- * btree
- */
-
-/*
- * Insert an item in the tree, but only if it is unique
- * otherwise, the item is returned non inserted
- * The big trick is keeping the tree balanced after the
- * insert. We use a parent pointer to make it simpler and
- * to avoid recursion.
- *
- * Returns: item if item inserted
- * other_item if same value already exists (item not inserted)
- */
-bnode *btree::insert(bnode *item, int compare(bnode *item1, bnode *item2))
-{
- bnode *x, *y;
- bnode *last = NULL; /* last leaf if not found */
- bnode *found = NULL;
- int comp = 0;
-
- /* Search */
- x = head;
- while (x && !found) {
- last = x;
- comp = compare(item, x);
- if (comp < 0) {
- x = x->left;
- } else if (comp > 0) {
- x = x->right;
- } else {
- found = x;
- }
- }
-
- if (found) { /* found? */
- return found; /* yes, return item found */
- }
- /* Handle empty tree */
- if (num_items == 0) {
- head = item;
- num_items++;
- return item;
- }
- x = last;
- /* Not found, so insert it on appropriate side of tree */
- if (comp < 0) {
- last->left = item;
- } else {
- last->right = item;
- }
- last->red = true;
- item->parent = last;
- num_items++;
-
- /* Now we must walk up the tree balancing it */
- x = last;
- while (x != head && x->parent->red) {
- if (x->parent == x->parent->parent->left) {
- /* Look at the right side of our grandparent */
- y = x->parent->parent->right;
- if (y && y->red) {
- /* our parent must be black */
- x->parent->red = false;
- y->red = false;
- x->parent->parent->red = true;
- x = x->parent->parent; /* move up to grandpa */
- } else {
- if (x == x->parent->right) { /* right side of parent? */
- x = x->parent;
- left_rotate(x);
- }
- /* make parent black too */
- x->parent->red = false;
- x->parent->parent->red = true;
- right_rotate(x->parent->parent);
- }
- } else {
- /* Look at left side of our grandparent */
- y = x->parent->parent->left;
- if (y && y->red) {
- x->parent->red = false;
- y->red = false;
- x->parent->parent->red = true;
- x = x->parent->parent; /* move up to grandpa */
- } else {
- if (x == x->parent->left) {
- x = x->parent;
- right_rotate(x);
- }
- /* make parent black too */
- x->parent->red = false;
- x->parent->parent->red = true;
- left_rotate(x->parent->parent);
- }
- }
- }
- /* Make sure the head is always black */
- head->red = false;
- return item;
-}
-
-
-/*
- * Search for item
- */
-bnode *btree::search(bnode *item, int compare(bnode *item1, bnode *item2))
-{
- bnode *found = NULL;
- bnode *x;
- int comp;
-
- x = head;
- while (x) {
- comp = compare(item, x);
- if (comp < 0) {
- x = x->left;
- } else if (comp > 0) {
- x = x->right;
- } else {
- found = x;
- break;
- }
- }
- return found;
-}
-
-/*
- * Get first item (i.e. lowest value)
- */
-bnode *btree::first(void)
-{
- bnode *x;
-
- x = head;
- down = true;
- while (x) {
- if (x->left) {
- x = x->left;
- continue;
- }
- return x;
- }
- /* Tree is empty */
- return NULL;
-}
-
-/*
- * This is a non-recursive btree walk routine that returns
- * the items one at a time in order. I've never seen a
- * non-recursive tree walk routine published that returns
- * one item at a time rather than doing a callback.
- *
- * Return the next item in sorted order. We assume first()
- * was called once before calling this routine.
- * We always go down as far as we can to the left, then up, and
- * down one to the right, and again down as far as we can to the
- * left. etc.
- *
- * Returns: pointer to next larger item
- * NULL when no more items in tree
- */
-bnode *btree::next(bnode *item)
-{
- bnode *x;
-
- x = item;
- if ((down && !x->left && x->right) || (!down && x->right)) {
- /* Move down to right one */
- down = true;
- x = x->right;
- /* Then all the way down left */
- while (x->left) {
- x = x->left;
- }
- return x;
- }
-
- /* We have gone down all we can, so now go up */
- for ( ;; ) {
- /* If at head, we are done */
- if (!x->parent) {
- return NULL;
- }
- /* Move up in tree */
- down = false;
- /* if coming from right, continue up */
- if (x->parent->right == x) {
- x = x->parent;
- continue;
- }
- /* Coming from left, go up one -- ie. return parent */
- return x->parent;
- }
-}
-
-/*
- * Similer to next(), but visits all right nodes when
- * coming up the tree.
- */
-bnode *btree::any(bnode *item)
-{
- bnode *x;
-
- x = item;
- if ((down && !x->left && x->right) || (!down && x->right)) {
- /* Move down to right one */
- down = true;
- x = x->right;
- /* Then all the way down left */
- while (x->left) {
- x = x->left;
- }
- return x;
- }
-
- /* We have gone down all we can, so now go up */
- for ( ;; ) {
- /* If at head, we are done */
- if (!x->parent) {
- return NULL;
- }
- down = false;
- /* Go up one and return parent */
- return x->parent;
- }
-}
-
-
-/* x is item, y is below and to right, then rotated to below left */
-void btree::left_rotate(bnode *item)
-{
- bnode *y;
- bnode *x;
-
- x = item;
- y = x->right;
- x->right = y->left;
- if (y->left) {
- y->left->parent = x;
- }
- y->parent = x->parent;
- /* if no parent then we have a new head */
- if (!x->parent) {
- head = y;
- } else if (x == x->parent->left) {
- x->parent->left = y;
- } else {
- x->parent->right = y;
- }
- y->left = x;
- x->parent = y;
-}
-
-void btree::right_rotate(bnode *item)
-{
- bnode *x, *y;
-
- y = item;
- x = y->left;
- y->left = x->right;
- if (x->right) {
- x->right->parent = y;
- }
- x->parent = y->parent;
- /* if no parent then we have a new head */
- if (!y->parent) {
- head = x;
- } else if (y == y->parent->left) {
- y->parent->left = x;
- } else {
- y->parent->right = x;
- }
- x->right = y;
- y->parent = x;
-}
-
-
-void btree::remove(bnode *item)
-{
-}
-
-/* Destroy the tree contents. Not totally working */
-void btree::destroy()
-{
- bnode *x, *y = NULL;
-
- x = first();
-// printf("head=%p first=%p left=%p right=%p\n", head, x, x->left, x->right);
-
- for ( ; (y=any(x)); ) {
- /* Prune the last item */
- if (x->parent) {
- if (x == x->parent->left) {
- x->parent->left = NULL;
- } else if (x == x->parent->right) {
- x->parent->right = NULL;
- }
- }
- if (!x->left && !x->right) {
- if (head == x) {
- head = NULL;
- }
-// if (num_items<30) {
-// printf("free nitems=%d item=%p left=%p right=%p\n", num_items, x, x->left, x->right);
-// }
- free((void *)x); /* free previous node */
- num_items--;
- }
- x = y; /* save last node */
- }
- if (x) {
- if (x == head) {
- head = NULL;
- }
-// printf("free nitems=%d item=%p left=%p right=%p\n", num_items, x, x->left, x->right);
- free((void *)x);
- num_items--;
- }
- if (head) {
-// printf("Free head\n");
- free((void *)head);
- }
-// printf("free nitems=%d\n", num_items);
-
- head = NULL;
-}
-
-
-
-#ifdef TEST_PROGRAM
-
-struct MYJCR {
- bnode link;
- char *buf;
-};
-
-static int my_compare(bnode *item1, bnode *item2)
-{
- MYJCR *jcr1, *jcr2;
- int comp;
- jcr1 = (MYJCR *)item1;
- jcr2 = (MYJCR *)item2;
- comp = strcmp(jcr1->buf, jcr2->buf);
- //Dmsg3(000, "compare=%d: %s to %s\n", comp, jcr1->buf, jcr2->buf);
- return comp;
-}
-
-int main()
-{
- char buf[30];
- btree *jcr_chain;
- MYJCR *jcr = NULL;
- MYJCR *jcr1;
-
-
- /* Now do a binary insert for the tree */
- jcr_chain = New(btree());
-#define CNT 26
- printf("append %d items\n", CNT*CNT*CNT);
- strcpy(buf, "ZZZ");
- int count = 0;
- for (int i=0; i<CNT; i++) {
- for (int j=0; j<CNT; j++) {
- for (int k=0; k<CNT; k++) {
- count++;
- if ((count & 0x3FF) == 0) {
- Dmsg1(000, "At %d\n", count);
- }
- jcr = (MYJCR *)malloc(sizeof(MYJCR));
- memset(jcr, 0, sizeof(MYJCR));
- jcr->buf = bstrdup(buf);
-// printf("buf=%p %s\n", jcr, jcr->buf);
- jcr1 = (MYJCR *)jcr_chain->insert((bnode *)jcr, my_compare);
- if (jcr != jcr1) {
- Dmsg2(000, "Insert of %s vs %s failed.\n", jcr->buf, jcr1->buf);
- }
- buf[1]--;
- }
- buf[1] = 'Z';
- buf[2]--;
- }
- buf[2] = 'Z';
- buf[0]--;
- }
- printf("%d items appended\n", CNT*CNT*CNT);
- printf("num_items=%d\n", jcr_chain->size());
-
- jcr = (MYJCR *)malloc(sizeof(MYJCR));
- memset(jcr, 0, sizeof(MYJCR));
-
- jcr->buf = bstrdup("a");
- if ((jcr1=(MYJCR *)jcr_chain->search((bnode *)jcr, my_compare))) {
- printf("One less failed!!!! Got: %s\n", jcr1->buf);
- } else {
- printf("One less: OK\n");
- }
- free(jcr->buf);
-
- jcr->buf = bstrdup("ZZZZZZZZZZZZZZZZ");
- if ((jcr1=(MYJCR *)jcr_chain->search((bnode *)jcr, my_compare))) {
- printf("One greater failed!!!! Got:%s\n", jcr1->buf);
- } else {
- printf("One greater: OK\n");
- }
- free(jcr->buf);
-
- jcr->buf = bstrdup("AAA");
- if ((jcr1=(MYJCR *)jcr_chain->search((bnode *)jcr, my_compare))) {
- printf("Search for AAA got %s\n", jcr1->buf);
- } else {
- printf("Search for AAA not found\n");
- }
- free(jcr->buf);
-
- jcr->buf = bstrdup("ZZZ");
- if ((jcr1 = (MYJCR *)jcr_chain->search((bnode *)jcr, my_compare))) {
- printf("Search for ZZZ got %s\n", jcr1->buf);
- } else {
- printf("Search for ZZZ not found\n");
- }
- free(jcr->buf);
- free(jcr);
-
-
- printf("Find each of %d items in tree.\n", count);
- for (jcr=(MYJCR *)jcr_chain->first(); jcr; (jcr=(MYJCR *)jcr_chain->next((bnode *)jcr)) ) {
-// printf("Got: %s\n", jcr->buf);
- if (!jcr_chain->search((bnode *)jcr, my_compare)) {
- printf("btree binary_search item not found = %s\n", jcr->buf);
- }
- }
- printf("Free each of %d items in tree.\n", count);
- for (jcr=(MYJCR *)jcr_chain->first(); jcr; (jcr=(MYJCR *)jcr_chain->next((bnode *)jcr)) ) {
-// printf("Free: %p %s\n", jcr, jcr->buf);
- free(jcr->buf);
- jcr->buf = NULL;
- }
- printf("num_items=%d\n", jcr_chain->size());
- delete jcr_chain;
-
-
- sm_dump(true);
-
-}
-#endif
+++ /dev/null
-/*
- * Version $Id$
- */
-/*
- Copyright (C) 2005 Kern Sibbald
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
-
- */
-
-
-/* ========================================================================
- *
- * red-black binary tree routines -- btree.h
- *
- * Kern Sibbald, MMV
- *
- */
-
-#define M_ABORT 1
-
-/*
- * There is a lot of extra casting here to work around the fact
- * that some compilers (Sun and Visual C++) do not accept
- * (bnode *) as an lvalue on the left side of an equal.
- *
- * Loop var through each member of list
- */
-#define foreach_btree(var, tree) \
- for(*((bnode **)&(var))=(tree)->first(); (*((bnode **)&(var))=(tree)->next((bnode *)var)); )
-
-#ifdef the_old_way
-#define foreach_btree(var, tree) \
- for((var)=(tree)->first(); (((bnode *)(var))=(tree)->next((bnode *)var)); )
-#endif
-
-struct bnode;
-struct bnode {
- bnode *left;
- bnode *right;
- bnode *parent;
- bool red;
-};
-
-class btree : public SMARTALLOC {
- bnode *head;
- uint32_t num_items;
- bool down;
- void left_rotate(bnode *item);
- void right_rotate(bnode *item);
-public:
- btree(void);
- ~btree() { destroy(); }
- void init(void);
- bnode *insert(bnode *item, int compare(bnode *item1, bnode *item2));
- bnode *search(bnode *item, int compare(bnode *item1, bnode *item2));
- bnode *first(void);
- bnode *next(bnode *item);
- bnode *any(bnode *item);
- void remove(bnode *item);
- int size() const;
- void destroy();
-};
-
-
-/*
- * This allows us to do explicit initialization,
- * allowing us to mix C++ classes inside malloc'ed
- * C structures. Define before called in constructor.
- */
-inline void btree::init()
-{
- head = NULL;
- num_items = 0;
-}
-
-
-/* Constructor with link at head of item */
-inline btree::btree(void) : head(0), num_items(0)
-{
-}
-
-inline int btree::size() const
-{
- return num_items;
-}
+++ /dev/null
-/*
- * crypto.c Encryption support functions
- *
- * Author: Landon Fuller <landonf@opendarwin.org>
- *
- * Version $Id$
- *
- * Copyright (C) 2005 Kern Sibbald
- *
- * This file was contributed to the Bacula project by Landon Fuller.
- *
- * Landon Fuller has been granted a perpetual, worldwide, non-exclusive,
- * no-charge, royalty-free, irrevocable copyright license to reproduce,
- * prepare derivative works of, publicly display, publicly perform,
- * sublicense, and distribute the original work contributed by Landon Fuller
- * to the Bacula project in source or object form.
- *
- * If you wish to license these contributions under an alternate open source
- * license please contact Landon Fuller <landonf@opendarwin.org>.
- */
-/*
- Copyright (C) 2005 Kern Sibbald
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
-
- */
-
-
-#include "bacula.h"
-#include <assert.h>
-
-/*
- * Bacula ASN.1 Syntax
- *
- * OID Allocation:
- * Prefix: iso.org.dod.internet.private.enterprise.threerings.external.bacula (1.3.6.1.4.1.22054.500.2)
- * Organization: Bacula Project
- * Contact Name: Kern Sibbald
- * Contact E-mail: kern@sibbald.com
- *
- * Top Level Allocations - 500.2
- * 1 - Published Allocations
- * 1.1 - Bacula Encryption
- *
- * Bacula Encryption - 500.2.1.1
- * 1 - ASN.1 Modules
- * 1.1 - BaculaCrypto
- * 2 - ASN.1 Object Identifiers
- * 2.1 - SignatureData
- * 2.2 - SignerInfo
- * 2.3 - CryptoData
- * 2.4 - RecipientInfo
- *
- * BaculaCrypto { iso(1) identified-organization(3) usdod(6)
- * internet(1) private(4) enterprises(1) three-rings(22054)
- * external(500) bacula(2) published(1) bacula-encryption(1)
- * asn1-modules(1) bacula-crypto(1) }
- *
- * DEFINITIONS AUTOMATIC TAGS ::=
- * BEGIN
- *
- * SignatureData ::= SEQUENCE {
- * version Version DEFAULT v0,
- * signerInfo SignerInfo }
- *
- * CryptoData ::= SEQUENCE {
- * version Version DEFAULT v0,
- * contentEncryptionAlgorithm ContentEncryptionAlgorithmIdentifier,
- * iv InitializationVector,
- * recipientInfo RecipientInfo
- * }
- *
- * SignerInfo ::= SET OF SignerInfo
- * RecipientInfo ::= SET OF RecipientInfo
- *
- * Version ::= INTEGER { v0(0) }
- *
- * SignerInfo ::= SEQUENCE {
- * version Version,
- * subjectKeyIdentifier SubjectKeyIdentifier,
- * digestAlgorithm DigestAlgorithmIdentifier,
- * signatureAlgorithm SignatureAlgorithmIdentifier,
- * signature SignatureValue }
- *
- * RecipientInfo ::= SEQUENCE {
- * version Version
- * subjectKeyIdentifier SubjectKeyIdentifier
- * keyEncryptionAlgorithm KeyEncryptionAlgorithmIdentifier
- * encryptedKey EncryptedKey
- * }
- *
- * SubjectKeyIdentifier ::= OCTET STRING
- *
- * DigestAlgorithmIdentifier ::= AlgorithmIdentifier
- *
- * SignatureAlgorithmIdentifier ::= AlgorithmIdentifier
- *
- * KeyEncryptionAlgorithmIdentifier ::= AlgorithmIdentifier
- *
- * ContentEncryptionAlgorithmIdentifier ::= AlgorithmIdentifier
- *
- * InitializationVector ::= OCTET STRING
- *
- * SignatureValue ::= OCTET STRING
- *
- * EncryptedKey ::= OCTET STRING
- *
- * AlgorithmIdentifier ::= OBJECT IDENTIFIER
- *
- * END
- */
-
-#ifdef HAVE_CRYPTO /* Is encryption enabled? */
-#ifdef HAVE_OPENSSL /* How about OpenSSL? */
-
-/* Are we initialized? */
-static int crypto_initialized = false;
-
-/* ASN.1 Declarations */
-#define BACULA_ASN1_VERSION 0
-
-typedef struct {
- ASN1_INTEGER *version;
- ASN1_OCTET_STRING *subjectKeyIdentifier;
- ASN1_OBJECT *digestAlgorithm;
- ASN1_OBJECT *signatureAlgorithm;
- ASN1_OCTET_STRING *signature;
-} SignerInfo;
-
-typedef struct {
- ASN1_INTEGER *version;
- ASN1_OCTET_STRING *subjectKeyIdentifier;
- ASN1_OBJECT *keyEncryptionAlgorithm;
- ASN1_OCTET_STRING *encryptedKey;
-} RecipientInfo;
-
-ASN1_SEQUENCE(SignerInfo) = {
- ASN1_SIMPLE(SignerInfo, version, ASN1_INTEGER),
- ASN1_SIMPLE(SignerInfo, subjectKeyIdentifier, ASN1_OCTET_STRING),
- ASN1_SIMPLE(SignerInfo, digestAlgorithm, ASN1_OBJECT),
- ASN1_SIMPLE(SignerInfo, signatureAlgorithm, ASN1_OBJECT),
- ASN1_SIMPLE(SignerInfo, signature, ASN1_OCTET_STRING)
-} ASN1_SEQUENCE_END(SignerInfo);
-
-ASN1_SEQUENCE(RecipientInfo) = {
- ASN1_SIMPLE(RecipientInfo, version, ASN1_INTEGER),
- ASN1_SIMPLE(RecipientInfo, subjectKeyIdentifier, ASN1_OCTET_STRING),
- ASN1_SIMPLE(RecipientInfo, keyEncryptionAlgorithm, ASN1_OBJECT),
- ASN1_SIMPLE(RecipientInfo, encryptedKey, ASN1_OCTET_STRING),
-} ASN1_SEQUENCE_END(RecipientInfo);
-
-typedef struct {
- ASN1_INTEGER *version;
- STACK_OF(SignerInfo) *signerInfo;
-} SignatureData;
-
-typedef struct {
- ASN1_INTEGER *version;
- ASN1_OBJECT *contentEncryptionAlgorithm;
- ASN1_OCTET_STRING *iv;
- STACK_OF(RecipientInfo) *recipientInfo;
-} CryptoData;
-
-ASN1_SEQUENCE(SignatureData) = {
- ASN1_SIMPLE(SignatureData, version, ASN1_INTEGER),
- ASN1_SET_OF(SignatureData, signerInfo, SignerInfo),
-} ASN1_SEQUENCE_END(SignatureData);
-
-ASN1_SEQUENCE(CryptoData) = {
- ASN1_SIMPLE(CryptoData, version, ASN1_INTEGER),
- ASN1_SIMPLE(CryptoData, iv, ASN1_OCTET_STRING),
- ASN1_SET_OF(CryptoData, recipientInfo, RecipientInfo)
-} ASN1_SEQUENCE_END(CryptoData);
-
-IMPLEMENT_ASN1_FUNCTIONS(SignerInfo)
-IMPLEMENT_ASN1_FUNCTIONS(RecipientInfo)
-IMPLEMENT_ASN1_FUNCTIONS(SignatureData)
-IMPLEMENT_ASN1_FUNCTIONS(CryptoData)
-IMPLEMENT_STACK_OF(SignerInfo)
-IMPLEMENT_STACK_OF(RecipientInfo)
-
-/*
- * SignerInfo and RecipientInfo stack macros, generated by OpenSSL's util/mkstack.pl.
- */
-#define sk_SignerInfo_new(st) SKM_sk_new(SignerInfo, (st))
-#define sk_SignerInfo_new_null() SKM_sk_new_null(SignerInfo)
-#define sk_SignerInfo_free(st) SKM_sk_free(SignerInfo, (st))
-#define sk_SignerInfo_num(st) SKM_sk_num(SignerInfo, (st))
-#define sk_SignerInfo_value(st, i) SKM_sk_value(SignerInfo, (st), (i))
-#define sk_SignerInfo_set(st, i, val) SKM_sk_set(SignerInfo, (st), (i), (val))
-#define sk_SignerInfo_zero(st) SKM_sk_zero(SignerInfo, (st))
-#define sk_SignerInfo_push(st, val) SKM_sk_push(SignerInfo, (st), (val))
-#define sk_SignerInfo_unshift(st, val) SKM_sk_unshift(SignerInfo, (st), (val))
-#define sk_SignerInfo_find(st, val) SKM_sk_find(SignerInfo, (st), (val))
-#define sk_SignerInfo_delete(st, i) SKM_sk_delete(SignerInfo, (st), (i))
-#define sk_SignerInfo_delete_ptr(st, ptr) SKM_sk_delete_ptr(SignerInfo, (st), (ptr))
-#define sk_SignerInfo_insert(st, val, i) SKM_sk_insert(SignerInfo, (st), (val), (i))
-#define sk_SignerInfo_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(SignerInfo, (st), (cmp))
-#define sk_SignerInfo_dup(st) SKM_sk_dup(SignerInfo, st)
-#define sk_SignerInfo_pop_free(st, free_func) SKM_sk_pop_free(SignerInfo, (st), (free_func))
-#define sk_SignerInfo_shift(st) SKM_sk_shift(SignerInfo, (st))
-#define sk_SignerInfo_pop(st) SKM_sk_pop(SignerInfo, (st))
-#define sk_SignerInfo_sort(st) SKM_sk_sort(SignerInfo, (st))
-#define sk_SignerInfo_is_sorted(st) SKM_sk_is_sorted(SignerInfo, (st))
-
-#define d2i_ASN1_SET_OF_SignerInfo(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
- SKM_ASN1_SET_OF_d2i(SignerInfo, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class))
-#define i2d_ASN1_SET_OF_SignerInfo(st, pp, i2d_func, ex_tag, ex_class, is_set) \
- SKM_ASN1_SET_OF_i2d(SignerInfo, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_SignerInfo(st, i2d_func, buf, len) \
- SKM_ASN1_seq_pack(SignerInfo, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_SignerInfo(buf, len, d2i_func, free_func) \
- SKM_ASN1_seq_unpack(SignerInfo, (buf), (len), (d2i_func), (free_func))
-
-#define sk_RecipientInfo_new(st) SKM_sk_new(RecipientInfo, (st))
-#define sk_RecipientInfo_new_null() SKM_sk_new_null(RecipientInfo)
-#define sk_RecipientInfo_free(st) SKM_sk_free(RecipientInfo, (st))
-#define sk_RecipientInfo_num(st) SKM_sk_num(RecipientInfo, (st))
-#define sk_RecipientInfo_value(st, i) SKM_sk_value(RecipientInfo, (st), (i))
-#define sk_RecipientInfo_set(st, i, val) SKM_sk_set(RecipientInfo, (st), (i), (val))
-#define sk_RecipientInfo_zero(st) SKM_sk_zero(RecipientInfo, (st))
-#define sk_RecipientInfo_push(st, val) SKM_sk_push(RecipientInfo, (st), (val))
-#define sk_RecipientInfo_unshift(st, val) SKM_sk_unshift(RecipientInfo, (st), (val))
-#define sk_RecipientInfo_find(st, val) SKM_sk_find(RecipientInfo, (st), (val))
-#define sk_RecipientInfo_delete(st, i) SKM_sk_delete(RecipientInfo, (st), (i))
-#define sk_RecipientInfo_delete_ptr(st, ptr) SKM_sk_delete_ptr(RecipientInfo, (st), (ptr))
-#define sk_RecipientInfo_insert(st, val, i) SKM_sk_insert(RecipientInfo, (st), (val), (i))
-#define sk_RecipientInfo_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(RecipientInfo, (st), (cmp))
-#define sk_RecipientInfo_dup(st) SKM_sk_dup(RecipientInfo, st)
-#define sk_RecipientInfo_pop_free(st, free_func) SKM_sk_pop_free(RecipientInfo, (st), (free_func))
-#define sk_RecipientInfo_shift(st) SKM_sk_shift(RecipientInfo, (st))
-#define sk_RecipientInfo_pop(st) SKM_sk_pop(RecipientInfo, (st))
-#define sk_RecipientInfo_sort(st) SKM_sk_sort(RecipientInfo, (st))
-#define sk_RecipientInfo_is_sorted(st) SKM_sk_is_sorted(RecipientInfo, (st))
-
-#define d2i_ASN1_SET_OF_RecipientInfo(st, pp, length, d2i_func, free_func, ex_tag, ex_class) \
- SKM_ASN1_SET_OF_d2i(RecipientInfo, (st), (pp), (length), (d2i_func), (free_func), (ex_tag), (ex_class))
-#define i2d_ASN1_SET_OF_RecipientInfo(st, pp, i2d_func, ex_tag, ex_class, is_set) \
- SKM_ASN1_SET_OF_i2d(RecipientInfo, (st), (pp), (i2d_func), (ex_tag), (ex_class), (is_set))
-#define ASN1_seq_pack_RecipientInfo(st, i2d_func, buf, len) \
- SKM_ASN1_seq_pack(RecipientInfo, (st), (i2d_func), (buf), (len))
-#define ASN1_seq_unpack_RecipientInfo(buf, len, d2i_func, free_func) \
- SKM_ASN1_seq_unpack(RecipientInfo, (buf), (len), (d2i_func), (free_func))
-/* End of util/mkstack.pl block */
-
-/* X509 Public/Private Key Pair Structure */
-struct X509_Keypair {
- ASN1_OCTET_STRING *keyid;
- EVP_PKEY *pubkey;
- EVP_PKEY *privkey;
-};
-
-/* Message Digest Structure */
-struct Digest {
- crypto_digest_t type;
- EVP_MD_CTX ctx;
-};
-
-/* Message Signature Structure */
-struct Signature {
- SignatureData *sigData;
-};
-
-/* Encryption Key Data */
-struct Crypto_Recipients {
- CryptoData *cryptoData; /* ASN.1 Structure */
- EVP_CIPHER *openssl_cipher; /* OpenSSL Cipher Object */
- unsigned char session_key[EVP_MAX_KEY_LENGTH]; /* Private symmetric session key */
- size_t session_key_len; /* Symmetric session key length */
-};
-
-/* PEM Password Dispatch Context */
-typedef struct PEM_CB_Context {
- CRYPTO_PEM_PASSWD_CB *pem_callback;
- const void *pem_userdata;
-} PEM_CB_CONTEXT;
-
-/*
- * Extract subjectKeyIdentifier from x509 certificate.
- * Returns: On success, an ASN1_OCTET_STRING that must be freed via M_ASN1_OCTET_STRING_free().
- * NULL on failure.
- */
-static ASN1_OCTET_STRING *openssl_cert_keyid(X509 *cert){
- X509_EXTENSION *ext;
- X509V3_EXT_METHOD *method;
- ASN1_OCTET_STRING *keyid;
- int i;
-#if (OPENSSL_VERSION_NUMBER >= 0x0090800FL)
- const unsigned char *ext_value_data;
-#else
- unsigned char *ext_value_data;
-#endif
-
-
- /* Find the index to the subjectKeyIdentifier extension */
- i = X509_get_ext_by_NID(cert, NID_subject_key_identifier, -1);
- if (i < 0) {
- /* Not found */
- return NULL;
- }
-
- /* Grab the extension */
- ext = X509_get_ext(cert, i);
-
- /* Get x509 extension method structure */
- if (!(method = X509V3_EXT_get(ext))) {
- return NULL;
- }
-
- ext_value_data = ext->value->data;
-
-#if (OPENSSL_VERSION_NUMBER > 0x00907000L)
- if (method->it) {
- /* New style ASN1 */
-
- /* Decode ASN1 item in data */
- keyid = (ASN1_OCTET_STRING *) ASN1_item_d2i(NULL, &ext_value_data, ext->value->length,
- ASN1_ITEM_ptr(method->it));
- } else {
- /* Old style ASN1 */
-
- /* Decode ASN1 item in data */
- keyid = (ASN1_OCTET_STRING *) method->d2i(NULL, &ext_value_data, ext->value->length);
- }
-
-#else
- keyid = (ASN1_OCTET_STRING *) method->d2i(NULL, &ext_value_data, ext->value->length);
-#endif
-
- return keyid;
-}
-
-/*
- * Create a new keypair object.
- * Returns: A pointer to a X509 KEYPAIR object on success.
- * NULL on failure.
- */
-X509_KEYPAIR *crypto_keypair_new (void) {
- X509_KEYPAIR *keypair;
-
- /* Allocate our keypair structure */
- keypair = (X509_KEYPAIR *) malloc(sizeof(X509_KEYPAIR));
- if (!keypair) {
- return NULL;
- }
-
- /* Initialize our keypair structure */
- keypair->keyid = NULL;
- keypair->pubkey = NULL;
- keypair->privkey = NULL;
-
- return keypair;
-}
-
-/*
- * Create a copy of a keypair object. The underlying
- * EVP objects are not duplicated, as no EVP_PKEY_dup()
- * API is available. Instead, the reference count is
- * incremented.
- */
-X509_KEYPAIR *crypto_keypair_dup (X509_KEYPAIR *keypair)
-{
- X509_KEYPAIR *newpair;
-
- newpair = crypto_keypair_new();
-
- if (!newpair) {
- /* Allocation failed */
- return NULL;
- }
-
- /* Increment the public key ref count */
- if (keypair->pubkey) {
- CRYPTO_add(&(keypair->pubkey->references), 1, CRYPTO_LOCK_EVP_PKEY);
- newpair->pubkey = keypair->pubkey;
- }
-
- /* Increment the private key ref count */
- if (keypair->privkey) {
- CRYPTO_add(&(keypair->privkey->references), 1, CRYPTO_LOCK_EVP_PKEY);
- newpair->privkey = keypair->privkey;
- }
-
- /* Duplicate the keyid */
- if (keypair->keyid) {
- newpair->keyid = M_ASN1_OCTET_STRING_dup(keypair->keyid);
- if (!newpair->keyid) {
- /* Allocation failed */
- crypto_keypair_free(newpair);
- return NULL;
- }
- }
-
- return newpair;
-}
-
-
-/*
- * Load a public key from a PEM-encoded x509 certificate.
- * Returns: true on success
- * false on failure
- */
-int crypto_keypair_load_cert (X509_KEYPAIR *keypair, const char *file)
-{
- BIO *bio;
- X509 *cert;
-
- /* Open the file */
- if (!(bio = BIO_new_file(file, "r"))) {
- openssl_post_errors(M_ERROR, _("Unable to open certificate file"));
- return false;
- }
-
- cert = PEM_read_bio_X509(bio, NULL, NULL, NULL);
- BIO_free(bio);
- if (!cert) {
- openssl_post_errors(M_ERROR, _("Unable to read certificate from file"));
- return false;
- }
-
- /* Extract the public key */
- if (!(keypair->pubkey = X509_get_pubkey(cert))) {
- openssl_post_errors(M_ERROR, _("Unable to extract public key from certificate"));
- goto err;
- }
-
- /* Extract the subjectKeyIdentifier extension field */
- if ((keypair->keyid = openssl_cert_keyid(cert)) == NULL) {
- Emsg0(M_ERROR, 0, _("Provided certificate does not include the required subjectKeyIdentifier extension."));
- goto err;
- }
-
- /* Validate the public key type (only RSA is supported) */
- if (EVP_PKEY_type(keypair->pubkey->type) != EVP_PKEY_RSA) {
- Emsg1(M_ERROR, 0, _("Unsupported key type provided: %d\n"), EVP_PKEY_type(keypair->pubkey->type));
- goto err;
- }
-
- return true;
-
-err:
- X509_free(cert);
- if (keypair->pubkey) {
- EVP_PKEY_free(keypair->pubkey);
- }
- return false;
-}
-
-/* Dispatch user PEM encryption callbacks */
-static int crypto_pem_callback_dispatch (char *buf, int size, int rwflag, void *userdata)
-{
- PEM_CB_CONTEXT *ctx = (PEM_CB_CONTEXT *) userdata;
- return (ctx->pem_callback(buf, size, ctx->pem_userdata));
-}
-
-/*
- * Load a PEM-encoded private key.
- * Returns: true on success
- * false on failure
- */
-int crypto_keypair_load_key (X509_KEYPAIR *keypair, const char *file,
- CRYPTO_PEM_PASSWD_CB *pem_callback,
- const void *pem_userdata)
-{
- BIO *bio;
- PEM_CB_CONTEXT ctx;
-
- /* Open the file */
- if (!(bio = BIO_new_file(file, "r"))) {
- openssl_post_errors(M_ERROR, _("Unable to open private key file"));
- return false;
- }
-
- /* Set up PEM encryption callback */
- if (pem_callback) {
- ctx.pem_callback = pem_callback;
- ctx.pem_userdata = pem_userdata;
- } else {
- ctx.pem_callback = crypto_default_pem_callback;
- ctx.pem_userdata = NULL;
- }
-
- keypair->privkey = PEM_read_bio_PrivateKey(bio, NULL, crypto_pem_callback_dispatch, &ctx);
- BIO_free(bio);
- if (!keypair->privkey) {
- openssl_post_errors(M_ERROR, _("Unable to read private key from file"));
- return false;
- }
-
- return true;
-}
-
-/*
- * Free memory associated with a keypair object.
- */
-void crypto_keypair_free (X509_KEYPAIR *keypair)
-{
- if (keypair->pubkey) {
- EVP_PKEY_free(keypair->pubkey);
- }
- if (keypair->privkey) {
- EVP_PKEY_free(keypair->privkey);
- }
- if (keypair->keyid) {
- M_ASN1_OCTET_STRING_free(keypair->keyid);
- }
- free(keypair);
-}
-
-/*
- * Create a new message digest context of the specified type
- * Returns: A pointer to a DIGEST object on success.
- * NULL on failure.
- */
-DIGEST *crypto_digest_new (crypto_digest_t type)
-{
- DIGEST *digest;
- const EVP_MD *md = NULL; /* Quell invalid uninitialized warnings */
-
- digest = (DIGEST *) malloc(sizeof(DIGEST));
- digest->type = type;
-
- /* Initialize the OpenSSL message digest context */
- EVP_MD_CTX_init(&digest->ctx);
-
- /* Determine the correct OpenSSL message digest type */
- switch (type) {
- case CRYPTO_DIGEST_MD5:
- md = EVP_md5();
- break;
- case CRYPTO_DIGEST_SHA1:
- md = EVP_sha1();
- break;
-#ifdef HAVE_SHA2
- case CRYPTO_DIGEST_SHA256:
- md = EVP_sha256();
- break;
- case CRYPTO_DIGEST_SHA512:
- md = EVP_sha512();
- break;
-#endif
- default:
- Emsg1(M_ERROR, 0, _("Unsupported digest type: %d\n"), type);
- goto err;
- }
-
- /* Initialize the backing OpenSSL context */
- if (EVP_DigestInit_ex(&digest->ctx, md, NULL) == 0) {
- goto err;
- }
-
- return digest;
-
-err:
- /* This should not happen, but never say never ... */
- openssl_post_errors(M_ERROR, _("OpenSSL digest initialization failed"));
- crypto_digest_free(digest);
- return NULL;
-}
-
-/*
- * Hash length bytes of data into the provided digest context.
- * Returns: true on success
- * false on failure
- */
-bool crypto_digest_update (DIGEST *digest, const void *data, size_t length) {
- if (EVP_DigestUpdate(&digest->ctx, data, length) == 0) {
- return true;
- } else {
- return false;
- }
-}
-
-/*
- * Finalize the data in digest, storing the result in dest and the result size
- * in length. The result size can be determined with crypto_digest_size().
- *
- * Returns: true on success
- * false on failure
- */
-bool crypto_digest_finalize (DIGEST *digest, void *dest, size_t *length) {
- if (!EVP_DigestFinal(&digest->ctx, (unsigned char *) dest, length)) {
- return false;
- } else {
- return true;
- }
-}
-
-/*
- * Free memory associated with a digest object.
- */
-void crypto_digest_free (DIGEST *digest)
-{
- EVP_MD_CTX_cleanup(&digest->ctx);
- free (digest);
-}
-
-/*
- * Create a new message signature context.
- * Returns: A pointer to a SIGNATURE object on success.
- * NULL on failure.
- */
-SIGNATURE *crypto_sign_new (void)
-{
- SIGNATURE *sig;
-
- sig = (SIGNATURE *) malloc(sizeof(SIGNATURE));
- if (!sig) {
- return NULL;
- }
-
- sig->sigData = SignatureData_new();
-
- if (!sig->sigData) {
- /* Allocation failed in OpenSSL */
- free(sig);
- return NULL;
- }
-
- /* Set the ASN.1 structure version number */
- ASN1_INTEGER_set(sig->sigData->version, BACULA_ASN1_VERSION);
-
- return sig;
-}
-
-/*
- * For a given public key, find the associated SignatureInfo record
- * and create a digest context for signature validation
- * Returns: CRYPTO_ERROR_NONE on success, with the newly allocated DIGEST in digest.
- * A crypto_error_t value on failure.
- */
-crypto_error_t crypto_sign_get_digest(SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST **digest)
-{
- STACK_OF(SignerInfo) *signers;
- SignerInfo *si;
- int i;
-
- signers = sig->sigData->signerInfo;
-
- for (i = 0; i < sk_SignerInfo_num(signers); i++) {
- si = sk_SignerInfo_value(signers, i);
- if (M_ASN1_OCTET_STRING_cmp(keypair->keyid, si->subjectKeyIdentifier) == 0) {
- /* Get the digest algorithm and allocate a digest context */
- switch (OBJ_obj2nid(si->digestAlgorithm)) {
- case NID_md5:
- *digest = crypto_digest_new(CRYPTO_DIGEST_MD5);
- break;
- case NID_sha1:
- *digest = crypto_digest_new(CRYPTO_DIGEST_SHA1);
- break;
-#ifdef HAVE_SHA2
- case NID_sha256:
- *digest = crypto_digest_new(CRYPTO_DIGEST_SHA256);
- break;
- case NID_sha512:
- *digest = crypto_digest_new(CRYPTO_DIGEST_SHA512);
- break;
-#endif
- default:
- *digest = NULL;
- return CRYPTO_ERROR_INVALID_DIGEST;
- }
-
- /* Shouldn't happen */
- if (*digest == NULL) {
- return CRYPTO_ERROR_INVALID_DIGEST;
- } else {
- return CRYPTO_ERROR_NONE;
- }
- }
- }
-
- return CRYPTO_ERROR_NOSIGNER;
-}
-
-/*
- * For a given signature, public key, and digest, verify the SIGNATURE.
- * Returns: CRYPTO_ERROR_NONE on success.
- * A crypto_error_t value on failure.
- */
-crypto_error_t crypto_sign_verify(SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST *digest)
-{
- STACK_OF(SignerInfo) *signers;
- SignerInfo *si;
- int ok, i;
- unsigned int sigLen;
-#if (OPENSSL_VERSION_NUMBER >= 0x0090800FL)
- const unsigned char *sigData;
-#else
- unsigned char *sigData;
-#endif
-
- signers = sig->sigData->signerInfo;
-
- /* Find the signer */
- for (i = 0; i < sk_SignerInfo_num(signers); i++) {
- si = sk_SignerInfo_value(signers, i);
- if (M_ASN1_OCTET_STRING_cmp(keypair->keyid, si->subjectKeyIdentifier) == 0) {
- /* Extract the signature data */
- sigLen = M_ASN1_STRING_length(si->signature);
- sigData = M_ASN1_STRING_data(si->signature);
-
- ok = EVP_VerifyFinal(&digest->ctx, sigData, sigLen, keypair->pubkey);
- if (ok >= 1) {
- return CRYPTO_ERROR_NONE;
- } else if (ok == 0) {
- return CRYPTO_ERROR_BAD_SIGNATURE;
- } else if (ok < 0) {
- /* Shouldn't happen */
- openssl_post_errors(M_ERROR, _("OpenSSL error occured"));
- return CRYPTO_ERROR_INTERNAL;
- }
- }
- }
-
- /* Signer wasn't found. */
- return CRYPTO_ERROR_NOSIGNER;
-}
-
-
-/*
- * Add a new signer
- * Returns: true on success
- * false on failure
- */
-int crypto_sign_add_signer(SIGNATURE *sig, DIGEST *digest, X509_KEYPAIR *keypair)
-{
- SignerInfo *si = NULL;
- unsigned char *buf = NULL;
- unsigned int len;
-
- si = SignerInfo_new();
-
- if (!si) {
- /* Allocation failed in OpenSSL */
- return false;
- }
-
- /* Set the ASN.1 structure version number */
- ASN1_INTEGER_set(si->version, BACULA_ASN1_VERSION);
-
- /* Set the digest algorithm identifier */
- switch (digest->type) {
- case CRYPTO_DIGEST_MD5:
- si->digestAlgorithm = OBJ_nid2obj(NID_md5);
- break;
- case CRYPTO_DIGEST_SHA1:
- si->digestAlgorithm = OBJ_nid2obj(NID_sha1);
- break;
-#ifdef HAVE_SHA2
- case CRYPTO_DIGEST_SHA256:
- si->digestAlgorithm = OBJ_nid2obj(NID_sha256);
- break;
- case CRYPTO_DIGEST_SHA512:
- si->digestAlgorithm = OBJ_nid2obj(NID_sha512);
- break;
-#endif
- default:
- /* This should never happen */
- goto err;
- }
-
- /* Drop the string allocated by OpenSSL, and add our subjectKeyIdentifier */
- M_ASN1_OCTET_STRING_free(si->subjectKeyIdentifier);
- si->subjectKeyIdentifier = M_ASN1_OCTET_STRING_dup(keypair->keyid);
-
- /* Set our signature algorithm. We currently require RSA */
- assert(EVP_PKEY_type(keypair->pubkey->type) == EVP_PKEY_RSA);
- /* This is slightly evil. Reach into the MD structure and grab the key type */
- si->signatureAlgorithm = OBJ_nid2obj(digest->ctx.digest->pkey_type);
-
- /* Finalize/Sign our Digest */
- len = EVP_PKEY_size(keypair->privkey);
- buf = (unsigned char *) malloc(len);
- if (!EVP_SignFinal(&digest->ctx, buf, &len, keypair->privkey)) {
- openssl_post_errors(M_ERROR, _("Signature creation failed"));
- goto err;
- }
-
- /* Add the signature to the SignerInfo structure */
- if (!M_ASN1_OCTET_STRING_set(si->signature, buf, len)) {
- /* Allocation failed in OpenSSL */
- goto err;
- }
-
- /* No longer needed */
- free(buf);
-
- /* Push the new SignerInfo structure onto the stack */
- sk_SignerInfo_push(sig->sigData->signerInfo, si);
-
- return true;
-
-err:
- if (si) {
- SignerInfo_free(si);
- }
- if (buf) {
- free(buf);
- }
-
- return false;
-}
-
-/*
- * Encodes the SignatureData structure. The length argument is used to specify the
- * size of dest. A length of 0 will cause no data to be written to dest, and the
- * required length to be written to length. The caller can then allocate sufficient
- * space for the output.
- *
- * Returns: true on success, stores the encoded data in dest, and the size in length.
- * false on failure.
- */
-int crypto_sign_encode(SIGNATURE *sig, void *dest, size_t *length)
-{
- if (*length == 0) {
- *length = i2d_SignatureData(sig->sigData, NULL);
- return true;
- }
-
- *length = i2d_SignatureData(sig->sigData, (unsigned char **) &dest);
- return true;
-}
-
-/*
- * Decodes the SignatureData structure. The length argument is used to specify the
- * size of sigData.
- *
- * Returns: SIGNATURE instance on success.
- * NULL on failure.
-
- */
-
-SIGNATURE *crypto_sign_decode(const void *sigData, size_t length)
-{
- SIGNATURE *sig;
-#if (OPENSSL_VERSION_NUMBER >= 0x0090800FL)
- const unsigned char *p = (const unsigned char *) sigData;
-#else
- unsigned char *p = (unsigned char *) sigData;
-#endif
-
- sig = (SIGNATURE *) malloc(sizeof(SIGNATURE));
- if (!sig) {
- return NULL;
- }
-
- /* d2i_SignatureData modifies the supplied pointer */
- sig->sigData = d2i_SignatureData(NULL, &p, length);
-
- if (!sig->sigData) {
- /* Allocation / Decoding failed in OpenSSL */
- openssl_post_errors(M_ERROR, _("Signature decoding failed"));
- return NULL;
- }
-
- return sig;
-}
-
-/*
- * Free memory associated with a signature object.
- */
-void crypto_sign_free(SIGNATURE *sig)
-{
- SignatureData_free(sig->sigData);
- free (sig);
-}
-
-/*
- * Create a new encryption recipient.
- * Returns: A pointer to a CRYPTO_RECIPIENTS object on success.
- * NULL on failure.
- */
-CRYPTO_RECIPIENTS *crypto_recipients_new (crypto_cipher_t cipher, alist *pubkeys)
-{
- CRYPTO_RECIPIENTS *cr;
- X509_KEYPAIR *keypair;
- const EVP_CIPHER *ec;
- unsigned char *iv;
- int iv_len;
-
- /* Allocate our recipient description structures */
- cr = (CRYPTO_RECIPIENTS *) malloc(sizeof(CRYPTO_RECIPIENTS));
- if (!cr) {
- return NULL;
- }
-
- cr->cryptoData = CryptoData_new();
-
- if (!cr->cryptoData) {
- /* Allocation failed in OpenSSL */
- free(cr);
- return NULL;
- }
-
- /* Set the ASN.1 structure version number */
- ASN1_INTEGER_set(cr->cryptoData->version, BACULA_ASN1_VERSION);
-
- /*
- * Acquire a cipher instance and set the ASN.1 cipher NID
- */
- switch (cipher) {
- case CRYPTO_CIPHER_AES_128_CBC:
- /* AES 128 bit CBC */
- cr->cryptoData->contentEncryptionAlgorithm = OBJ_nid2obj(NID_aes_128_cbc);
- ec = EVP_aes_128_cbc();
- break;
- case CRYPTO_CIPHER_AES_192_CBC:
- /* AES 192 bit CBC */
- cr->cryptoData->contentEncryptionAlgorithm = OBJ_nid2obj(NID_aes_192_cbc);
- ec = EVP_aes_192_cbc();
- break;
- case CRYPTO_CIPHER_AES_256_CBC:
- /* AES 256 bit CBC */
- cr->cryptoData->contentEncryptionAlgorithm = OBJ_nid2obj(NID_aes_256_cbc);
- ec = EVP_aes_256_cbc();
- break;
- case CRYPTO_CIPHER_BLOWFISH_CBC:
- /* Blowfish CBC */
- cr->cryptoData->contentEncryptionAlgorithm = OBJ_nid2obj(NID_bf_cbc);
- ec = EVP_bf_cbc();
- break;
- default:
- Emsg0(M_ERROR, 0, _("Unsupported cipher type specified\n"));
- crypto_recipients_free(cr);
- return NULL;
- }
-
- /* Generate a symmetric session key */
- cr->session_key_len = EVP_CIPHER_key_length(ec);
- if (RAND_bytes(cr->session_key, cr->session_key_len) <= 0) {
- /* OpenSSL failure */
- crypto_recipients_free(cr);
- return NULL;
- }
-
- /* Generate an IV if possible */
- if ((iv_len = EVP_CIPHER_iv_length(ec))) {
- iv = (unsigned char *) malloc(iv_len);
- if (!iv) {
- /* Malloc failure */
- crypto_recipients_free(cr);
- return NULL;
- }
-
- /* Generate random IV */
- if (RAND_bytes(iv, iv_len) <= 0) {
- /* OpenSSL failure */
- crypto_recipients_free(cr);
- return NULL;
- }
-
- /* Store it in our ASN.1 structure */
- if (!M_ASN1_OCTET_STRING_set(cr->cryptoData->iv, iv, iv_len)) {
- /* Allocation failed in OpenSSL */
- crypto_recipients_free(cr);
- return NULL;
- }
- }
-
- /*
- * Create RecipientInfo structures for supplied
- * public keys.
- */
- foreach_alist(keypair, pubkeys) {
- RecipientInfo *ri;
- unsigned char *ekey;
- int ekey_len;
-
- ri = RecipientInfo_new();
- if (!ri) {
- /* Allocation failed in OpenSSL */
- crypto_recipients_free(cr);
- return NULL;
- }
-
- /* Set the ASN.1 structure version number */
- ASN1_INTEGER_set(ri->version, BACULA_ASN1_VERSION);
-
- /* Drop the string allocated by OpenSSL, and add our subjectKeyIdentifier */
- M_ASN1_OCTET_STRING_free(ri->subjectKeyIdentifier);
- ri->subjectKeyIdentifier = M_ASN1_OCTET_STRING_dup(keypair->keyid);
-
- /* Set our key encryption algorithm. We currently require RSA */
- assert(keypair->pubkey && EVP_PKEY_type(keypair->pubkey->type) == EVP_PKEY_RSA);
- ri->keyEncryptionAlgorithm = OBJ_nid2obj(NID_rsaEncryption);
-
- /* Encrypt the session key */
- ekey = (unsigned char *) malloc(EVP_PKEY_size(keypair->pubkey));
- if (!ekey) {
- RecipientInfo_free(ri);
- crypto_recipients_free(cr);
- return NULL;
- }
-
- if ((ekey_len = EVP_PKEY_encrypt(ekey, cr->session_key, cr->session_key_len, keypair->pubkey)) <= 0) {
- /* OpenSSL failure */
- RecipientInfo_free(ri);
- crypto_recipients_free(cr);
- free(ekey);
- return NULL;
- }
-
- /* Store it in our ASN.1 structure */
- if (!M_ASN1_OCTET_STRING_set(ri->encryptedKey, ekey, ekey_len)) {
- /* Allocation failed in OpenSSL */
- RecipientInfo_free(ri);
- crypto_recipients_free(cr);
- free(ekey);
- return NULL;
- }
-
- /* Free the encrypted key buffer */
- free(ekey);
-
- /* Push the new RecipientInfo structure onto the stack */
- sk_RecipientInfo_push(cr->cryptoData->recipientInfo, ri);
- }
-
- return cr;
-}
-
-/*
- * Free memory associated with a crypto recipient object.
- */
-void crypto_recipients_free (CRYPTO_RECIPIENTS *cr)
-{
- CryptoData_free(cr->cryptoData);
- free(cr);
-}
-
-/*
- * Perform global initialization of OpenSSL
- * This function is not thread safe.
- * Returns: 0 on success
- * errno on failure
- */
-int init_crypto (void)
-{
- int stat;
-
- if ((stat = openssl_init_threads()) != 0) {
- Emsg1(M_ABORT, 0, _("Unable to init OpenSSL threading: ERR=%s\n"), strerror(stat));
- }
-
- /* Load libssl and libcrypto human-readable error strings */
- SSL_load_error_strings();
-
- /* Register OpenSSL ciphers */
- SSL_library_init();
-
- if (!openssl_seed_prng()) {
- Emsg0(M_ERROR_TERM, 0, _("Failed to seed OpenSSL PRNG\n"));
- }
-
- crypto_initialized = true;
-
- return stat;
-}
-
-/*
- * Perform global cleanup of OpenSSL
- * All cryptographic operations must be completed before calling this function.
- * This function is not thread safe.
- * Returns: 0 on success
- * errno on failure
- */
-int cleanup_crypto (void)
-{
- /*
- * Ensure that we've actually been initialized; Doing this here decreases the
- * complexity of client's termination/cleanup code.
- */
- if (!crypto_initialized) {
- return 0;
- }
-
- if (!openssl_save_prng()) {
- Emsg0(M_ERROR, 0, _("Failed to save OpenSSL PRNG\n"));
- }
-
- openssl_cleanup_threads();
-
- /* Free libssl and libcrypto error strings */
- ERR_free_strings();
-
- /* Free memory used by PRNG */
- RAND_cleanup();
-
- crypto_initialized = false;
-
- return 0;
-}
-
-
-#else /* HAVE_OPENSSL */
-# error No encryption library available
-#endif /* HAVE_OPENSSL */
-
-#else /* HAVE_CRYPTO */
-
-/*
- * Cryptography Support Disabled
- */
-
-/* Message Digest Structure */
-struct Digest {
- crypto_digest_t type;
- union {
- SHA1Context sha1;
- MD5Context md5;
- };
-};
-
-/* Dummy Signature Structure */
-struct Signature {
-};
-
-DIGEST *crypto_digest_new (crypto_digest_t type)
-{
- DIGEST *digest;
-
- digest = (DIGEST *) malloc(sizeof(DIGEST));
- digest->type = type;
-
- switch (type) {
- case CRYPTO_DIGEST_MD5:
- MD5Init(&digest->md5);
- break;
- case CRYPTO_DIGEST_SHA1:
- SHA1Init(&digest->sha1);
- break;
- default:
- Emsg0(M_ERROR, 0, _("Unsupported digest type specified\n"));
- free(digest);
- return NULL;
- }
-
- return (digest);
-}
-
-bool crypto_digest_update (DIGEST *digest, const void *data, size_t length) {
- switch (digest->type) {
- case CRYPTO_DIGEST_MD5:
- /* Doesn't return anything ... */
- MD5Update(&digest->md5, (unsigned char *) data, length);
- return true;
- case CRYPTO_DIGEST_SHA1:
- int ret;
- if ((ret = SHA1Update(&digest->sha1, (const u_int8_t *) data, length)) == shaSuccess) {
- return true;
- } else {
- Emsg1(M_ERROR, 0, _("SHA1Update() returned an error: %d\n"), ret);
- return false;
- }
- break;
- default:
- return false;
- }
-}
-
-bool crypto_digest_finalize (DIGEST *digest, void *dest, size_t *length) {
-
- switch (digest->type) {
- case CRYPTO_DIGEST_MD5:
- /* Guard against programmer error by either the API client or
- * an out-of-sync CRYPTO_DIGEST_MAX_SIZE */
- assert(*length >= CRYPTO_DIGEST_MD5_SIZE);
- *length = CRYPTO_DIGEST_MD5_SIZE;
- /* Doesn't return anything ... */
- MD5Final((unsigned char *) dest, &digest->md5);
- return true;
- case CRYPTO_DIGEST_SHA1:
- /* Guard against programmer error by either the API client or
- * an out-of-sync CRYPTO_DIGEST_MAX_SIZE */
- assert(*length >= CRYPTO_DIGEST_SHA1_SIZE);
- *length = CRYPTO_DIGEST_SHA1_SIZE;
- if (SHA1Final(&digest->sha1, (u_int8_t *) dest) == shaSuccess) {
- return true;
- } else {
- return false;
- }
- break;
- default:
- return false;
- }
-
- return false;
-}
-
-void crypto_digest_free (DIGEST *digest)
-{
- free (digest);
-}
-
-/* Dummy routines */
-int init_crypto (void) { return 0; }
-int cleanup_crypto (void) { return 0; }
-
-SIGNATURE *crypto_sign_new (void) { return NULL; }
-
-crypto_error_t crypto_sign_get_digest (SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST **digest) { return CRYPTO_ERROR_INTERNAL; }
-crypto_error_t crypto_sign_verify (SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST *digest) { return CRYPTO_ERROR_INTERNAL; }
-
-int crypto_sign_add_signer (SIGNATURE *sig, DIGEST *digest, X509_KEYPAIR *keypair) { return false; }
-int crypto_sign_encode (SIGNATURE *sig, void *dest, size_t *length) { return false; }
-
-SIGNATURE *crypto_sign_decode (const void *sigData, size_t length) { return false; }
-void crypto_sign_free (SIGNATURE *sig) { }
-
-
-X509_KEYPAIR *crypto_keypair_new (void) { return NULL; }
-X509_KEYPAIR *crypto_keypair_dup (X509_KEYPAIR *keypair) { return NULL; }
-int crypto_keypair_load_cert (X509_KEYPAIR *keypair, const char *file) { return false; }
-int crypto_keypair_load_key (X509_KEYPAIR *keypair, const char *file, CRYPTO_PEM_PASSWD_CB *pem_callback, const void *pem_userdata) { return false; }
-void crypto_keypair_free (X509_KEYPAIR *keypair) { }
-
-CRYPTO_RECIPIENTS *crypto_recipients_new (crypto_cipher_t cipher, alist *pubkeys) { return NULL; }
-void crypto_recipients_free (CRYPTO_RECIPIENTS *cr) { }
-
-#endif /* HAVE_CRYPTO */
-
-/* Shared Code */
-
-/*
- * Default PEM encryption passphrase callback.
- * Returns an empty password.
- */
-int crypto_default_pem_callback(char *buf, int size, const void *userdata)
-{
- bstrncpy(buf, "", size);
- return (strlen(buf));
-}
-
-/*
- * Returns the ASCII name of the digest type.
- * Returns: ASCII name of digest type.
- */
-const char *crypto_digest_name (DIGEST *digest) {
- switch (digest->type) {
- case CRYPTO_DIGEST_MD5:
- return "MD5";
- case CRYPTO_DIGEST_SHA1:
- return "SHA1";
- case CRYPTO_DIGEST_SHA256:
- return "SHA256";
- case CRYPTO_DIGEST_SHA512:
- return "SHA512";
- case CRYPTO_DIGEST_NONE:
- return "None";
- default:
- return "Invalid Digest Type";
- }
-
-}
-
-/*
- * Given a stream type, returns the associated
- * crypto_digest_t value.
- */
-crypto_digest_t crypto_digest_stream_type (int stream) {
- switch (stream) {
- case STREAM_MD5_DIGEST:
- return CRYPTO_DIGEST_MD5;
- case STREAM_SHA1_DIGEST:
- return CRYPTO_DIGEST_SHA1;
- case STREAM_SHA256_DIGEST:
- return CRYPTO_DIGEST_SHA256;
- case STREAM_SHA512_DIGEST:
- return CRYPTO_DIGEST_SHA512;
- default:
- return CRYPTO_DIGEST_NONE;
- }
-}
-
-/*
- * * Given a crypto_error_t value, return the associated
- * * error string
- * */
-const char *crypto_strerror(crypto_error_t error) {
- switch (error) {
- case CRYPTO_ERROR_NONE:
- return "No error";
- case CRYPTO_ERROR_NOSIGNER:
- return "Signer not found";
- case CRYPTO_ERROR_INVALID_DIGEST:
- return "Unsupported digest algorithm";
- case CRYPTO_ERROR_BAD_SIGNATURE:
- return "Signature is invalid";
- case CRYPTO_ERROR_INTERNAL:
- /* This shouldn't happen */
- return "Internal error";
- default:
- return "Unknown error";
- }
-}
+++ /dev/null
-/*
- * crypto.h Encryption support functions
- *
- * Author: Landon Fuller <landonf@opendarwin.org>
- *
- * Version $Id$
- *
- * Copyright (C) 2005 Kern Sibbald
- *
- * This file was contributed to the Bacula project by Landon Fuller.
- *
- * Landon Fuller has been granted a perpetual, worldwide, non-exclusive,
- * no-charge, royalty-free, irrevocable copyright * license to reproduce,
- * prepare derivative works of, publicly display, publicly perform,
- * sublicense, and distribute the original work contributed by Landon Fuller
- * to the Bacula project in source or object form.
- *
- * If you wish to license these contributions under an alternate open source
- * license please contact Landon Fuller <landonf@opendarwin.org>.
- */
-/*
- Copyright (C) 2005 Kern Sibbald
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
-
- */
-
-#ifndef __CRYPTO_H_
-#define __CRYPTO_H_
-
-/* Opaque X509 Public/Private Key Pair Structure */
-typedef struct X509_Keypair X509_KEYPAIR;
-
-/* Opaque Message Digest Structure */
-typedef struct Digest DIGEST;
-
-/* Opaque Message Signature Structure */
-typedef struct Signature SIGNATURE;
-
-/* Opaque PKI Symmetric Key Data Structure */
-typedef struct Crypto_Recipients CRYPTO_RECIPIENTS;
-
-/* PEM Decryption Passphrase Callback */
-typedef int (CRYPTO_PEM_PASSWD_CB) (char *buf, int size, const void *userdata);
-
-/* Digest Types */
-typedef enum {
- /* These are stored on disk and MUST NOT change */
- CRYPTO_DIGEST_NONE = 0,
- CRYPTO_DIGEST_MD5 = 1,
- CRYPTO_DIGEST_SHA1 = 2,
- CRYPTO_DIGEST_SHA256 = 3,
- CRYPTO_DIGEST_SHA512 = 4
-} crypto_digest_t;
-
-/* Cipher Types */
-typedef enum {
- /* These are not stored on disk */
- CRYPTO_CIPHER_AES_128_CBC,
- CRYPTO_CIPHER_AES_192_CBC,
- CRYPTO_CIPHER_AES_256_CBC,
- CRYPTO_CIPHER_BLOWFISH_CBC
-} crypto_cipher_t;
-
-/* Crypto API Errors */
-typedef enum {
- CRYPTO_ERROR_NONE = 0, /* No error */
- CRYPTO_ERROR_NOSIGNER = 1, /* Signer not found */
- CRYPTO_ERROR_INVALID_DIGEST = 2, /* Unsupported digest algorithm */
- CRYPTO_ERROR_BAD_SIGNATURE = 3, /* Signature is invalid */
- CRYPTO_ERROR_INTERNAL = 4 /* Internal Error */
-} crypto_error_t;
-
-/* Message Digest Sizes */
-#define CRYPTO_DIGEST_MD5_SIZE 16 /* 128 bits */
-#define CRYPTO_DIGEST_SHA1_SIZE 20 /* 160 bits */
-#define CRYPTO_DIGEST_SHA256_SIZE 32 /* 256 bits */
-#define CRYPTO_DIGEST_SHA512_SIZE 64 /* 512 bits */
-
-/* Maximum Message Digest Size */
-#ifdef HAVE_OPENSSL
-
-/* Let OpenSSL define it */
-#define CRYPTO_DIGEST_MAX_SIZE EVP_MAX_MD_SIZE
-
-#else /* HAVE_OPENSSL */
-
-/*
- * This must be kept in sync with the available message digest algorithms.
- * Just in case someone forgets, I've added assertions
- * to crypto_digest_finalize().
- * MD5: 128 bits
- * SHA-1: 160 bits
- */
-#ifndef HAVE_SHA2
-#define CRYPTO_DIGEST_MAX_SIZE CRYPTO_DIGEST_SHA1_SIZE
-#else
-#define CRYPTO_DIGEST_MAX_SIZE CRYPTO_DIGEST_SHA512_SIZE
-#endif
-
-#endif /* HAVE_OPENSSL */
-
-#endif /* __CRYPTO_H_ */
#include "bacula.h"
-#define PAD_LEN 64 /* PAD length */
-#define SIG_LEN MD5HashSize /* MD5 digest length */
+#define PAD_LEN 64 /* PAD length */
+#define SIG_LEN 16 /* MD5 signature length */
void
hmac_md5(
case JT_BACKUP:
case JT_VERIFY:
case JT_RESTORE:
- case JT_MIGRATE:
- case JT_COPY:
case JT_ADMIN:
num_jobs_run++;
last_job.Errors = jcr->Errors;
#include "smartall.h"
#include "alist.h"
#include "dlist.h"
-#include "base64.h"
#include "bits.h"
#include "btime.h"
-#include "crypto.h"
#include "mem_pool.h"
#include "message.h"
-#include "openssl.h"
#include "lex.h"
#include "parse_conf.h"
#include "tls.h"
#ifndef __BMD5_H
#define __BMD5_H
-#define MD5HashSize 16
-
struct MD5Context {
uint32_t buf[4];
uint32_t bits[2];
+++ /dev/null
-/*
- * openssl.c OpenSSL support functions
- *
- * Author: Landon Fuller <landonf@opendarwin.org>
- *
- * Version $Id$
- *
- * Copyright (C) 2005 Kern Sibbald
- *
- * This file was contributed to the Bacula project by Landon Fuller.
- *
- * Landon Fuller has been granted a perpetual, worldwide, non-exclusive,
- * no-charge, royalty-free, irrevocable copyright license to reproduce,
- * prepare derivative works of, publicly display, publicly perform,
- * sublicense, and distribute the original work contributed by Landon Fuller
- * to the Bacula project in source or object form.
- *
- * If you wish to license these contributions under an alternate open source
- * license please contact Landon Fuller <landonf@opendarwin.org>.
- */
-/*
- Copyright (C) 2005 Kern Sibbald
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
-
- */
-
-
-#include "bacula.h"
-#include <assert.h>
-
-#ifdef HAVE_OPENSSL
-
-/* Array of mutexes for use with OpenSSL static locking */
-static pthread_mutex_t *mutexes;
-
-/* OpenSSL dynamic locking structure */
-struct CRYPTO_dynlock_value {
- pthread_mutex_t mutex;
-};
-
-
-/*
- * Post all per-thread openssl errors
- */
-void openssl_post_errors(int code, const char *errstring)
-{
- char buf[512];
- unsigned long sslerr;
-
- /* Pop errors off of the per-thread queue */
- while((sslerr = ERR_get_error()) != 0) {
- /* Acquire the human readable string */
- ERR_error_string_n(sslerr, (char *) &buf, sizeof(buf));
- Emsg2(M_ERROR, 0, "%s: ERR=%s\n", errstring, buf);
- }
-}
-
-/*
- * Return an OpenSSL thread ID
- * Returns: thread ID
- *
- */
-static unsigned long get_openssl_thread_id (void)
-{
- /* Comparison without use of pthread_equal() is mandated by the OpenSSL API */
- return ((unsigned long) pthread_self());
-}
-
-/*
- * Allocate a dynamic OpenSSL mutex
- */
-static struct CRYPTO_dynlock_value *openssl_create_dynamic_mutex (const char *file, int line)
-{
- struct CRYPTO_dynlock_value *dynlock;
- int stat;
-
- dynlock = (struct CRYPTO_dynlock_value *) malloc(sizeof(struct CRYPTO_dynlock_value));
-
- if ((stat = pthread_mutex_init(&dynlock->mutex, NULL)) != 0) {
- Emsg1(M_ABORT, 0, _("Unable to init mutex: ERR=%s\n"), strerror(stat));
- }
-
- return dynlock;
-}
-
-static void openssl_update_dynamic_mutex (int mode, struct CRYPTO_dynlock_value *dynlock, const char *file, int line)
-{
- if (mode & CRYPTO_LOCK) {
- P(dynlock->mutex);
- } else {
- V(dynlock->mutex);
- }
-}
-
-static void openssl_destroy_dynamic_mutex (struct CRYPTO_dynlock_value *dynlock, const char *file, int line)
-{
- int stat;
-
- if ((stat = pthread_mutex_destroy(&dynlock->mutex)) != 0) {
- Emsg1(M_ABORT, 0, _("Unable to destroy mutex: ERR=%s\n"), strerror(stat));
- }
-
- free(dynlock);
-}
-
-/*
- * (Un)Lock a static OpenSSL mutex
- */
-static void openssl_update_static_mutex (int mode, int i, const char *file, int line)
-{
- if (mode & CRYPTO_LOCK) {
- P(mutexes[i]);
- } else {
- V(mutexes[i]);
- }
-}
-
-/*
- * Initialize OpenSSL thread support
- * Returns: 0 on success
- * errno on failure
- */
-int openssl_init_threads (void)
-{
- int i, numlocks;
- int stat;
-
-
- /* Set thread ID callback */
- CRYPTO_set_id_callback(get_openssl_thread_id);
-
- /* Initialize static locking */
- numlocks = CRYPTO_num_locks();
- mutexes = (pthread_mutex_t *) malloc(numlocks * sizeof(pthread_mutex_t));
- for (i = 0; i < numlocks; i++) {
- if ((stat = pthread_mutex_init(&mutexes[i], NULL)) != 0) {
- Emsg1(M_ERROR, 0, _("Unable to init mutex: ERR=%s\n"), strerror(stat));
- return stat;
- }
- }
-
- /* Set static locking callback */
- CRYPTO_set_locking_callback(openssl_update_static_mutex);
-
- /* Initialize dyanmic locking */
- CRYPTO_set_dynlock_create_callback(openssl_create_dynamic_mutex);
- CRYPTO_set_dynlock_lock_callback(openssl_update_dynamic_mutex);
- CRYPTO_set_dynlock_destroy_callback(openssl_destroy_dynamic_mutex);
-
- return 0;
-}
-
-/*
- * Clean up OpenSSL threading support
- */
-void openssl_cleanup_threads (void)
-{
- int i, numlocks;
- int stat;
-
- /* Unset thread ID callback */
- CRYPTO_set_id_callback(NULL);
-
- /* Deallocate static lock mutexes */
- numlocks = CRYPTO_num_locks();
- for (i = 0; i < numlocks; i++) {
- if ((stat = pthread_mutex_destroy(&mutexes[i])) != 0) {
- /* We don't halt execution, reporting the error should be sufficient */
- Emsg1(M_ERROR, 0, _("Unable to destroy mutex: ERR=%s\n"), strerror(stat));
- }
- }
-
- /* Unset static locking callback */
- CRYPTO_set_locking_callback(NULL);
-
- /* Free static lock array */
- free(mutexes);
-
- /* Unset dynamic locking callbacks */
- CRYPTO_set_dynlock_create_callback(NULL);
- CRYPTO_set_dynlock_lock_callback(NULL);
- CRYPTO_set_dynlock_destroy_callback(NULL);
-}
-
-
-/*
- * Seed OpenSSL PRNG
- * Returns: 1 on success
- * 0 on failure
- */
-int openssl_seed_prng (void)
-{
- const char *names[] = { "/dev/urandom", "/dev/random", NULL };
- int i;
-
- // ***FIXME***
- // Win32 Support
- // Read saved entropy?
-
- for (i = 0; names[i]; i++) {
- if (RAND_load_file(names[i], 1024) != -1) {
- /* Success */
- return 1;
- }
- }
-
- /* Fail */
- return 0;
-}
-
-/*
- * Save OpenSSL Entropy
- * Returns: 1 on success
- * 0 on failure
- */
-int openssl_save_prng (void)
-{
- // ***FIXME***
- // Implement PRNG state save
- return 1;
-}
-
-#endif /* HAVE_OPENSSL */
+++ /dev/null
-/*
- * openssl.h OpenSSL support functions
- *
- * Author: Landon Fuller <landonf@opendarwin.org>
- *
- * Version $Id$
- *
- * Copyright (C) 2005 Kern Sibbald
- *
- * This file was contributed to the Bacula project by Landon Fuller.
- *
- * Landon Fuller has been granted a perpetual, worldwide, non-exclusive,
- * no-charge, royalty-free, irrevocable copyright * license to reproduce,
- * prepare derivative works of, publicly display, publicly perform,
- * sublicense, and distribute the original work contributed by Landon Fuller
- * to the Bacula project in source or object form.
- *
- * If you wish to license these contributions under an alternate open source
- * license please contact Landon Fuller <landonf@opendarwin.org>.
- */
-/*
- Copyright (C) 2005 Kern Sibbald
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- version 2 as amended with additional clauses defined in the
- file LICENSE in the main source directory.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- the file LICENSE for additional details.
-
- */
-
-#ifndef __OPENSSL_H_
-#define __OPENSSL_H_
-
-#ifdef HAVE_OPENSSL
-void openssl_post_errors (int code, const char *errstring);
-int openssl_init_threads (void);
-void openssl_cleanup_threads (void);
-int openssl_seed_prng (void);
-int openssl_save_prng (void);
-#endif /* HAVE_OPENSSL */
-
-#endif /* __OPENSSL_H_ */
{
unsigned int i, j;
struct MD5Context md5c;
- unsigned char digest[CRYPTO_DIGEST_MD5_SIZE];
+ unsigned char signature[16];
char sig[100];
if (pass == 1) {
MD5Init(&md5c);
MD5Update(&md5c, (unsigned char *) (lc->str), lc->str_len);
- MD5Final(digest, &md5c);
- for (i = j = 0; i < sizeof(digest); i++) {
- sprintf(&sig[j], "%02x", digest[i]);
+ MD5Final(signature, &md5c);
+ for (i = j = 0; i < sizeof(signature); i++) {
+ sprintf(&sig[j], "%02x", signature[i]);
j += 2;
}
*(item->value) = bstrdup(sig);
uint32_t bcrc32(uint8_t *buf, int len);
-/* crypto.c */
-int init_crypto (void);
-int cleanup_crypto (void);
-DIGEST * crypto_digest_new (crypto_digest_t type);
-bool crypto_digest_update (DIGEST *digest, const void *data, size_t length);
-bool crypto_digest_finalize (DIGEST *digest, void *dest, size_t *length);
-void crypto_digest_free (DIGEST *digest);
-SIGNATURE * crypto_sign_new (void);
-crypto_error_t crypto_sign_get_digest (SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST **digest);
-crypto_error_t crypto_sign_verify (SIGNATURE *sig, X509_KEYPAIR *keypair, DIGEST *digest);
-int crypto_sign_add_signer (SIGNATURE *sig, DIGEST *digest, X509_KEYPAIR *keypair);
-int crypto_sign_encode (SIGNATURE *sig, void *dest, size_t *length);
-SIGNATURE * crypto_sign_decode (const void *sigData, size_t length);
-void crypto_sign_free (SIGNATURE *sig);
-CRYPTO_RECIPIENTS *crypto_recipients_new (crypto_cipher_t cipher, alist *pubkeys);
-void crypto_recipients_free (CRYPTO_RECIPIENTS *cr);
-X509_KEYPAIR * crypto_keypair_new (void);
-X509_KEYPAIR * crypto_keypair_dup (X509_KEYPAIR *keypair);
-int crypto_keypair_load_cert (X509_KEYPAIR *keypair, const char *file);
-int crypto_keypair_load_key (X509_KEYPAIR *keypair, const char *file, CRYPTO_PEM_PASSWD_CB *pem_callback, const void *pem_userdata);
-void crypto_keypair_free (X509_KEYPAIR *keypair);
-int crypto_default_pem_callback (char *buf, int size, const void *userdata);
-const char * crypto_digest_name (DIGEST *digest);
-crypto_digest_t crypto_digest_stream_type (int stream);
-const char * crypto_strerror (crypto_error_t error);
-
/* daemon.c */
void daemon_start ();
/* tls.c */
+int init_tls (void);
+int cleanup_tls (void);
+
TLS_CONTEXT *new_tls_context (const char *ca_certfile,
const char *ca_certdir,
const char *certfile,
const char *keyfile,
- CRYPTO_PEM_PASSWD_CB *pem_callback,
+ TLS_PEM_PASSWD_CB *pem_callback,
const void *pem_userdata,
const char *dhfile,
bool verify_peer);
*/
-
#include "bacula.h"
#include <assert.h>
/* No anonymous ciphers, no <128 bit ciphers, no export ciphers, no MD5 ciphers */
#define TLS_DEFAULT_CIPHERS "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"
+/* Array of mutexes for use with OpenSSL static locking */
+static pthread_mutex_t *mutexes;
+
+/* OpenSSL dynamic locking structure */
+struct CRYPTO_dynlock_value {
+ pthread_mutex_t mutex;
+};
+
+/* Are we initialized? */
+static int tls_initialized = false;
+
/* TLS Context Structure */
struct TLS_Context {
SSL_CTX *openssl;
- CRYPTO_PEM_PASSWD_CB *pem_callback;
+ TLS_PEM_PASSWD_CB *pem_callback;
const void *pem_userdata;
};
SSL *openssl;
};
+/* post all per-thread openssl errors */
+static void openssl_post_errors(int code, const char *errstring)
+{
+ char buf[512];
+ unsigned long sslerr;
+
+ /* Pop errors off of the per-thread queue */
+ while((sslerr = ERR_get_error()) != 0) {
+ /* Acquire the human readable string */
+ ERR_error_string_n(sslerr, (char *) &buf, sizeof(buf));
+ Emsg2(M_ERROR, 0, "%s: ERR=%s\n", errstring, buf);
+ }
+}
+
/*
* OpenSSL certificate verification callback.
* OpenSSL has already performed internal certificate verification.
return ok;
}
+/*
+ * Default PEM encryption passphrase callback.
+ * Returns an empty password.
+ */
+static int tls_default_pem_callback(char *buf, int size, const void *userdata)
+{
+ bstrncpy(buf, "", size);
+ return (strlen(buf));
+}
+
/* Dispatch user PEM encryption callbacks */
-static int tls_pem_callback_dispatch (char *buf, int size, int rwflag, void *userdata)
+static int openssl_pem_callback_dispatch (char *buf, int size, int rwflag, void *userdata)
{
TLS_CONTEXT *ctx = (TLS_CONTEXT *) userdata;
return (ctx->pem_callback(buf, size, ctx->pem_userdata));
*/
TLS_CONTEXT *new_tls_context(const char *ca_certfile, const char *ca_certdir,
const char *certfile, const char *keyfile,
- CRYPTO_PEM_PASSWD_CB *pem_callback,
+ TLS_PEM_PASSWD_CB *pem_callback,
const void *pem_userdata, const char *dhfile,
bool verify_peer)
{
ctx->pem_callback = pem_callback;
ctx->pem_userdata = pem_userdata;
} else {
- ctx->pem_callback = crypto_default_pem_callback;
+ ctx->pem_callback = tls_default_pem_callback;
ctx->pem_userdata = NULL;
}
- SSL_CTX_set_default_passwd_cb(ctx->openssl, tls_pem_callback_dispatch);
+ SSL_CTX_set_default_passwd_cb(ctx->openssl, openssl_pem_callback_dispatch);
SSL_CTX_set_default_passwd_cb_userdata(ctx->openssl, (void *) ctx);
/*
for (j = 0; j < sk_CONF_VALUE_num(val); j++) {
nval = sk_CONF_VALUE_value(val, j);
if (strcmp(nval->name, "DNS") == 0) {
- if (strcasecmp(nval->value, host) == 0) {
+ if (strcasecmp(nval->name, host) == 0) {
auth_success = true;
goto success;
}
return (openssl_bsock_readwrite(bsock, ptr, nbytes, false));
}
+/*
+ * Return an OpenSSL thread ID
+ * Returns: thread ID
+ *
+ */
+static unsigned long get_openssl_thread_id (void)
+{
+ /* Comparison without use of pthread_equal() is mandated by the OpenSSL API */
+ return ((unsigned long) pthread_self());
+}
+
+/*
+ * Allocate a dynamic OpenSSL mutex
+ */
+static struct CRYPTO_dynlock_value *openssl_create_dynamic_mutex (const char *file, int line)
+{
+ struct CRYPTO_dynlock_value *dynlock;
+ int stat;
+
+ dynlock = (struct CRYPTO_dynlock_value *) malloc(sizeof(struct CRYPTO_dynlock_value));
+
+ if ((stat = pthread_mutex_init(&dynlock->mutex, NULL)) != 0) {
+ Emsg1(M_ABORT, 0, _("Unable to init mutex: ERR=%s\n"), strerror(stat));
+ }
+
+ return dynlock;
+}
+
+static void openssl_update_dynamic_mutex (int mode, struct CRYPTO_dynlock_value *dynlock, const char *file, int line)
+{
+ if (mode & CRYPTO_LOCK) {
+ P(dynlock->mutex);
+ } else {
+ V(dynlock->mutex);
+ }
+}
+
+static void openssl_destroy_dynamic_mutex (struct CRYPTO_dynlock_value *dynlock, const char *file, int line)
+{
+ int stat;
+
+ if ((stat = pthread_mutex_destroy(&dynlock->mutex)) != 0) {
+ Emsg1(M_ABORT, 0, _("Unable to destroy mutex: ERR=%s\n"), strerror(stat));
+ }
+
+ free(dynlock);
+}
+
+/*
+ * (Un)Lock a static OpenSSL mutex
+ */
+static void openssl_update_static_mutex (int mode, int i, const char *file, int line)
+{
+ if (mode & CRYPTO_LOCK) {
+ P(mutexes[i]);
+ } else {
+ V(mutexes[i]);
+ }
+}
+
+/*
+ * Initialize OpenSSL thread support
+ * Returns: 0 on success
+ * errno on failure
+ */
+static int openssl_init_threads (void)
+{
+ int i, numlocks;
+ int stat;
+
+
+ /* Set thread ID callback */
+ CRYPTO_set_id_callback(get_openssl_thread_id);
+
+ /* Initialize static locking */
+ numlocks = CRYPTO_num_locks();
+ mutexes = (pthread_mutex_t *) malloc(numlocks * sizeof(pthread_mutex_t));
+ for (i = 0; i < numlocks; i++) {
+ if ((stat = pthread_mutex_init(&mutexes[i], NULL)) != 0) {
+ Emsg1(M_ERROR, 0, _("Unable to init mutex: ERR=%s\n"), strerror(stat));
+ return stat;
+ }
+ }
+
+ /* Set static locking callback */
+ CRYPTO_set_locking_callback(openssl_update_static_mutex);
+
+   /* Initialize dynamic locking */
+ CRYPTO_set_dynlock_create_callback(openssl_create_dynamic_mutex);
+ CRYPTO_set_dynlock_lock_callback(openssl_update_dynamic_mutex);
+ CRYPTO_set_dynlock_destroy_callback(openssl_destroy_dynamic_mutex);
+
+ return 0;
+}
+
+/*
+ * Clean up OpenSSL threading support
+ */
+static void openssl_cleanup_threads (void)
+{
+ int i, numlocks;
+ int stat;
+
+ /* Unset thread ID callback */
+ CRYPTO_set_id_callback(NULL);
+
+ /* Deallocate static lock mutexes */
+ numlocks = CRYPTO_num_locks();
+ for (i = 0; i < numlocks; i++) {
+ if ((stat = pthread_mutex_destroy(&mutexes[i])) != 0) {
+ /* We don't halt execution, reporting the error should be sufficient */
+ Emsg1(M_ERROR, 0, _("Unable to destroy mutex: ERR=%s\n"), strerror(stat));
+ }
+ }
+
+ /* Unset static locking callback */
+ CRYPTO_set_locking_callback(NULL);
+
+ /* Free static lock array */
+ free(mutexes);
+
+ /* Unset dynamic locking callbacks */
+ CRYPTO_set_dynlock_create_callback(NULL);
+ CRYPTO_set_dynlock_lock_callback(NULL);
+ CRYPTO_set_dynlock_destroy_callback(NULL);
+}
+
+
+/*
+ * Seed TLS PRNG
+ * Returns: 1 on success
+ * 0 on failure
+ */
+static int seed_tls_prng (void)
+{
+ const char *names[] = { "/dev/urandom", "/dev/random", NULL };
+ int i;
+
+ // ***FIXME***
+ // Win32 Support
+ // Read saved entropy?
+
+ for (i = 0; names[i]; i++) {
+ if (RAND_load_file(names[i], 1024) != -1) {
+ /* Success */
+ return 1;
+ }
+ }
+
+ /* Fail */
+ return 0;
+}
+
+/*
+ * Save TLS Entropy
+ * Returns: 1 on success
+ * 0 on failure
+ */
+static int save_tls_prng (void)
+{
+ // ***FIXME***
+ // Implement PRNG state save
+ return 1;
+}
+
+/*
+ * Perform global initialization of TLS
+ * This function is not thread safe.
+ * Returns: 0 on success
+ * errno on failure
+ */
+int init_tls (void)
+{
+ int stat;
+
+ if ((stat = openssl_init_threads()) != 0) {
+ Emsg1(M_ABORT, 0, _("Unable to init OpenSSL threading: ERR=%s\n"), strerror(stat));
+ }
+
+ /* Load libssl and libcrypto human-readable error strings */
+ SSL_load_error_strings();
+
+ /* Register OpenSSL ciphers */
+ SSL_library_init();
+
+ if (!seed_tls_prng()) {
+ Emsg0(M_ERROR_TERM, 0, _("Failed to seed OpenSSL PRNG\n"));
+ }
+
+ tls_initialized = true;
+
+ return stat;
+}
+
+/*
+ * Perform global cleanup of TLS
+ * All TLS connections must be closed before calling this function.
+ * This function is not thread safe.
+ * Returns: 0 on success
+ * errno on failure
+ */
+int cleanup_tls (void)
+{
+ /*
+ * Ensure that we've actually been initialized; Doing this here decreases the
+ * complexity of client's termination/cleanup code.
+ */
+ if (!tls_initialized) {
+ return 0;
+ }
+
+ if (!save_tls_prng()) {
+ Emsg0(M_ERROR, 0, _("Failed to save OpenSSL PRNG\n"));
+ }
+
+ openssl_cleanup_threads();
+
+ /* Free libssl and libcrypto error strings */
+ ERR_free_strings();
+
+ /* Free memory used by PRNG */
+ RAND_cleanup();
+
+ tls_initialized = false;
+
+ return 0;
+}
+
#else /* HAVE_OPENSSL */
# error No TLS implementation available.
#endif /* !HAVE_OPENSSL */
#else
/* Dummy routines */
+int init_tls(void) { return 0; }
+int cleanup_tls (void) { return 0; }
TLS_CONTEXT *new_tls_context(const char *ca_certfile, const char *ca_certdir,
const char *certfile, const char *keyfile,
- CRYPTO_PEM_PASSWD_CB *pem_callback,
+ TLS_PEM_PASSWD_CB *pem_callback,
const void *pem_userdata, const char *dhfile,
bool verify_peer)
{
/* Opaque TLS Connection Structure */
typedef struct TLS_Connection TLS_CONNECTION;
+/* PEM Decryption Passphrase Callback */
+typedef int (TLS_PEM_PASSWD_CB) (char *buf, int size, const void *userdata);
+
#endif /* __TLS_H_ */
*/
+
#include "bacula.h"
#include "jcr.h"
#include "findlib/find.h"
case JT_ADMIN:
str = _("Admin");
break;
- case JT_MIGRATE:
- str = _("Migrate");
- break;
- case JT_COPY:
- str = _("Copy");
- break;
default:
str = _("Unknown Type");
break;
stream_to_ascii(buf2, rec.Stream, rec.FileIndex), rec.data_len);
/* Send attributes and digest to Director for Catalog */
- if (stream == STREAM_UNIX_ATTRIBUTES || stream == STREAM_UNIX_ATTRIBUTES_EX ||
- crypto_digest_stream_type(stream) != CRYPTO_DIGEST_NONE) {
+ if (stream == STREAM_UNIX_ATTRIBUTES || stream == STREAM_MD5_SIGNATURE ||
+ stream == STREAM_UNIX_ATTRIBUTES_EX || stream == STREAM_SHA1_SIGNATURE) {
if (!jcr->no_attributes) {
if (are_attributes_spooled(jcr)) {
jcr->dir_bsock->spool = true;
#endif
break;
- case STREAM_MD5_DIGEST:
- case STREAM_SHA1_DIGEST:
- case STREAM_SHA256_DIGEST:
- case STREAM_SHA512_DIGEST:
- break;
-
- case STREAM_SIGNED_DIGEST:
- // TODO landonf: Investigate signed digest support in the storage daemon
+ case STREAM_MD5_SIGNATURE:
+ case STREAM_SHA1_SIGNATURE:
break;
case STREAM_PROGRAM_NAMES:
static int create_fileset_record(B_DB *db, FILESET_DBR *fsr);
static int create_jobmedia_record(B_DB *db, JCR *jcr);
static JCR *create_jcr(JOB_DBR *jr, DEV_RECORD *rec, uint32_t JobId);
-static int update_digest_record(B_DB *db, char *digest, DEV_RECORD *rec, int type);
+static int update_SIG_record(B_DB *db, char *SIGbuf, DEV_RECORD *rec, int type);
/* Global variables */
DEVICE *dev = dcr->dev;
JCR *bjcr = dcr->jcr;
DEV_BLOCK *block = dcr->block;
- char digest[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)];
if (rec->data_len > 0) {
mr.VolBytes += rec->data_len + WRITE_RECHDR_LENGTH; /* Accumulate Volume bytes */
free_jcr(mjcr); /* done using JCR */
break;
- case STREAM_MD5_DIGEST:
- bin_to_base64(digest, (char *)rec->data, CRYPTO_DIGEST_MD5_SIZE);
+ case STREAM_MD5_SIGNATURE:
+ char MD5buf[50];
+ bin_to_base64(MD5buf, (char *)rec->data, 16); /* encode 16 bytes */
if (verbose > 1) {
- Pmsg1(000, _("Got MD5 record: %s\n"), digest);
+ Pmsg1(000, _("Got MD5 record: %s\n"), MD5buf);
}
- update_digest_record(db, digest, rec, CRYPTO_DIGEST_MD5);
+ update_SIG_record(db, MD5buf, rec, MD5_SIG);
break;
- case STREAM_SHA1_DIGEST:
- bin_to_base64(digest, (char *)rec->data, CRYPTO_DIGEST_SHA1_SIZE);
+ case STREAM_SHA1_SIGNATURE:
+ char SIGbuf[50];
+ bin_to_base64(SIGbuf, (char *)rec->data, 20); /* encode 20 bytes */
if (verbose > 1) {
- Pmsg1(000, _("Got SHA1 record: %s\n"), digest);
+ Pmsg1(000, _("Got SHA1 record: %s\n"), SIGbuf);
}
- update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA1);
+ update_SIG_record(db, SIGbuf, rec, SHA1_SIG);
break;
- case STREAM_SHA256_DIGEST:
- bin_to_base64(digest, (char *)rec->data, CRYPTO_DIGEST_SHA256_SIZE);
- if (verbose > 1) {
- Pmsg1(000, _("Got SHA256 record: %s\n"), digest);
- }
- update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA256);
- break;
-
- case STREAM_SHA512_DIGEST:
- bin_to_base64(digest, (char *)rec->data, CRYPTO_DIGEST_SHA512_SIZE);
- if (verbose > 1) {
- Pmsg1(000, _("Got SHA512 record: %s\n"), digest);
- }
- update_digest_record(db, digest, rec, CRYPTO_DIGEST_SHA512);
- break;
-
- case STREAM_SIGNED_DIGEST:
- // TODO landonf: Investigate signed digest support in bscan
- if (verbose > 1) {
- Pmsg0(000, _("Got signed digest record\n"));
- }
- break;
case STREAM_PROGRAM_NAMES:
if (verbose) {
/*
* Simulate the database call that updates the MD5/SHA1 record
*/
-static int update_digest_record(B_DB *db, char *digest, DEV_RECORD *rec, int type)
+static int update_SIG_record(B_DB *db, char *SIGbuf, DEV_RECORD *rec, int type)
{
JCR *mjcr;
return 1;
}
- if (!db_add_digest_to_file_record(bjcr, db, mjcr->FileId, digest, type)) {
+ if (!db_add_SIG_to_file_record(bjcr, db, mjcr->FileId, SIGbuf, type)) {
Pmsg1(0, _("Could not add MD5/SHA1 to File record. ERR=%s\n"), db_strerror(db));
free_jcr(mjcr);
return 0;
break; /* read second part of record */
}
ok = record_cb(dcr, rec);
- if (crypto_digest_stream_type(rec->Stream) != CRYPTO_DIGEST_NONE) {
+ if (rec->Stream == STREAM_MD5_SIGNATURE || rec->Stream == STREAM_SHA1_SIGNATURE) {
Dmsg3(300, "Done FI=%u before set_eof pos %u:%u\n", rec->FileIndex,
dev->file, dev->block_num);
if (match_set_eof(jcr->bsr, rec) && try_repositioning(jcr, rec, dev)) {
return "WIN32-DATA";
case STREAM_WIN32_GZIP_DATA:
return "WIN32-GZIP";
- case STREAM_MD5_DIGEST:
+ case STREAM_MD5_SIGNATURE:
return "MD5";
- case STREAM_SHA1_DIGEST:
+ case STREAM_SHA1_SIGNATURE:
return "SHA1";
case STREAM_GZIP_DATA:
return "GZIP";
return "MACOS-RSRC";
case STREAM_HFSPLUS_ATTRIBUTES:
return "HFSPLUS-ATTR";
- case STREAM_SHA256_DIGEST:
- return "SHA256";
- case STREAM_SHA512_DIGEST:
- return "SHA512";
- case STREAM_SIGNED_DIGEST:
- return "SIGNED-DIGEST";
case -STREAM_UNIX_ATTRIBUTES:
return "contUATTR";
case -STREAM_FILE_DATA:
return "contWIN32-DATA";
case -STREAM_WIN32_GZIP_DATA:
return "contWIN32-GZIP";
- case -STREAM_MD5_DIGEST:
+ case -STREAM_MD5_SIGNATURE:
return "contMD5";
- case -STREAM_SHA1_DIGEST:
+ case -STREAM_SHA1_SIGNATURE:
return "contSHA1";
case -STREAM_GZIP_DATA:
return "contGZIP";
return "contMACOS-RSRC";
case -STREAM_HFSPLUS_ATTRIBUTES:
return "contHFSPLUS-ATTR";
- case -STREAM_SHA256_DIGEST:
- return "contSHA256";
- case -STREAM_SHA512_DIGEST:
- return "contSHA512";
- case -STREAM_SIGNED_DIGEST:
- return "contSIGNED-DIGEST";
default:
sprintf(buf, "%d", stream);
return buf;
*
*/
/*
- Copyright (C) 2000-2005 Kern Sibbald
+ Copyright (C) 2000-2006 Kern Sibbald
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
rctx.device_name = device_name;
stat = search_res_for_device(rctx);
if (stat == 1) { /* found available device */
- rctx.suitable_device = true;
Dmsg1(100, "Suitable device found=%s\n", device_name);
ok = true;
break;
} else if (stat == 0) { /* device busy */
Dmsg1(100, "Suitable busy device found=%s\n", device_name);
- rctx.suitable_device = true; /* but it is busy, so continue looking */
+ } else {
+ /* otherwise error */
+ Dmsg0(100, "No suitable device found.\n");
}
- /* otherwise error */
}
if (ok) {
break;
return -1; /* no use waiting */
}
+ rctx.suitable_device = true;
Dmsg2(100, "Try reserve %s jobid=%d\n", rctx.device->hdr.name,
rctx.jcr->JobId);
dcr = new_dcr(rctx.jcr, rctx.device->dev);
*/
#undef VERSION
-#define VERSION "1.39.3"
-#define BDATE "17 December 2005"
-#define LSMDATE "17Dec05"
+#define VERSION "1.38.3"
+#define BDATE "04 January 2006"
+#define LSMDATE "04Jan06"
/* Debug flags */
#undef DEBUG
#define TRACE_FILE 1
/* If this is set stdout will not be closed on startup */
-#define DEVELOPER 1
+/* #define DEVELOPER 1 */
/* #define USE_BSNPRINTF */
-@erase "$(INTDIR)\bsys.obj"
-@erase "$(INTDIR)\btime.obj"
-@erase "$(INTDIR)\btimers.obj"
+ -@erase "$(INTDIR)\chksum.obj"
-@erase "$(INTDIR)\compat.obj"
-@erase "$(INTDIR)\cram-md5.obj"
-@erase "$(INTDIR)\crc32.obj"
-@erase "$(INTDIR)\create_file.obj"
- -@erase "$(INTDIR)\crypto.obj"
-@erase "$(INTDIR)\daemon.obj"
-@erase "$(INTDIR)\dlist.obj"
-@erase "$(INTDIR)\edit.obj"
"$(INTDIR)\bsys.obj" \
"$(INTDIR)\btime.obj" \
"$(INTDIR)\btimers.obj" \
+ "$(INTDIR)\chksum.obj" \
"$(INTDIR)\compat.obj" \
"$(INTDIR)\cram-md5.obj" \
"$(INTDIR)\crc32.obj" \
"$(INTDIR)\create_file.obj" \
- "$(INTDIR)\crypto.obj" \
"$(INTDIR)\daemon.obj" \
"$(INTDIR)\dlist.obj" \
"$(INTDIR)\edit.obj" \
-@erase "$(INTDIR)\btime.sbr"
-@erase "$(INTDIR)\btimers.obj"
-@erase "$(INTDIR)\btimers.sbr"
+ -@erase "$(INTDIR)\chksum.obj"
+ -@erase "$(INTDIR)\chksum.sbr"
-@erase "$(INTDIR)\compat.obj"
-@erase "$(INTDIR)\compat.sbr"
-@erase "$(INTDIR)\cram-md5.obj"
-@erase "$(INTDIR)\crc32.sbr"
-@erase "$(INTDIR)\create_file.obj"
-@erase "$(INTDIR)\create_file.sbr"
- -@erase "$(INTDIR)\crypto.obj"
- -@erase "$(INTDIR)\crypto.sbr"
-@erase "$(INTDIR)\daemon.obj"
-@erase "$(INTDIR)\daemon.sbr"
-@erase "$(INTDIR)\dlist.obj"
"$(INTDIR)\bsys.sbr" \
"$(INTDIR)\btime.sbr" \
"$(INTDIR)\btimers.sbr" \
+ "$(INTDIR)\chksum.sbr" \
"$(INTDIR)\compat.sbr" \
"$(INTDIR)\cram-md5.sbr" \
"$(INTDIR)\crc32.sbr" \
"$(INTDIR)\create_file.sbr" \
- "$(INTDIR)\crypto.sbr" \
"$(INTDIR)\daemon.sbr" \
"$(INTDIR)\dlist.sbr" \
"$(INTDIR)\edit.sbr" \
"$(INTDIR)\bsys.obj" \
"$(INTDIR)\btime.obj" \
"$(INTDIR)\btimers.obj" \
+ "$(INTDIR)\chksum.obj" \
"$(INTDIR)\compat.obj" \
"$(INTDIR)\cram-md5.obj" \
"$(INTDIR)\crc32.obj" \
"$(INTDIR)\create_file.obj" \
- "$(INTDIR)\crypto.obj" \
"$(INTDIR)\daemon.obj" \
"$(INTDIR)\dlist.obj" \
"$(INTDIR)\edit.obj" \
!ENDIF
-SOURCE=..\compat\compat.cpp
+SOURCE=..\filed\chksum.cpp
!IF "$(CFG)" == "baculafd - Win32 Release"
-"$(INTDIR)\compat.obj" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\chksum.obj" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ELSEIF "$(CFG)" == "baculafd - Win32 Debug"
-"$(INTDIR)\compat.obj" "$(INTDIR)\compat.sbr" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\chksum.obj" "$(INTDIR)\chksum.sbr" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ENDIF
-SOURCE="..\lib\cram-md5.cpp"
+
+SOURCE=..\compat\compat.cpp
!IF "$(CFG)" == "baculafd - Win32 Release"
-"$(INTDIR)\cram-md5.obj" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\compat.obj" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ELSEIF "$(CFG)" == "baculafd - Win32 Debug"
-"$(INTDIR)\cram-md5.obj" "$(INTDIR)\cram-md5.sbr" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\compat.obj" "$(INTDIR)\compat.sbr" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ENDIF
-SOURCE=..\lib\crc32.cpp
+SOURCE="..\lib\cram-md5.cpp"
!IF "$(CFG)" == "baculafd - Win32 Release"
-"$(INTDIR)\crc32.obj" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\cram-md5.obj" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ELSEIF "$(CFG)" == "baculafd - Win32 Debug"
-"$(INTDIR)\crc32.obj" "$(INTDIR)\crc32.sbr" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\cram-md5.obj" "$(INTDIR)\cram-md5.sbr" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ENDIF
-SOURCE=..\findlib\create_file.cpp
+SOURCE=..\lib\crc32.cpp
!IF "$(CFG)" == "baculafd - Win32 Release"
-"$(INTDIR)\create_file.obj" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\crc32.obj" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ELSEIF "$(CFG)" == "baculafd - Win32 Debug"
-"$(INTDIR)\create_file.obj" "$(INTDIR)\create_file.sbr" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\crc32.obj" "$(INTDIR)\crc32.sbr" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ENDIF
-
-SOURCE=..\lib\crypto.cpp
+SOURCE=..\findlib\create_file.cpp
!IF "$(CFG)" == "baculafd - Win32 Release"
-"$(INTDIR)\crypto.obj" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\create_file.obj" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ELSEIF "$(CFG)" == "baculafd - Win32 Debug"
-"$(INTDIR)\crypto.obj" "$(INTDIR)\crypto.sbr" : $(SOURCE) "$(INTDIR)"
+"$(INTDIR)\create_file.obj" "$(INTDIR)\create_file.sbr" : $(SOURCE) "$(INTDIR)"
$(CPP) $(CPP_PROJ) $(SOURCE)
!ENDIF
-
SOURCE=..\lib\daemon.cpp
!IF "$(CFG)" == "baculafd - Win32 Release"
-@erase "$(INTDIR)\console_conf.obj"
-@erase "$(INTDIR)\cram-md5.obj"
-@erase "$(INTDIR)\crc32.obj"
- -@erase "$(INTDIR)\crypto.obj"
-@erase "$(INTDIR)\dlist.obj"
-@erase "$(INTDIR)\edit.obj"
-@erase "$(INTDIR)\getopt.obj"
"$(INTDIR)\console_conf.obj" \
"$(INTDIR)\cram-md5.obj" \
"$(INTDIR)\crc32.obj" \
- "$(INTDIR)\crypto.obj" \
"$(INTDIR)\dlist.obj" \
"$(INTDIR)\edit.obj" \
"$(INTDIR)\getopt.obj" \
-@erase "$(INTDIR)\console_conf.obj"
-@erase "$(INTDIR)\cram-md5.obj"
-@erase "$(INTDIR)\crc32.obj"
- -@erase "$(INTDIR)\crypto.obj"
-@erase "$(INTDIR)\dlist.obj"
-@erase "$(INTDIR)\edit.obj"
-@erase "$(INTDIR)\getopt.obj"
"$(INTDIR)\console_conf.obj" \
"$(INTDIR)\cram-md5.obj" \
"$(INTDIR)\crc32.obj" \
- "$(INTDIR)\crypto.obj" \
"$(INTDIR)\dlist.obj" \
"$(INTDIR)\edit.obj" \
"$(INTDIR)\getopt.obj" \
$(CPP) $(CPP_PROJ) $(SOURCE)
-SOURCE=..\lib\crypto.cpp
-
-"$(INTDIR)\crypto.obj" : $(SOURCE) "$(INTDIR)"
- $(CPP) $(CPP_PROJ) $(SOURCE)
-
-
-
SOURCE=..\lib\dlist.cpp
"$(INTDIR)\dlist.obj" : $(SOURCE) "$(INTDIR)"
--- /dev/null
+#include "../../filed/chksum.c"
+++ /dev/null
-#include "../../lib/crypto.c"
..\lib\btime
..\lib\cram-md5
..\lib\crc32
-..\lib\crypto
..\lib\daemon
..\lib\dlist
..\lib\edit
-@erase "$(INTDIR)\btime.obj"
-@erase "$(INTDIR)\cram-md5.obj"
-@erase "$(INTDIR)\crc32.obj"
- -@erase "$(INTDIR)\crypto.obj"
-@erase "$(INTDIR)\daemon.obj"
-@erase "$(INTDIR)\dlist.obj"
-@erase "$(INTDIR)\edit.obj"
"$(INTDIR)\btime.obj" \
"$(INTDIR)\cram-md5.obj" \
"$(INTDIR)\crc32.obj" \
- "$(INTDIR)\crypto.obj" \
"$(INTDIR)\daemon.obj" \
"$(INTDIR)\dlist.obj" \
"$(INTDIR)\edit.obj" \
-@erase "$(INTDIR)\cram-md5.sbr"
-@erase "$(INTDIR)\crc32.obj
-@erase "$(INTDIR)\crc32.sbr"
- -@erase "$(INTDIR)\crypto.obj
- -@erase "$(INTDIR)\crypto.sbr"
-@erase "$(INTDIR)\daemon.obj
-@erase "$(INTDIR)\daemon.sbr"
-@erase "$(INTDIR)\dlist.obj
"$(INTDIR)\btime.sbr" \
"$(INTDIR)\cram-md5.sbr" \
"$(INTDIR)\crc32.sbr" \
- "$(INTDIR)\crypto.sbr" \
"$(INTDIR)\daemon.sbr" \
"$(INTDIR)\dlist.sbr" \
"$(INTDIR)\edit.sbr" \
"$(INTDIR)\btime.obj" \
"$(INTDIR)\cram-md5.obj" \
"$(INTDIR)\crc32.obj" \
- "$(INTDIR)\crypto.obj" \
"$(INTDIR)\daemon.obj" \
"$(INTDIR)\dlist.obj" \
"$(INTDIR)\edit.obj" \
!ENDIF
-FILENAME=crypto
-SOURCE=..\lib\crypto.cpp
-!IF "$(CFG)" == "wx-console - Win32 Release"
-
-
-"$(INTDIR)\$(FILENAME).obj" : $(SOURCE) "$(INTDIR)"
- $(CPP) $(CPP_PROJ) $(SOURCE)
-
-
-!ELSEIF "$(CFG)" == "wx-console - Win32 Debug"
-
-
-"$(INTDIR)\$(FILENAME).obj" "$(INTDIR)\$(FILENAME).sbr" : $(SOURCE) "$(INTDIR)"
- $(CPP) $(CPP_PROJ) $(SOURCE)
-
-
-!ENDIF
-
-
FILENAME=daemon
SOURCE=..\lib\daemon.cpp
!IF "$(CFG)" == "wx-console - Win32 Release"
@$(MV) Makefile Makefile.bak
@$(SED) "/^# DO NOT DELETE:/,$$ d" Makefile.bak > Makefile
@$(ECHO) "# DO NOT DELETE: nice dependency list follows" >> Makefile
- @$(CXX) -S -M $(CPPFLAGS) $(CONS_CPPFLAGS) -I$(srcdir) -I$(basedir) $(CONSSRCS) >> Makefile
+ @$(CXX) -S -M $(CPPFLAGS) $(CONS_CPPFLAGS) -I$(srcdir) -I$(basedir) $(OPENSSL_INC) $(CONSSRCS) >> Makefile
@if test -f Makefile ; then \
$(RMF) Makefile.bak; \
else \
+++ /dev/null
-config.out
-build
-bin
-test.out
-weird-files
-weird-files2
-diff
-tmp
-working
-Makefile
-test1.out
-time.out
+++ /dev/null
-#
-# Makefile for Bacula regression testing
-#
-# Note, Makefile is built from Makefile.in, which you should not really
-# need to change, by envoking:
-#
-# ./config <user's configuration>
-# e.g.
-#
-# ./config kern.conf
-#
-#
-
-# suck in user's configuration
-@CONFIG@
-
-first_rule: all
-
-all:
-
-setup: bacula sed
-
-#
-# Some machines cannot handle the sticky bit and other garbage that
-# is in weird-files, so we load and run it only on Linux machines.
-#
-bacula: all
- @rm -rf bin build weird-files tmp
- (if test x`uname` = xLinux -o x`uname` = xFreeBSD ; then \
- tar xfz weird-files.tar.gz ;\
- fi)
- rm -rf tmp working
- mkdir tmp working
- echo "Doing: scripts/setup ${BACULA_SOURCE} ${EMAIL} ${WHICHDB} ${TCPWRAPPERS}"
- scripts/setup ${BACULA_SOURCE} ${EMAIL} ${WHICHDB} ${TCPWRAPPERS} ${SMTP_HOST}
-
-sed:
- echo "Doing: scripts/do_sed ${EMAIL} ${TAPE_DRIVE} ${AUTOCHANGER} ${AUTOCHANGER_PATH} ${TAPE_DRIVE1} ${SMTP_HOST}"
- scripts/do_sed ${EMAIL} ${TAPE_DRIVE} ${AUTOCHANGER} ${AUTOCHANGER_PATH} ${TAPE_DRIVE1} ${SMTP_HOST}
-
-# Run all non-root userid tests
-test:
- ./all-non-root-tests
-
-# run all file and tape tests
-full_test:
- ./all-tape-and-file-tests
-
-# These tests require you to run as root
-root_test:
- ./all-root-tests
-
-clean:
- scripts/cleanup
- rm -f /tmp/file-list
- rm -f tmp/* working/*
- rm -f test.out
- rm -f diff
- rm -f 1 2 3 scripts/1 scripts/2 scripts/3 tests/1 tests/2 tests/3
- @find . -name .#* -exec $(RMF) {} \;
-
-# Reset our userid after running as root
-reset:
- chown -R ${USER}:${USER} . tmp working
- scripts/cleanup
- rm -f /tmp/file-list tmp/file-list
- rm -f tmp/* working/*
-
-distclean: clean
- rm -rf bin build weird-files weird-files weird-files2 tmp working
- rm -f scripts/*.conf
-
+++ /dev/null
- Bacula Regression
- Kern Sibbald
-
-This is Bacula's regression script directory.
-
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-Warning!!!! Make sure not to run it on the same system
-with your production Catalog because the tables will all
-be cleared. You can run it on the your production system
-if you use a different database.
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-
-To set it up, create your personal configuration file, by
-copying prototype.conf to xxx.conf or simply editing prototype.conf
-directly.
-
-Then edit your conf file and define appropriate values
-for the variables that are in that file. If you want to see
-a real example, look at kern.conf, but please don't use my
-email address!
-
-Make sure that depkgs is pre-built if it isn't
-already: (cd your-depkgs; make sqlite).
-Using the .conf file, you can now select between any Catalog type:
-SQLite, SQLite3, MySQL, or PostgreSQL. Be aware, however, if you
-use an installed database on a production server, running these
-tests will delete all the tables !!!!!!!!!!!!!!!!!! I run my
-tests on a non-production machine, and in addition, I normally use
-SQLite as the database, while my production uses MySQL.
-
-Then do:
-
- ./config xxx.conf
- make setup
-
-You run the above one time. This will build a Makefile from
-Makefile.in and your xxx.conf file, copy the Bacula source,
-configure, build it, and configure all the Bacula scripts
-and conf files. If you change your source, you will need to
-redo this command.
-
-Then you can run any of the tests in the tests subdirectory.
-Each test whose name ends in -root requires you to be root for
-a resonable run. Each test is totally independent of any other
-test. Aside from the required "make setup", each test is totally
-self-initalizing and should clean up after itself.
-
-Not all the tests yet report OK. This is simply because there are
-some spurious differences that I haven't yet taken the time to
-eliminate. The working scrips as of 24 Apr 03 are (this is
-way out of date!):
-
-backup-bacula-test
-sparse-test
-compressed-test
-sparse-compressed-test
-two-jobs-test
-wierd-files-test
-verify-vol-test
-
-The tests expect you to execute them from the main regress
-directory!
-
-You can run all the disk based tests by doing:
-
- ./do_file
-
-You can run all the disk and most of the tape tests by doing:
-
- ./do_all
-
-Each of the above calls one or more scripts. By looking at the
-scripts available in this directory, you can see that there are a number
-of options for running tests.
-
-You can run them individually as:
-
- tests/two-jobs-test
-
-or all non-root tests (my normal testing under my account)
-
- ./all-non-root-tests
-
-or all tests (I only run these before a production release):
-
- su
- ./all-tests
-
-
-after running the root tests, while still root, it is a good idea
-to do:
-
- make reset
-
-this cleans up any files that may be created with root permissions.
-
-If you want to add more tests, do so by putting the shell script
-in the tests subdirectory. Be careful when adding (or better not)
-new clients, pools, and such to the test-bacula-dir.conf.in file
-as it may invalidate a good number of tests, which respond to
-questions by answering with a number (i.e. the order of the selection
-list is known). It might be better to add your own testb-bacula...
-configuration file.
-
-To avoid re-doing a make setup if you have made a change to the
-conf files, and you do not need a new copy of the source, you can simply do:
-
- make sed
-
-Debugging failed tests:
-The simplest thing to do is to edit tests/xxxx where xxxx is the name of
-the test, and change the line "debug=0" to "debug=1". If the test has
-not been updated to have the debug variable, please notify Kern, and I
-will be happy to fix it -- I am upgrading them one at a time.
+++ /dev/null
-#!/bin/sh
-#
-# Run all tape tests
-#
-tests/test0
-tests/two-volume-tape
-tests/incremental-2tape
-echo " "
-echo " "
-echo "2 Tape Test results"
-cat test.out
-scripts/cleanup
+++ /dev/null
-#!/bin/sh
-#
-# Run all tape tests
-#
-cp test.out test1.out
-tests/test0
-tests/backup-bacula-tape
-tests/bscan-tape
-tests/btape-fill-tape
-tests/fixed-block-size-tape
-tests/four-concurrent-jobs-tape
-tests/four-jobs-tape
-tests/incremental-tape
-tests/relabel-tape
-tests/restore-by-file-tape
-tests/small-file-size-tape
-tests/truncate-bug-tape
-tests/two-pool-tape
-tests/2drive-incremental-2tape
-echo " "
-echo " "
-echo "Test results"
-cat test.out
-scripts/cleanup
+++ /dev/null
-#!/bin/sh
-#
-# Run all tests
-#
-tests/test0
-echo " "
-tests/auto-label-test
-tests/backup-bacula-test
-tests/bextract-test
-tests/bscan-test
-tests/bsr-opt-test
-tests/compressed-test
-tests/concurrent-jobs-test
-tests/differential-test
-tests/four-concurrent-jobs-test
-tests/four-jobs-test
-tests/incremental-test
-tests/query-test
-tests/recycle-test
-tests/restore2-by-file-test
-tests/restore-by-file-test
-tests/restore-disk-seek-test
-tests/six-vol-test
-tests/span-vol-test
-tests/sparse-compressed-test
-tests/sparse-test
-tests/two-jobs-test
-tests/two-vol-test
-tests/verify-vol-test
-tests/weird-files2-test
-tests/weird-files-test
-echo " "
-echo "Test results"
-cat test.out
-scripts/cleanup
+++ /dev/null
-#!/bin/sh
-#
-# Run all root tests
-#
-rm -f test.out
-tests/dev-test-root
-tests/etc-test-root
-tests/lib-test-root
-tests/usr-tape-root
-cat test.out
-scripts/cleanup
+++ /dev/null
-#!/bin/sh
-#
-# Run all tests
-#
-./all-non-root-tests
-./endtime
-./all-non-root-tape-tests
+++ /dev/null
-#!/bin/sh
-#
-# Run all tests
-#
-./all-non-root-tests
-./all-root-tests
-cat test.out
-scripts/cleanup
+++ /dev/null
-#/bin/sh
-#
-# First argument is expected to be a user's configuration file
-#
-if ! test -e $1 ; then
- echo "Arg1 must specify a config file (e.g. prototype.conf)"
- exit 1
-fi
-sed -e "/@CONFIG@/r $1" -e "s/@CONFIG@//" Makefile.in >Makefile
+++ /dev/null
-#!/bin/sh
-/home/kern/bacula/bin/startmysql
-make setup
-./starttime
-if [ ! -e bin/tape_options ] ; then
- touch bin/tape_options
-fi
-./all-tape-and-file-tests
-./endtime
-./all-non-root-2tape-tests
-./endtime
+++ /dev/null
-#!/bin/sh
-make setup
-./all-non-root-tape-tests
+++ /dev/null
-#!/bin/sh
-# /home/kern/bacula/bin/startmysql
-make setup
-./starttime
-./all-non-root-tests
-./endtime
+++ /dev/null
-#!/usr/bin/python
-from time import time as now
-
-t = now()
-fn = open('time.out', 'r')
-s = fn.readline()
-fn.close()
-diff = t - float(s)
-h = int(diff / 3600)
-m = int((diff - h * 3600) / 60)
-sec = diff - h * 3600 - m * 60
-print 'Total time = %d:%02d:%02d or %d secs' % (h, m, sec, t - float(s))
+++ /dev/null
-#!/bin/sh
-./all-non-root-tests
+++ /dev/null
-# Where to get the source to be tested
-# BACULA_SOURCE="${HOME}/bacula/branch-1.36.2"
-BACULA_SOURCE="${HOME}/bacula/k"
-
-# Where to send email !!!!! Change me !!!!!!!
-EMAIL=kern@sibbald.com
-SMTP_HOST="matou.sibbald.com"
-
-# Full "default" path where to find sqlite (no quotes!)
-SQLITE_DIR=${HOME}/bacula/depkgs/sqlite
-
-TAPE_DRIVE="/dev/nsa0"
-# if you don't have an autochanger set AUTOCHANGER to /dev/null
-AUTOCHANGER="/dev/sg0"
-# For two drive tests -- set to /dev/null if you do not have it
-TAPE_DRIVE1="/dev/null"
-
-# This must be the path to the autochanger including its name
-AUTOCHANGER_PATH="/usr/local/sbin/mtx"
-
-# Set your database here
-WHICHDB?="--with-sqlite=${SQLITE_DIR}"
-#WHICHDB="--with-mysql=${HOME}/mysql"
-
-# Set this to "--with-tcp-wrappers" or "--without-tcp-wrappers"
-TCPWRAPPERS="--with-tcp-wrappers"
+++ /dev/null
-# Where to get the source to be tested
-# BACULA_SOURCE="${HOME}/bacula/branch-1.38.1"
-BACULA_SOURCE="${HOME}/bacula/k"
-
-# Where to send email !!!!! Change me !!!!!!!
-EMAIL=kern@sibbald.com
-SMTP_HOST="localhost"
-
-# Full "default" path where to find sqlite (no quotes!)
-#SQLITE_DIR=${HOME}/bacula/depkgs/sqlite3
-SQLITE_DIR=${HOME}/bacula/depkgs/sqlite
-
-TAPE_DRIVE="/dev/nst0"
-# if you don't have an autochanger set AUTOCHANGER to /dev/null
-AUTOCHANGER="/dev/sg3"
-# For two drive tests -- set to /dev/null if you do not have it
-TAPE_DRIVE1="/dev/nst1"
-
-# This must be the path to the autochanger including its name
-AUTOCHANGER_PATH="/usr/sbin/mtx"
-
-# Set your database here
-WHICHDB?="--with-sqlite=${SQLITE_DIR}"
-#WHICHDB="--with-mysql=${HOME}/mysql"
-#WHICHDB="--with-postgresql"
-
-# Set this to "--with-tcp-wrappers" or "--without-tcp-wrappers"
-TCPWRAPPERS="--with-tcp-wrappers"
-
+++ /dev/null
-tests/btape-fill-full-tape
-tests/eot-fail-tape
+++ /dev/null
-#
-# Prototype personal configuration file for the regression
-# scripts. Either edit this file directly, or better copy
-# it elsewhere so it won't get overwritten.
-#
-
-#
-# Where to get the source to be tested
-#
-BACULA_SOURCE=
-
-# Where to send email messages
-#
-EMAIL=
-SMTP_HOST=localhost
-
-# Full "default" path where to find sqlite.
-# This is only used if you do not specify a database override on the
-# make command.
-#
-# N.B. DON'T PUT QOUTES AROUND THE PATH.
-#
-SQLITE_DIR=${HOME}/bacula/depkgs/sqlite
-
-#
-# The device name of your tape drive if you have one
-#
-TAPE_DRIVE=/dev/nst0
-TAPE_DRIVE1=/dev/null
-
-#
-# if you don't have an autochanger set AUTOCHANGER to /dev/null
-#
-AUTOCHANGER=/dev/sg0
-
-#
-# This must be the path to the autochanger including its name
-#
-AUTOCHANGER_PATH=/bin/mtx
-
-# Set your database here
-WHICHDB?="--with-sqlite=${SQLITE_DIR}"
-#WHICHDB="--with-mysql=${HOME}/mysql"
-
-# Set this to "--with-tcp-wrappers" or "--without-tcp-wrappers"
-TCPWRAPPERS="--with-tcp-wrappers"
+++ /dev/null
-#!/bin/sh
-bin/bacula start
-bin/bconsole -c bin/bconsole.conf
+++ /dev/null
-new-test-bacula-dir.conf
-bacula-dir.conf
-bacula-fd.conf
-bacula-sd.conf
-console.conf
-bconsole.conf
-test-bacula-dir.conf
-test-bacula-fd.conf
-test-bacula-sd.conf
-test-console.conf
-testa-bacula-dir.conf
-bacula-dir-tape.conf
-bacula-sd-tape.conf
-bacula-sd-2tape.conf
-cleanup-tape
-cleanup-2tape
-prepare-two-tapes
-cleanup-2drive
-bacula-sd-2drive.conf
-bacula-sd-win32-tape.conf
-bacula-sd-2drive.conf
-bacula-dir-win32-tape.conf
-win32-bacula-dir-tape.conf
+++ /dev/null
-#
-# Default Bacula Director Configuration file
-#
-# The only thing that MUST be changed is to add one or more
-# file or directory names in the Include directive of the
-# FileSet resource.
-#
-# For Bacula release 1.33
-#
-# You might also want to change the default email address
-# from root to your address. See the "mail" and "operator"
-# directives in the Messages resource.
-#
-
-Director { # define myself
- Name = @hostname@-dir
- DIRport = 8101 # where we listen for UA connections
- QueryFile = "@scriptdir@/query.sql"
- WorkingDirectory = "@working_dir@"
- PidDirectory = "@piddir@"
- Maximum Concurrent Jobs = 4
- Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3"
- Messages = Daemon
-}
-
-#
-# Define the main nightly save backup job
-# By default, this job will back up to disk in /tmp
-Job {
- Name = "NightlySave"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = DDS-4
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
- Maximum Concurrent Jobs = 4
- SpoolData = yes
-# Prefer Mounted Volumes = no
-}
-
-Job {
- Name = "NightlySave1"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = DDS-4
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
- Maximum Concurrent Jobs = 4
- SpoolData = yes
-# Prefer Mounted Volumes = no
- Client Run Before Job = "/bin/sleep 120"
-}
-
-Job {
- Name = "NightlySave2"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = DDS-4
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
- Maximum Concurrent Jobs = 4
- SpoolData = yes
-# Prefer Mounted Volumes = no
-}
-
-
-# Standard Restore template, to be changed by Console program
-Job {
- Name = "RestoreFiles"
- Type = Restore
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = DDS-4
- Messages = Standard
- Pool = Default
- Where = /tmp/bacula-restores
-}
-
-
-# List of files to be backed up
-FileSet {
- Name = "Full Set"
- Include { Options { signature=MD5 }
- File = </tmp/file-list
- }
-}
-
-
-#
-# When to do the backups, full backup on first sunday of the month,
-# differential (i.e. incremental since full) every other sunday,
-# and incremental backups other days
-Schedule {
- Name = "WeeklyCycle"
- Run = Full 1st sun at 1:05
- Run = Differential 2nd-5th sun at 1:05
- Run = Incremental mon-sat at 1:05
-}
-
-# Client (File Services) to backup
-Client {
- Name = @hostname@-fd
- Address = @hostname@
- FDPort = 8102
- Catalog = MyCatalog
- Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
- File Retention = 30d # 30 days
- Job Retention = 180d # six months
- AutoPrune = yes # Prune expired Jobs/Files
- Maximum Concurrent Jobs = 4
-}
-
-# Definition of DDS tape storage device
-Storage {
- Name = DDS-4
- Address = @hostname@ # N.B. Use a fully qualified name here
- SDPort = 8103
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" # password for Storage daemon
- Device = DDS-4 # must be same as Device in Storage daemon
- Media Type = DDS-4 # must be same as MediaType in Storage daemon
- Maximum Concurrent Jobs = 4
-# Autochanger = yes
-}
-
-
-# Generic catalog service
-Catalog {
- Name = MyCatalog
- dbname = bacula; user = bacula; password = ""
-}
-
-# Reasonable message delivery -- send most everything to email address
-# and to the console
-Messages {
- Name = Standard
- mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
- operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: Intervention needed for %j\" %r"
- MailOnError = @job_email@ = all, !terminate
- operator = @job_email@ = mount
- console = all, !skipped, !terminate, !restored
-#
-# WARNING! the following will create a file that you must cycle from
-# time to time as it will grow indefinitely. However, it will
-# also keep all your messages if the scroll off the console.
-#
- append = "@working_dir@/log" = all, !skipped
-}
-
-#
-# Message delivery for daemon messages (no job).
-Messages {
- Name = Daemon
- mailcommand = "@sbindir@/bsmtp -h @smtp_host@ -f \"\(Bacula\) %r\" -s \"Bacula daemon message\" %r"
- mail = @job_email@ = all, !skipped
- console = all, !skipped, !saved
- append = "@working_dir@/log" = all, !skipped
-}
-
-
-# Default pool definition
-Pool {
- Name = Default
- Pool Type = Backup
- Recycle = yes # Bacula can automatically recycle Volumes
- AutoPrune = yes # Prune expired volumes
- Volume Retention = 365d # one year
- Accept Any Volume = yes # write on any volume in the pool
-}
-
-Pool {
- Name = Full
- Pool Type = Backup
- Recycle = yes # Bacula can automatically recycle Volumes
- AutoPrune = yes # Prune expired volumes
- Volume Retention = 365d # one year
- Accept Any Volume = yes # write on any volume in the pool
-}
-
-Pool {
- Name = Inc
- Pool Type = Backup
- Recycle = yes # Bacula can automatically recycle Volumes
- AutoPrune = yes # Prune expired volumes
- Volume Retention = 365d # one year
- Accept Any Volume = yes # write on any volume in the pool
-}
+++ /dev/null
-#
-# Default Bacula Storage Daemon Configuration file
-#
-# For Bacula release 1.33
-#
-# You may need to change the name of your tape drive
-# on the "Archive Device" directive in the Device
-# resource. If you change the Name and/or the
-# "Media Type" in the Device resource, please ensure
-# that dird.conf has corresponding changes.
-#
-
-Storage { # definition of myself
- Name = @hostname@-sd
- SDPort = 8103 # Director's port
- WorkingDirectory = "@working_dir@"
- Pid Directory = "@piddir@"
- Subsys Directory = "@subsysdir@"
-}
-
-#
-# List Directors who are permitted to contact Storage daemon
-#
-Director {
- Name = @hostname@-dir
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
-}
-
-#
-# Devices supported by this Storage daemon
-# To connect, the Director's bacula-dir.conf must have the
-# same Name and MediaType.
-#
-Autochanger {
- Name = DDS-4
- Changer Device = @autochanger@
- Changer Command ="@scriptdir@/mtx-changer %c %o %S %a %d"
- Device = Drive-0, Drive-1
-}
-
-Device {
- Name = Drive-0
- Media Type = DDS-4
- Archive Device = @tape_drive@
- AutomaticMount = yes; # when device opened, read it
- Autochanger = yes
- Drive Index = 0
- AlwaysOpen = yes;
- RemovableMedia = yes;
- @@sbindir@/tape_options
-# Maximum File Size = 1000000
-}
-
-Device {
- Name = Drive-1
- Media Type = DDS-4
- Archive Device = @tape_drive1@
- AutomaticMount = yes; # when device opened, read it
- Autochanger = yes
- Drive Index = 1
- AlwaysOpen = yes;
- RemovableMedia = yes;
- @@sbindir@/tape_options
-# Maximum File Size = 1000000
-}
-
-
-#
-# Send all messages to the Director,
-# mount messages also are sent to the email address
-#
-Messages {
- Name = Standard
- director = @hostname@-dir = all, !terminate
-}
+++ /dev/null
-#
-# Default Bacula Storage Daemon Configuration file
-#
-# For Bacula release 1.33
-#
-# You may need to change the name of your tape drive
-# on the "Archive Device" directive in the Device
-# resource. If you change the Name and/or the
-# "Media Type" in the Device resource, please ensure
-# that dird.conf has corresponding changes.
-#
-
-Storage { # definition of myself
- Name = @hostname@-sd
- SDPort = 8103 # Director's port
- WorkingDirectory = "@working_dir@"
- Pid Directory = "@piddir@"
- Subsys Directory = "@subsysdir@"
-}
-
-#
-# List Directors who are permitted to contact Storage daemon
-#
-Director {
- Name = @hostname@-dir
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
-}
-
-#
-# Devices supported by this Storage daemon
-# To connect, the Director's bacula-dir.conf must have the
-# same Name and MediaType.
-#
-Autochanger {
- Name = DDS-4
- Changer Device = @autochanger@
- Changer Command ="@scriptdir@/mtx-changer %c %o %S %a %d"
- Device = Drive-0
-}
-
-Device {
- Name = Drive-0 #
- Media Type = DDS-4
- Archive Device = @tape_drive@
- AutomaticMount = yes; # when device opened, read it
- Autochanger = yes
- Drive Index = 0
- AlwaysOpen = yes;
- RemovableMedia = yes;
- @@sbindir@/tape_options
-# Maximum File Size = 1000000
-}
-
-#
-# Send all messages to the Director,
-# mount messages also are sent to the email address
-#
-Messages {
- Name = Standard
- director = @hostname@-dir = all, !terminate
-}
+++ /dev/null
-#
-# Default Bacula Storage Daemon Configuration file
-#
-# For Bacula release 1.33
-#
-# You may need to change the name of your tape drive
-# on the "Archive Device" directive in the Device
-# resource. If you change the Name and/or the
-# "Media Type" in the Device resource, please ensure
-# that dird.conf has corresponding changes.
-#
-
-Storage { # definition of myself
- Name = @hostname@-sd
- SDPort = 8103 # Director's port
- WorkingDirectory = "@working_dir@"
- Pid Directory = "@piddir@"
- Subsys Directory = "@subsysdir@"
-}
-
-#
-# List Directors who are permitted to contact Storage daemon
-#
-Director {
- Name = @hostname@-dir
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
-}
-
-#
-# Devices supported by this Storage daemon
-# To connect, the Director's bacula-dir.conf must have the
-# same Name and MediaType.
-#
-
-Device {
- Name = DDS-4 #
- Media Type = DDS-4
- Archive Device = @tape_drive@
- AutomaticMount = yes; # when device opened, read it
- AlwaysOpen = yes;
- RemovableMedia = yes;
- @@sbindir@/tape_options
-# Maximum File Size = 1000000
-# MaximumVolumeSize = 100M
-}
-
-#
-# Send all messages to the Director,
-# mount messages also are sent to the email address
-#
-Messages {
- Name = Standard
- director = @hostname@-dir = all, !terminate
-}
+++ /dev/null
-#
-# Default Bacula Storage Daemon Configuration file
-#
-# For Bacula release 1.33
-#
-# You may need to change the name of your tape drive
-# on the "Archive Device" directive in the Device
-# resource. If you change the Name and/or the
-# "Media Type" in the Device resource, please ensure
-# that dird.conf has corresponding changes.
-#
-
-Storage { # definition of myself
- Name = rufus-sd
- SDPort = 8103 # Director's port
- WorkingDirectory = "/home/kern/bacula/regress/working"
- Pid Directory = "/home/kern/bacula/regress/working"
-}
-
-#
-# List Directors who are permitted to contact Storage daemon
-#
-Director {
- Name = rufus-dir
- Password = "BzlEl8haeFmnv/Lv8V6zDzUBgFFQNsUtny6VkmccQpOy"
-}
-
-#
-# Devices supported by this Storage daemon
-# To connect, the Director's bacula-dir.conf must have the
-# same Name and MediaType.
-#
-
-Device {
- Name = FileStorage
- Media Type = File
- Archive Device = @tmpdir@
- LabelMedia = yes; # lets Bacula label unlabeled media
- Random Access = Yes;
- AutomaticMount = yes; # when device opened, read it
- RemovableMedia = no;
- AlwaysOpen = no;
-}
-
-#Device {
-# Name = DDS-4 #
-# Media Type = DDS-4
-# Archive Device = /dev/nst0
-# AutomaticMount = yes; # when device opened, read it
-# AlwaysOpen = yes;
-# RemovableMedia = yes;
-#}
-
-#
-# A very old Exabyte with no end of media detection
-#
-#Device {
-# Name = "Exabyte 8mm"
-# Media Type = "8mm"
-# Archive Device = /dev/nst0
-# Hardware end of medium = No;
-# AutomaticMount = yes; # when device opened, read it
-# AlwaysOpen = Yes;
-# RemovableMedia = yes;
-#}
-
-#
-# Send all messages to the Director,
-# mount messages also are sent to the email address
-#
-Messages {
- Name = Standard
- director = rufus-dir = all, !terminate
-}
+++ /dev/null
-#!/bin/sh
-#
-# Check for zombie jobs (not terminated).
-# Also scan logs for ERROR messages
-#
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output tmp/dir.out
-status dir
-@output tmp/fd.out
-status client
-@output tmp/sd.out
-status $1
-@output
-quit
-END_OF_DATA
-grep "No Jobs running." tmp/dir.out 2>&1 >/dev/null
-if [ $? != 0 ] ; then
- echo " "
- echo " !!!! Zombie Jobs in Director !!!!"
- echo " !!!! Zombie Jobs in Director !!!!" >>test.out
- echo " "
-fi
-grep "No Jobs running." tmp/fd.out 2>&1 >/dev/null
-if [ $? != 0 ] ; then
- echo " "
- echo " !!!! Zombie Jobs in File daemon !!!!"
- echo " !!!! Zombie Jobs in File daemon !!!!" >>test.out
- echo " "
-fi
-grep "No Jobs running." tmp/sd.out 2>&1 >/dev/null
-if [ $? != 0 ] ; then
- echo " "
- echo " !!!! Zombie Jobs in Storage daemon !!!!"
- echo " !!!! Zombie Jobs in Storage daemon !!!!" >>test.out
- echo " "
-fi
-grep "ERROR" tmp/log*.out 2>&1 >/dev/null
-if [ $? = 0 ] ; then
- echo " "
- echo " !!!! ERROR in log output !!!!"
- echo " !!!! ERROR in log output !!!!" >>test.out
- echo " "
-fi
-grep "Fatal Error" tmp/log*.out 2>&1 >/dev/null
-if [ $? = 0 ] ; then
- echo " "
- echo " !!!! Fatal Error in log output !!!!"
- echo " !!!! Fatal Error in log output !!!!" >>test.out
- echo " "
-fi
-
+++ /dev/null
-#!/bin/sh
-#
-# Cleanup left over files -- both before and after test run
-#
-rm -rf /tmp/TestVolume001 /tmp/bacula-restores /tmp/Small*
-rm -rf tmp/original tmp/bacula-restores tmp/Small* tmp/TestVolume*
-rm -rf tmp/restored tmp/largefile tmp/bscan.bsr tmp/log*.out
-rm -rf /tmp/sed_tmp /tmp/file-list
-rm -rf tmp/build tmp/restore-list tmp/restore2-list
-rm -rf tmp/fd.out tmp/dir.out tmp/sd.out
-rm -rf working/log tmp/TEST-*
-rm -rf working/*restore*.bsr
-
-bin/bacula stop 2>&1 >/dev/null
-cd bin
-./drop_bacula_tables >/dev/null 2>&1
-./make_bacula_tables >/dev/null 2>&1
-./grant_bacula_privileges >/dev/null 2>&1
-cd ..
+++ /dev/null
-#!/bin/sh
-#
-# Cleanup left over files -- both before and after test run
-#
-scripts/cleanup
-
-if test x@autochanger@ != x/dev/null; then
- slot=`bin/mtx-changer @autochanger@ loaded 0 @tape_drive1@ 1`
- echo "Slot $slot in drive 1"
- if test x$slot != x0 ; then
- echo "unload slot $slot in drive 1"
- bin/mtx-changer @autochanger@ unload $slot @tape_drive1@ 1
- fi
- slot=`bin/mtx-changer @autochanger@ loaded 0 @tape_drive@ 0`
- echo "Slot $slot in drive 0"
- if test x$slot != x0 ; then
- echo "unload slot $slot in drive 0"
- bin/mtx-changer @autochanger@ unload $slot @tape_drive@ 0
- fi
-
-
- slot=`bin/mtx-changer @autochanger@ loaded 0 @tape_drive1@ 1`
- if test x$slot != x2; then
- echo "load slot 2 in drive 1"
- bin/mtx-changer @autochanger@ load 2 @tape_drive1@ 1
- fi
- echo "WEOF drive 1"
- mt -f @tape_drive1@ rewind
- mt -f @tape_drive1@ weof
- echo "unload slot 2 in drive 1"
- bin/mtx-changer @autochanger@ unload 2 @tape_drive1@ 1
-
- slot=`bin/mtx-changer @autochanger@ loaded 0 @tape_drive@ 0`
- echo "Slot $slot in drive 0"
- if test x$slot != x1; then
- echo "load slot 1 in drive 0"
- bin/mtx-changer @autochanger@ load 1 @tape_drive@ 0
- fi
- echo "WEOF drive 0"
- mt -f @tape_drive@ rewind
- mt -f @tape_drive@ weof
- echo "unload slot 1 from drive 0"
- bin/mtx-changer @autochanger@ unload 1 @tape_drive@ 0
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Cleanup left over files -- both before and after test run
-#
-scripts/cleanup
-
-if test x@autochanger@ != x/dev/null; then
- drive=`bin/mtx-changer @autochanger@ loaded 0 dummy 0`
- if test x${drive} != x2; then
- bin/mtx-changer @autochanger@ unload 0 @tape_drive@ 0
- bin/mtx-changer @autochanger@ load 2 @tape_drive@ 0
- fi
- mt -f @tape_drive@ rewind
- mt -f @tape_drive@ weof
-fi
-
-
-#
-# If we have an autochanger always load tape in slot 1
-#
-if test x@autochanger@ != x/dev/null; then
- drive=`bin/mtx-changer @autochanger@ loaded 0 dummy 0`
- if test x${drive} != x1; then
- bin/mtx-changer @autochanger@ unload 0 @tape_drive@ 0
- bin/mtx-changer @autochanger@ load 1 @tape_drive@ 0
- fi
-fi
-
-mt -f @tape_drive@ rewind
-mt -f @tape_drive@ weof
+++ /dev/null
-#!/bin/sh
-#
-# Cleanup left over files -- both before and after test run
-#
-scripts/cleanup
-
-mt -f @tape_drive@ rewind
-mt -f @tape_drive@ weof
+++ /dev/null
-#!/bin/sh
-/bin/cp -f scripts/bacula-dir-tape.conf bin/bacula-dir.conf
-/bin/cp -f scripts/bacula-sd-2drive.conf bin/bacula-sd.conf
-/bin/cp -f scripts/test-bacula-fd.conf bin/bacula-fd.conf
-/bin/cp -f scripts/test-console.conf bin/bconsole.conf
-outf="tmp/sed_tmp"
-echo "s%# Autochanger = yes% Autochanger = yes%g" >${outf}
-cp bin/bacula-dir.conf tmp/1
-sed -f ${outf} tmp/1 >bin/bacula-dir.conf
-
-# get proper SD tape definitions
-cp -f scripts/linux_tape_options bin/tape_options
-if test x`uname` = xFreeBSD ; then
- cp -f scripts/freebsd_tape_options bin/tape_options
-fi
+++ /dev/null
-#!/bin/sh
-/bin/cp -f scripts/bacula-dir-tape.conf bin/bacula-dir.conf
-/bin/cp -f scripts/bacula-sd-2tape.conf bin/bacula-sd.conf
-/bin/cp -f scripts/test-bacula-fd.conf bin/bacula-fd.conf
-/bin/cp -f scripts/test-console.conf bin/bconsole.conf
-outf="tmp/sed_tmp"
-echo "s%# Autochanger = yes% Autochanger = yes%g" >${outf}
-cp bin/bacula-dir.conf tmp/1
-sed -f ${outf} tmp/1 >bin/bacula-dir.conf
-
-# get proper SD tape definitions
-cp -f scripts/linux_tape_options bin/tape_options
-if test x`uname` = xFreeBSD ; then
- cp -f scripts/freebsd_tape_options bin/tape_options
-fi
+++ /dev/null
-#!/bin/sh
-/bin/cp -f scripts/bacula-dir.conf bin/bacula-dir.conf
-/bin/cp -f scripts/bacula-sd.conf bin/bacula-sd.conf
-/bin/cp -f scripts/bacula-fd.conf bin/bacula-fd.conf
-/bin/cp -f scripts/bconsole.conf bin/bconsole.conf
-
-# get proper SD tape definitions
-cp -f scripts/linux_tape_options bin/tape_options
-if test x`uname` = xFreeBSD ; then
- cp -f scripts/freebsd_tape_options bin/tape_options
-fi
+++ /dev/null
-#!/bin/sh
-/bin/cp -f scripts/bacula-dir-tape.conf bin/bacula-dir.conf
-/bin/cp -f scripts/bacula-sd-tape.conf bin/bacula-sd.conf
-/bin/cp -f scripts/test-bacula-fd.conf bin/bacula-fd.conf
-/bin/cp -f scripts/test-console.conf bin/bconsole.conf
-
-# get proper SD tape definitions
-cp -f scripts/linux_tape_options bin/tape_options
-if test x`uname` = xFreeBSD ; then
- cp -f scripts/freebsd_tape_options bin/tape_options
-fi
+++ /dev/null
-#!/bin/sh
-/bin/cp -f scripts/new-test-bacula-dir.conf bin/bacula-dir.conf
-/bin/cp -f scripts/test-bacula-sd.conf bin/bacula-sd.conf
-/bin/cp -f scripts/test-bacula-fd.conf bin/bacula-fd.conf
-/bin/cp -f scripts/test-console.conf bin/bconsole.conf
-
-# get proper SD tape definitions
-cp -f scripts/linux_tape_options bin/tape_options
-if test x`uname` = xFreeBSD ; then
- cp -f scripts/freebsd_tape_options bin/tape_options
-fi
+++ /dev/null
-#!/bin/sh
-/bin/cp -f scripts/testa-bacula-dir.conf bin/bacula-dir.conf
-/bin/cp -f scripts/test-bacula-sd.conf bin/bacula-sd.conf
-/bin/cp -f scripts/test-bacula-fd.conf bin/bacula-fd.conf
-/bin/cp -f scripts/test-console.conf bin/bconsole.conf
-
-# get proper SD tape definitions
-cp -f scripts/linux_tape_options bin/tape_options
-if test x`uname` = xFreeBSD ; then
- cp -f scripts/freebsd_tape_options bin/tape_options
-fi
+++ /dev/null
-#!/bin/sh
-/bin/cp -f scripts/win32-bacula-dir-tape.conf bin/bacula-dir.conf
-/bin/cp -f scripts/win32-bacula-sd-tape.conf bin/bacula-sd.conf
-/bin/cp -f scripts/win32-bacula-fd.conf bin/bacula-fd.conf
-/bin/cp -f scripts/test-console.conf bin/bconsole.conf
-
-# get proper SD tape definitions
-cp -f scripts/linux_tape_options bin/tape_options
-if test x`uname` = xFreeBSD ; then
- cp -f scripts/freebsd_tape_options bin/tape_options
-fi
+++ /dev/null
-#!/bin/sh
-#
-if test $# != 6 ; then
- echo "First arg must be email name"
- echo " and the second must be a tape drive"
- echo " and the third must be a tape control name or /dev/null"
- echo " and the fourth must be the full path to the mtx program"
- echo " and the fifth must be tape drive 1 or /dev/null"
- echo " and the sixth must be the smtp or email host"
- exit 1
-fi
-mkdir -p ${cwd}/bin
-out="/tmp/sed_tmp"
-cwd=`pwd`
-HOST="localhost"
-# Create sed command script
-echo "s%@sbindir@%${cwd}/bin%g" >${out}
-echo "s%@scriptdir@%${cwd}/bin%g" >>${out}
-echo "s%@working_dir@%${cwd}/working%g" >>${out}
-echo "s%@piddir@%${cwd}/working%g" >>${out}
-echo "s%@subsysdir@%${cwd}/working%g" >>${out}
-echo "s%@job_email@%${1}%g" >>${out}
-echo "s%@tape_drive@%${2}%g" >>${out}
-echo "s%@autochanger@%${3}%g" >>${out}
-echo "s%@tmpdir@%${cwd}/tmp%g" >>${out}
-echo "s%@hostname@%${HOST}%g" >>${out}
-echo "s%@changer_path@%${4}%g" >>${out}
-echo "s%@tape_drive1@%${5}%g" >>${out}
-echo "s%@smtp_host@%${6}%g" >>${out}
-
-echo "AUTOCHANGER=\"${3}\"" >config.out
-echo "TAPE_DRIVE1=\"${5}\"" >>config.out
-
-
-# process .in files with sed script
-sed -f ${out} ${cwd}/scripts/test-bacula-dir.conf.in >${cwd}/scripts/test-bacula-dir.conf
-sed -f ${out} ${cwd}/scripts/new-test-bacula-dir.conf.in >${cwd}/scripts/new-test-bacula-dir.conf
-sed -f ${out} ${cwd}/scripts/testa-bacula-dir.conf.in >${cwd}/scripts/testa-bacula-dir.conf
-sed -f ${out} ${cwd}/scripts/test-bacula-fd.conf.in >${cwd}/scripts/test-bacula-fd.conf
-sed -f ${out} ${cwd}/scripts/test-bacula-sd.conf.in >${cwd}/scripts/test-bacula-sd.conf
-sed -f ${out} ${cwd}/scripts/test-console.conf.in >${cwd}/scripts/test-console.conf
-sed -f ${out} ${cwd}/scripts/bacula-dir-tape.conf.in >${cwd}/scripts/bacula-dir-tape.conf
-sed -f ${out} ${cwd}/scripts/win32-bacula-dir-tape.conf.in >${cwd}/scripts/win32-bacula-dir-tape.conf
-sed -f ${out} ${cwd}/scripts/bacula-sd-tape.conf.in >${cwd}/scripts/bacula-sd-tape.conf
-sed -f ${out} ${cwd}/scripts/bacula-sd-2tape.conf.in >${cwd}/scripts/bacula-sd-2tape.conf
-sed -f ${out} ${cwd}/scripts/bacula-sd-2drive.conf.in >${cwd}/scripts/bacula-sd-2drive.conf
-sed -f ${out} ${cwd}/scripts/cleanup-tape.in >${cwd}/scripts/cleanup-tape
-sed -f ${out} ${cwd}/scripts/cleanup-2tape.in >${cwd}/scripts/cleanup-2tape
-sed -f ${out} ${cwd}/scripts/cleanup-2drive.in >${cwd}/scripts/cleanup-2drive
-sed -f ${out} ${cwd}/scripts/prepare-two-tapes.in >${cwd}/scripts/prepare-two-tapes
-
-cp ${cwd}/bin/bacula-sd.conf /tmp/bac$$
-sed s%/tmp%${cwd}/tmp%g /tmp/bac$$ >${cwd}/bin/bacula-sd.conf
-chmod 777 ${cwd}/scripts/cleanup-*tape ${cwd}/scripts/cleanup-*drive ${cwd}/scripts/prepare-two-tapes
-rm -f /tmp/bac$$
-cp ${cwd}/bin/mtx-changer /tmp/bac$$
-sed "s%^MTX.*$%MTX=${4}%g" /tmp/bac$$ >${cwd}/bin/mtx-changer
-chmod 777 ${cwd}/bin/mtx-changer
-
-# get proper SD tape definitions
-cp -f ${cwd}/scripts/linux_tape_options ${cwd}/bin/tape_options
-if test x`uname` = xFreeBSD ; then
- cp -f ${cwd}/scripts/freebsd_tape_options ${cwd}/bin/tape_options
-fi
-
-rm -f ${out}
-rm -f /tmp/bac$$
+++ /dev/null
-dev/ptmx
-dev/pts
-dev/rd/c5d2
-dev/rd
-dev/shm
+++ /dev/null
-etc/mail/statistics
+++ /dev/null
-lib/ld-2.2.5.so
-lib/libtermcap.so.2.0.8
-lib/libc-2.2.5.so
-lib/libnsl-2.2.5.so
-lib/libnss_files-2.2.5.so
+++ /dev/null
-/build/configure
-/build/src/stored/bextract
-/build/src/tools/testfind
-/build/Makefile.in
-/build/src/dird/bacula-dir
-/build/src/console/bconsole
-/build/src/filed/bacula-fd
-/build/src/findlib/find_one.c
-/build/src/jcr.h
-/build/platforms/Makefile.in
-/build/platforms/redhat/Makefile.in
-/build/scripts/Makefile.in
-/build/src/filed/win32/winservice.cpp
-/build/src/filed/restore.c
-/build/autoconf/configure.in
-/build/examples/afs-bacula
-/build/src/win32/winbacula.nsi.in
-/build/autoconf/configure.in
-/build/src/version.h
-/build/src/lib/message.c
-/build/src/lib/bnet_server.c
-/build/src/lib/libbac.a
+++ /dev/null
-#
-# FreeBSD tape drive options
-#
-Hardware End of Medium = no
-Fast Forward Space File = no
-BSF at EOM = yes
-Backward Space Record = no
-TWO EOF = yes
+++ /dev/null
- Minimum Block Size = 32768
- Maximum Block Size = 32768
- Hardware End of Medium = yes
- BSF at EOM = yes
- Fast Forward Space File = yes
- Two EOF = no
-
+++ /dev/null
-# nothing needed for Linux
+++ /dev/null
-#
-# Default Bacula Director Configuration file
-#
-# The only thing that MUST be changed is to add one or more
-# file or directory names in the Include directive of the
-# FileSet resource.
-#
-# For Bacula release 1.33
-#
-# You might also want to change the default email address
-# from root to your address. See the "mail" and "operator"
-# directives in the Messages resource.
-#
-
-Director { # define myself
- Name = @hostname@-dir
- DIRport = 8101 # where we listen for UA connections
- QueryFile = "@scriptdir@/query.sql"
- WorkingDirectory = "@working_dir@"
- PidDirectory = "@piddir@"
- SubSysDirectory = "@subsysdir@"
- Maximum Concurrent Jobs = 4
- Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password
- Messages = Standard
-}
-
-#
-# Define the main nightly save backup job
-# By default, this job will back up to disk in /tmp
-Job {
- Name = "NightlySave"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
- Maximum Concurrent Jobs = 4
- SpoolData=yes
-}
-
-Job {
- Name = "MonsterSave"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File1
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-
-Job {
- Name = "VerifyVolume"
- Type = Verify
- Level = VolumeToCatalog
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-
-Job {
- Name = "SparseTest"
- Type = Backup
- Client=@hostname@-fd
- FileSet="SparseSet"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-Job {
- Name = "CompressedTest"
- Type = Backup
- Client=@hostname@-fd
- FileSet="CompressedSet"
- Storage = File
- Messages = Standard
- Pool = Default
- Maximum Concurrent Jobs = 4
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-Job {
- Name = "SparseCompressedTest"
- Type = Backup
- Client=@hostname@-fd
- FileSet="SparseCompressedSet"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-
-# Backup the catalog database (after the nightly save)
-Job {
- Name = "BackupCatalog"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Catalog"
-# Schedule = "WeeklyCycleAfterBackup"
- Storage = File
- Messages = Standard
- Pool = Default
- # This creates an ASCII copy of the catalog
- RunBeforeJob = "@sbindir@/make_catalog_backup -u bacula"
- # This deletes the copy of the catalog
- RunAfterJob = "@sbindir@/delete_catalog_backup"
- Write Bootstrap = "@working_dir@/BackupCatalog.bsr"
-}
-
-# Standard Restore template, to be changed by Console program
-Job {
- Name = "RestoreFiles"
- Type = Restore
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Pool = Default
- Where = /tmp/bacula-restores
-}
-
-
-# List of files to be backed up
-FileSet {
- Name = "Full Set"
- Include { Options { signature=MD5 }
- File = </tmp/file-list
- }
-}
-
-FileSet {
- Name = "SparseSet"
- Include {
- Options {
- signature=MD5 sparse=yes
- }
- File = </tmp/file-list
- }
-}
-
-FileSet {
- Name = "CompressedSet"
- Include {
- Options {
- signature=MD5 compression=GZIP
- }
- File = </tmp/file-list
- }
-}
-
-FileSet {
- Name = "SparseCompressedSet"
- Include {
- Options {
- signature=MD5 compression=GZIP
- }
- File = </tmp/file-list
- }
-}
-
-
-
-#
-# When to do the backups, full backup on first sunday of the month,
-# differential (i.e. incremental since full) every other sunday,
-# and incremental backups other days
-Schedule {
- Name = "WeeklyCycle"
- Run = Full 1st sun at 1:05
- Run = Differential 2nd-5th sun at 1:05
- Run = Incremental mon-sat at 1:05
-}
-
-# This schedule does the catalog. It starts after the WeeklyCycle
-Schedule {
- Name = "WeeklyCycleAfterBackup"
- Run = Full sun-sat at 1:10
-}
-
-# This is the backup of the catalog
-FileSet {
- Name = "Catalog"
- Include {
- Options {
- signature=MD5
- }
- File = /home/kern/bacula/regress/bin/working/bacula.sql
- }
-}
-
-# Client (File Services) to backup
-Client {
- Name = @hostname@-fd
- Address = @hostname@
- FDPort = 8102
- Catalog = MyCatalog
- Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
- File Retention = 30d # 30 days
- Job Retention = 180d # six months
- AutoPrune = yes # Prune expired Jobs/Files
- Maximum Concurrent Jobs = 4
-}
-
-# Definiton of file storage device
-Storage {
- Name = File
- Address = @hostname@ # N.B. Use a fully qualified name here
- SDPort = 8103
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
- Device = FileStorage
- Media Type = File
- Maximum Concurrent Jobs = 4
-}
-
-Storage {
- Name = File1
- Address = @hostname@ # N.B. Use a fully qualified name here
- SDPort = 8103
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
- Device = FileStorage1
- Media Type = File1
- Maximum Concurrent Jobs = 4
-}
-
-
-# Definition of DLT tape storage device
-#Storage {
-# Name = DLTDrive
-# Address = @hostname@ # N.B. Use a fully qualified name here
-# SDPort = 8103
-# Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" # password for Storage daemon
-# Device = "HP DLT 80" # must be same as Device in Storage daemon
-# Media Type = DLT8000 # must be same as MediaType in Storage daemon
-#}
-
-# Definition of DDS tape storage device
-#Storage {
-# Name = SDT-10000
-# Address = @hostname@ # N.B. Use a fully qualified name here
-# SDPort = 8103
-# Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" # password for Storage daemon
-# Device = SDT-10000 # must be same as Device in Storage daemon
-# Media Type = DDS-4 # must be same as MediaType in Storage daemon
-#}
-
-# Definition of 8mm tape storage device
-#Storage {
-# Name = "8mmDrive"
-# Address = @hostname@ # N.B. Use a fully qualified name here
-# SDPort = 8103
-# Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
-# Device = "Exabyte 8mm"
-# MediaType = "8mm"
-#}
-
-
-# Generic catalog service
-Catalog {
- Name = MyCatalog
- dbname = bacula; user = bacula; password = ""
-}
-
-# Reasonable message delivery -- send most everything to email address
-# and to the console
-Messages {
- Name = Standard
- mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
- operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: Intervention needed for %j\" %r"
- MailOnError = @job_email@ = all
- operator = @job_email@ = mount
- console = all, !skipped, !terminate, !restored
-#
-# WARNING! the following will create a file that you must cycle from
-# time to time as it will grow indefinitely. However, it will
-# also keep all your messages if the scroll off the console.
-#
- append = "@working_dir@/log" = all, !skipped
-}
-
-Messages {
- Name = NoEmail
- mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
- console = all, !skipped, !terminate
-#
-# WARNING! the following will create a file that you must cycle from
-# time to time as it will grow indefinitely. However, it will
-# also keep all your messages if the scroll off the console.
-#
- append = "@working_dir@/log" = all, !skipped
-}
-
-
-# Default pool definition
-Pool {
- Name = Default
- Pool Type = Backup
- Recycle = yes # Bacula can automatically recycle Volumes
- AutoPrune = yes # Prune expired volumes
- Volume Retention = 365d # one year
- Accept Any Volume = yes # write on any volume in the pool
-# Label Format = "TEST-${Year}-${Month:p/2/0/r}-${Day:p/2/0/r}:${NumVols}"
-}
+++ /dev/null
-#!/bin/sh
-#
-# Create two blank tapes
-#
-@changer_path@ -f @autochanger@ unload
-@changer_path@ -f @autochanger@ load 1
-mt -f @tape_drive@ rewind
-mt -f @tape_drive@ weof
-@changer_path@ -f @autochanger@ unload
-@changer_path@ -f @autochanger@ load 2
-mt -f @tape_drive@ rewind
-mt -f @tape_drive@ weof
-#@changer_path@ -f @autochanger@ unload
+++ /dev/null
-#!/bin/sh
-#
-# This is the configuration script for regression testing
-#
-
-CFLAGS="-g -O2 -Wall" \
- ./configure \
- --sbindir=$1/bin \
- --sysconfdir=$1/bin \
- --mandir=$1/bin \
- --with-pid-dir=$1/working \
- --with-subsys-dir=$1/working \
- --enable-smartalloc \
- --disable-readline \
- --with-working-dir=$1/working \
- --with-dump-email=$2 \
- --with-job-email=$2 \
- --with-smtp-host=$5 \
- $3 \
- --with-baseport=8101 \
- $4
-
-exit 0
+++ /dev/null
-#!/bin/sh
-#
-# Script to setup running Bacula regression tests
-#
-cwd=`pwd`
-if [ $# != 5 ] ; then
- echo "Incorrect number of arguments. Got $#. Need:"
- echo "setup bacula-src email-address --with-DBNAME --with-tcp-wrappers"
- echo " "
- exit 1
-fi
-if [ ! -d $1 ] ; then
- echo "Arg 1 must be a Bacula release directory."
- echo " "
- exit 1
-fi
-rm -rf build bin
-# Copy new source
-echo "Copying source from $1"
-cp -rp $1 build
-cp scripts/regress-config build
-cd build
-rm -f Makefile config.cache
-# Run Bacula configuration, make, install
-./regress-config ${cwd} $2 $3 $4 $5
-make
-make install
-cp src/tools/testls ../bin
-
-cd ..
-bin/bacula stop
-cd bin
-./create_bacula_database bacula
-./drop_bacula_tables bacula
-./make_bacula_tables bacula
-./grant_bacula_privileges bacula
-cd ..
-# Start and stop Bacula to ensure conf files are OK
-bin/bacula start
-bin/bacula stop
-#
-# Save Bacula default conf files for later use
-#
-cp -f bin/*.conf scripts
+++ /dev/null
-#
-# Default Bacula Director Configuration file
-#
-# The only thing that MUST be changed is to add one or more
-# file or directory names in the Include directive of the
-# FileSet resource.
-#
-# For Bacula release 1.33
-#
-# You might also want to change the default email address
-# from root to your address. See the "mail" and "operator"
-# directives in the Messages resource.
-#
-
-Director { # define myself
- Name = @hostname@-dir
- DIRport = 8101 # where we listen for UA connections
- QueryFile = "@scriptdir@/query.sql"
- WorkingDirectory = "@working_dir@"
- PidDirectory = "@piddir@"
- SubSysDirectory = "@subsysdir@"
- Maximum Concurrent Jobs = 4
- Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password
- Messages = Daemon
-}
-
-#
-# Define the main nightly save backup job
-# By default, this job will back up to disk in /tmp
-Job {
- Name = "NightlySave"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
- Maximum Concurrent Jobs = 4
- SpoolData=yes
-}
-
-Job {
- Name = "MonsterSave"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File1
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-
-Job {
- Name = "VerifyVolume"
- Type = Verify
- Level = VolumeToCatalog
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-
-Job {
- Name = "SparseTest"
- Type = Backup
- Client=@hostname@-fd
- FileSet="SparseSet"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-Job {
- Name = "CompressedTest"
- Type = Backup
- Client=@hostname@-fd
- FileSet="CompressedSet"
- Storage = File
- Messages = Standard
- Pool = Default
- Maximum Concurrent Jobs = 4
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-Job {
- Name = "SparseCompressedTest"
- Type = Backup
- Client=@hostname@-fd
- FileSet="SparseCompressedSet"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-
-# Backup the catalog database (after the nightly save)
-Job {
- Name = "BackupCatalog"
- Type = Backup
- Client=@hostname@-fd
- FileSet="Catalog"
-# Schedule = "WeeklyCycleAfterBackup"
- Storage = File
- Messages = Standard
- Pool = Default
- # This creates an ASCII copy of the catalog
- RunBeforeJob = "@sbindir@/make_catalog_backup -u bacula"
- # This deletes the copy of the catalog
- RunAfterJob = "@sbindir@/delete_catalog_backup"
- Write Bootstrap = "@working_dir@/BackupCatalog.bsr"
-}
-
-# Standard Restore template, to be changed by Console program
-Job {
- Name = "RestoreFiles"
- Type = Restore
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Pool = Default
- Where = /tmp/bacula-restores
-}
-
-
-# List of files to be backed up
-FileSet {
- Name = "Full Set"
- Include { Options { signature=MD5 }
- File = </tmp/file-list
- }
-}
-
-FileSet {
- Name = "SparseSet"
- Include { Options { signature=MD5; sparse=yes }
- File=</tmp/file-list
- }
-}
-
-FileSet {
- Name = "CompressedSet"
- Include {
- Options { signature=MD5; compression=GZIP }
- File =</tmp/file-list
- }
-}
-
-FileSet {
- Name = "SparseCompressedSet"
- Include {
- Options {
- signature=MD5; compression=GZIP
- }
- File= </tmp/file-list
- }
-}
-
-
-
-#
-# When to do the backups, full backup on first sunday of the month,
-# differential (i.e. incremental since full) every other sunday,
-# and incremental backups other days
-Schedule {
- Name = "WeeklyCycle"
- Run = Full 1st sun at 1:05
- Run = Differential 2nd-5th sun at 1:05
- Run = Incremental mon-sat at 1:05
-}
-
-# This schedule does the catalog. It starts after the WeeklyCycle
-Schedule {
- Name = "WeeklyCycleAfterBackup"
- Run = Full sun-sat at 1:10
-}
-
-# This is the backup of the catalog
-FileSet {
- Name = "Catalog"
- Include { Options { signature=MD5 }
- File=/home/kern/bacula/regress/bin/working/bacula.sql
- }
-}
-
-# Client (File Services) to backup
-Client {
- Name = @hostname@-fd
- Address = @hostname@
- FDPort = 8102
- Catalog = MyCatalog
- Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
- File Retention = 30d # 30 days
- Job Retention = 180d # six months
- AutoPrune = yes # Prune expired Jobs/Files
- Maximum Concurrent Jobs = 4
-}
-
-# Definiton of file storage device
-Storage {
- Name = File
- Address = @hostname@ # N.B. Use a fully qualified name here
- SDPort = 8103
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
- Device = FileStorage
- Media Type = File
- Maximum Concurrent Jobs = 4
-}
-
-Storage {
- Name = File1
- Address = @hostname@ # N.B. Use a fully qualified name here
- SDPort = 8103
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
- Device = FileStorage1
- Media Type = File1
- Maximum Concurrent Jobs = 4
-}
-
-
-# Definition of DLT tape storage device
-#Storage {
-# Name = DLTDrive
-# Address = @hostname@ # N.B. Use a fully qualified name here
-# SDPort = 8103
-# Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" # password for Storage daemon
-# Device = "HP DLT 80" # must be same as Device in Storage daemon
-# Media Type = DLT8000 # must be same as MediaType in Storage daemon
-#}
-
-# Definition of DDS tape storage device
-#Storage {
-# Name = SDT-10000
-# Address = @hostname@ # N.B. Use a fully qualified name here
-# SDPort = 8103
-# Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" # password for Storage daemon
-# Device = SDT-10000 # must be same as Device in Storage daemon
-# Media Type = DDS-4 # must be same as MediaType in Storage daemon
-#}
-
-# Definition of 8mm tape storage device
-#Storage {
-# Name = "8mmDrive"
-# Address = @hostname@ # N.B. Use a fully qualified name here
-# SDPort = 8103
-# Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
-# Device = "Exabyte 8mm"
-# MediaType = "8mm"
-#}
-
-
-# Generic catalog service
-Catalog {
- Name = MyCatalog
- dbname = bacula; user = bacula; password = ""
-}
-
-# Reasonable message delivery -- send most everything to email address
-# and to the console
-Messages {
- Name = Standard
- mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
- operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: Intervention needed for %j\" %r"
- MailOnError = @job_email@ = all
- operator = @job_email@ = mount
- console = all, !skipped, !terminate, !restored
-#
-# WARNING! the following will create a file that you must cycle from
-# time to time as it will grow indefinitely. However, it will
-# also keep all your messages if the scroll off the console.
-#
- append = "@working_dir@/log" = all, !skipped
-}
-
-Messages {
- Name = NoEmail
- mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
- console = all, !skipped, !terminate
-#
-# WARNING! the following will create a file that you must cycle from
-# time to time as it will grow indefinitely. However, it will
-# also keep all your messages if the scroll off the console.
-#
- append = "@working_dir@/log" = all, !skipped
-}
-
-#
-# Message delivery for daemon messages (no job).
-Messages {
- Name = Daemon
- mailcommand = "@sbindir@/bsmtp -h @smtp_host@ -f \"\(Bacula\) %r\" -s \"Bacula daemon message\" %r"
- mail = @job_email@ = all, !skipped
- console = all, !skipped, !saved
- append = "@working_dir@/log" = all, !skipped
-}
-
-# Default pool definition
-Pool {
- Name = Default
- Pool Type = Backup
- Recycle = yes # Bacula can automatically recycle Volumes
- AutoPrune = yes # Prune expired volumes
- Volume Retention = 365d # one year
- Accept Any Volume = yes # write on any volume in the pool
-}
+++ /dev/null
-#
-# Default Bacula File Daemon Configuration file
-#
-# For Bacula release 1.33
-#
-# There is not much to change here except perhaps the
-# File daemon Name to
-#
-
-#
-# List Directors who are permitted to contact this File daemon
-#
-Director {
- Name = @hostname@-dir
- Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc"
-}
-
-#
-# "Global" File daemon configuration specifications
-#
-FileDaemon { # this is me
- Name = @hostname@-fd
- FDport = 8102 # where we listen for the director
- WorkingDirectory = "@working_dir@"
- Pid Directory = "@piddir@"
- SubSys Directory = "@subsysdir@"
-}
-
-# Send all messages except skipped files back to Director
-Messages {
- Name = Standard
- director = @hostname@-dir = all, !terminate
-}
+++ /dev/null
-#
-# Default Bacula Storage Daemon Configuration file
-#
-# For Bacula release 1.33
-#
-# You may need to change the name of your tape drive
-# on the "Archive Device" directive in the Device
-# resource. If you change the Name and/or the
-# "Media Type" in the Device resource, please ensure
-# that dird.conf has corresponding changes.
-#
-
-Storage { # definition of myself
- Name = @hostname@-sd
- SDPort = 8103 # Director's port
- WorkingDirectory = "@working_dir@"
- Pid Directory = "@piddir@"
- Subsys Directory = "@subsysdir@"
-}
-
-#
-# List Directors who are permitted to contact Storage daemon
-#
-Director {
- Name = @hostname@-dir
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
-}
-
-#
-# Devices supported by this Storage daemon
-# To connect, the Director's bacula-dir.conf must have the
-# same Name and MediaType.
-#
-
-Device {
- Name = FileStorage
- Media Type = File
- Archive Device = @tmpdir@
- LabelMedia = yes; # lets Bacula label unlabelled media
- Random Access = Yes;
- AutomaticMount = yes; # when device opened, read it
- RemovableMedia = no;
- AlwaysOpen = no;
-# Maximum File Size = 10KB
-}
-
-Device {
- Name = FileStorage1
- Media Type = File1
- Archive Device = @tmpdir@
- LabelMedia = yes; # lets Bacula label unlabelled media
- Random Access = Yes;
- AutomaticMount = yes; # when device opened, read it
- RemovableMedia = no;
- AlwaysOpen = no;
-}
-
-
-#Device {
-# Name = "HP DLT 80"
-# Media Type = DLT8000
-# Archive Device = /dev/nst0
-# AutomaticMount = yes; # when device opened, read it
-# AlwaysOpen = yes;
-# RemovableMedia = yes;
-#}
-
-#Device {
-# Name = SDT-7000 #
-# Media Type = DDS-2
-# Archive Device = /dev/nst0
-# AutomaticMount = yes; # when device opened, read it
-# AlwaysOpen = yes;
-# RemovableMedia = yes;
-#}
-
-#Device {
-# Name = Floppy
-# Media Type = Floppy
-# Archive Device = /mnt/floppy
-# RemovableMedia = yes;
-# Random Access = Yes;
-# AutomaticMount = yes; # when device opened, read it
-# AlwaysOpen = no;
-#}
-
-#
-# A very old Exabyte with no end of media detection
-#
-#Device {
-# Name = "Exabyte 8mm"
-# Media Type = "8mm"
-# Archive Device = /dev/nst0
-# Hardware end of medium = No;
-# AutomaticMount = yes; # when device opened, read it
-# AlwaysOpen = Yes;
-# RemovableMedia = yes;
-#}
-
-#
-# Send all messages to the Director,
-# mount messages also are sent to the email address
-#
-Messages {
- Name = Standard
- director = @hostname@-dir = all, !terminate
-}
+++ /dev/null
-#
-# Bacula User Agent (or Console) Configuration File
-#
-
-Director {
- Name = @hostname@-dir
- DIRport = 8101
- address = @hostname@
- Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3"
-}
+++ /dev/null
-#
-# TestA Bacula Director Configuration file
-#
-# For Bacula release 1.30 (12 April 2003) -- redhat 7.3
-#
-
-Director { # define myself
- Name = @hostname@-dir
- DIRport = 8101 # where we listen for UA connections
- QueryFile = "@scriptdir@/query.sql"
- WorkingDirectory = "@working_dir@"
- PidDirectory = "@piddir@"
- SubSysDirectory = "@subsysdir@"
- Maximum Concurrent Jobs = 1
- Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password
- Messages = Standard
-}
-
-
-Job {
- Name = "MultiVol"
- Type = Backup
- Client=@hostname@-fd
- Level = Full
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Write Bootstrap = "@working_dir@/SmallVols.bsr"
- Pool = SmallVols
- SpoolData = yes
-}
-
-
-Job {
- Name = "VerifyVolume"
- Type = Verify
- Level = VolumeToCatalog
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
-}
-
-
-
-# Standard Restore template, to be changed by Console program
-Job {
- Name = "RestoreFiles"
- Type = Restore
- Client=@hostname@-fd
- FileSet="Full Set"
- Storage = File
- Messages = Standard
- Pool = Default
- Where = /tmp/bacula-restores
-}
-
-
-# List of files to be backed up
-FileSet {
- Name = "Full Set"
- Include { Options { signature=SHA1 }
- File =</tmp/file-list
- }
-}
-
-
-# Client (File Services) to backup
-Client {
- Name = @hostname@-fd
- Address = @hostname@
- FDPort = 8102
- Catalog = MyCatalog
- Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
- File Retention = 30d # 30 days
- Job Retention = 180d # six months
- AutoPrune = yes # Prune expired Jobs/Files
-}
-
-# Definiton of file storage device
-Storage {
- Name = File
- Address = @hostname@ # N.B. Use a fully qualified name here
- SDPort = 8103
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
- Device = FileStorage
- Media Type = File
-}
-
-Storage {
- Name = File1
- Address = @hostname@ # N.B. Use a fully qualified name here
- SDPort = 8103
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
- Device = FileStorage1
- Media Type = File1
-}
-
-
-# Generic catalog service
-Catalog {
- Name = MyCatalog
- dbname = bacula; user = bacula; password = ""
-}
-
-# Reasonable message delivery -- send most everything to email address
-# and to the console
-Messages {
- Name = Standard
- mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
- operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: Intervention needed for %j\" %r"
- MailOnError = @job_email@ = all, !terminate
- operator = @job_email@ = mount
- console = all, !skipped, !terminate
-
- append = "@working_dir@/log" = all, !skipped
-}
-
-Messages {
- Name = NoEmail
- mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
- console = all, !skipped, !terminate, !restored
- append = "@working_dir@/log" = all, !skipped
-}
-
-
-# Default pool definition
-Pool {
- Name = Default
- Pool Type = Backup
- Recycle = yes # Bacula can automatically recycle Volumes
- AutoPrune = yes # Prune expired volumes
- Volume Retention = 365d # one year
- Accept Any Volume = yes # write on any volume in the pool
-}
-
-Pool {
- Name = SmallVols
- Pool Type = Backup
- Recycle = yes # Bacula can automatically recycle Volumes
- AutoPrune = yes # Prune expired volumes
- Volume Retention = 365d # one year
- Accept Any Volume = yes # write on any volume in the pool
- Maximum Volumes = 10
- MaximumVolumeBytes = 10M
- LabelFormat = Small
-}
+++ /dev/null
-#
-# Default Bacula Director Configuration file
-#
-# The only thing that MUST be changed is to add one or more
-# file or directory names in the Include directive of the
-# FileSet resource.
-#
-# For Bacula release 1.33
-#
-# You might also want to change the default email address
-# from root to your address. See the "mail" and "operator"
-# directives in the Messages resource.
-#
-
-Director { # define myself
- Name = rufus-dir
- DIRport = 8101 # where we listen for UA connections
- QueryFile = "@scriptdir@/query.sql"
- WorkingDirectory = "@working_dir@"
- PidDirectory = "@piddir@"
- Maximum Concurrent Jobs = 4
- Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3"
- Messages = Daemon
-}
-
-#
-# Define the main nightly save backup job
-# By default, this job will back up to disk in /tmp
-Job {
- Name = "NightlySave"
- Type = Backup
- Client=Tibs
- FileSet="Full Set"
- Storage = DDS-4
- Messages = Standard
- Pool = Default
- Write Bootstrap = "@working_dir@/NightlySave.bsr"
- Maximum Concurrent Jobs = 4
- SpoolData = yes
-}
-
-
-# Standard Restore template, to be changed by Console program
-Job {
- Name = "RestoreFiles"
- Type = Restore
- Client=Tibs
- FileSet="Full Set"
- Storage = DDS-4
- Messages = Standard
- Pool = Default
- Where = /tmp/bacula-restores
-}
-
-
-# List of files to be backed up
-FileSet {
- Name = "Full Set"
- Enable VSS = yes
- Include { Options { signature=MD5; portable=no
- compression=GZIP }
-# File = "c:/cygwin" # big
- File = "c:/cygwin/home/kern/bacula/k"
- }
-}
-
-
-#
-# When to do the backups, full backup on first sunday of the month,
-# differential (i.e. incremental since full) every other sunday,
-# and incremental backups other days
-Schedule {
- Name = "WeeklyCycle"
- Run = Full 1st sun at 1:05
- Run = Differential 2nd-5th sun at 1:05
- Run = Incremental mon-sat at 1:05
-}
-
-# Client (File Services) to backup
-Client {
- Name = Tibs
- Address = tibs
- FDPort = 9102
- Catalog = MyCatalog
- Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3"
- File Retention = 30d # 30 days
- Job Retention = 180d # six months
- AutoPrune = yes # Prune expired Jobs/Files
- Maximum Concurrent Jobs = 4
-}
-
-Client {
- Name = localhost-fd
- Address = localhost
- FDPort = 8102
- Catalog = MyCatalog
- Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc"
- File Retention = 30d # 30 days
- Job Retention = 180d # six months
- AutoPrune = yes # Prune expired Jobs/Files
- Maximum Concurrent Jobs = 4
-}
-
-
-# Definition of DDS tape storage device
-Storage {
- Name = DDS-4
- Address = 192.168.68.112 # N.B. Use a fully qualified name here
- SDPort = 8103
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" # password for Storage daemon
- Device = DDS-4 # must be same as Device in Storage daemon
- Media Type = DDS-4 # must be same as MediaType in Storage daemon
- AutoChanger = yes
- Maximum Concurrent Jobs = 4
-}
-
-
-# Generic catalog service
-Catalog {
- Name = MyCatalog
- dbname = bacula; user = bacula; password = ""
-}
-
-# Reasonable message delivery -- send most everything to email address
-# and to the console
-Messages {
- Name = Standard
- mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
- operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: Intervention needed for %j\" %r"
- MailOnError = @job_email@ = all, !terminate
- operator = @job_email@ = mount
- console = all, !skipped, !terminate, !restored
-#
-# WARNING! the following will create a file that you must cycle from
-# time to time as it will grow indefinitely. However, it will
-# also keep all your messages if the scroll off the console.
-#
- append = "@working_dir@/log" = all, !skipped
-}
-
-#
-# Message delivery for daemon messages (no job).
-Messages {
- Name = Daemon
- mailcommand = "@sbindir@/bsmtp -h @smtp_host@ -f \"\(Bacula\) %r\" -s \"Bacula daemon message\" %r"
- mail = @job_email@ = all, !skipped
- console = all, !skipped, !saved
- append = "@working_dir@/log" = all, !skipped
-}
-
-
-# Default pool definition
-Pool {
- Name = Default
- Pool Type = Backup
- Recycle = yes # Bacula can automatically recycle Volumes
- AutoPrune = yes # Prune expired volumes
- Volume Retention = 365d # one year
- Accept Any Volume = yes # write on any volume in the pool
-}
+++ /dev/null
-#
-# Default Bacula File Daemon Configuration file
-#
-# For Bacula release 1.33
-#
-# There is not much to change here except perhaps the
-# File daemon Name to
-#
-
-#
-# List Directors who are permitted to contact this File daemon
-#
-Director {
- Name = rufus-dir
- Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc"
-}
-
-#
-# "Global" File daemon configuration specifications
-#
-FileDaemon { # this is me
- Name = localhost-fd
- FDport = 8102 # where we listen for the director
- WorkingDirectory = "/home/kern/bacula/regress/working"
- Pid Directory = "/home/kern/bacula/regress/working"
- SubSys Directory = "/home/kern/bacula/regress/working"
-}
-
-# Send all messages except skipped files back to Director
-Messages {
- Name = Standard
- director = rufus-dir = all, !terminate
-}
+++ /dev/null
-#
-# Default Bacula Storage Daemon Configuration file
-#
-# For Bacula release 1.33
-#
-# You may need to change the name of your tape drive
-# on the "Archive Device" directive in the Device
-# resource. If you change the Name and/or the
-# "Media Type" in the Device resource, please ensure
-# that dird.conf has corresponding changes.
-#
-
-Storage { # definition of myself
- Name = localhost-sd
- SDPort = 8103 # Director's port
- WorkingDirectory = "/home/kern/bacula/regress/working"
- Pid Directory = "/home/kern/bacula/regress/working"
- Subsys Directory = "/home/kern/bacula/regress/working"
-}
-
-#
-# List Directors who are permitted to contact Storage daemon
-#
-Director {
- Name = rufus-dir
- Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
-}
-
-#
-# Devices supported by this Storage daemon
-# To connect, the Director's bacula-dir.conf must have the
-# same Name and MediaType.
-#
-
-Device {
- Name = DDS-4 #
- Media Type = DDS-4
- Archive Device = /dev/nst0
- AutomaticMount = yes; # when device opened, read it
- AlwaysOpen = yes;
- RemovableMedia = yes;
- @/home/kern/bacula/regress/bin/tape_options
-# Maximum File Size = 1000000
-# MaximumVolumeSize = 100M
-}
-
-#
-# Send all messages to the Director,
-# mount messages also are sent to the email address
-#
-Messages {
- Name = Standard
- director = rufus-dir = all, !terminate
-}
+++ /dev/null
-#!/usr/bin/python
-from time import time as now
-
-fn = open('time.out', 'w+')
-fn.write('%s' % now())
-fn.close()
+++ /dev/null
-#!/bin/sh
-./all-non-root-tape-tests
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory then create some
-# new files, do an Incremental and restore those two files.
-#
-# This script uses the autochanger and two tapes
-#
-. config.out
-if test x${TAPE_DRIVE1} = x/dev/null ; then
- echo "Skipping 2drive-incremenatal-2tape test. No second drive."
- exit
-fi
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-cwd=`pwd`
-bin/bacula stop 2>&1 >/dev/null
-cd bin
-./drop_bacula_tables >/dev/null 2>&1
-./make_bacula_tables >/dev/null 2>&1
-./grant_bacula_privileges 2>&1 >/dev/null
-cd ..
-
-scripts/copy-2drive-confs
-scripts/cleanup-2drive
-echo "${cwd}/tmp/build" >/tmp/file-list
-if test ! -d ${cwd}/tmp/build ; then
- mkdir ${cwd}/tmp/build
-fi
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-cd ${cwd}/tmp
-echo "${cwd}/tmp/build/ficheriro1.txt" >restore-list
-echo "${cwd}/tmp/build/ficheriro2.txt" >>restore-list
-cd ${cwd}
-
-# Turn off Prefer Mounted Volumes so we use 2 drives
-outf="tmp/sed_tmp"
-echo "s%# Prefer Mounted Volumes% Prefer Mounted Volumes%g" >${outf}
-cp ${cwd}/bin/bacula-dir.conf ${cwd}/tmp/1
-# Comment the next line out to write everything to one drive
-# otherwise, it writes the two jobs to different drives
-sed -f ${outf} ${cwd}/tmp/1 >${cwd}/bin/bacula-dir.conf
-
-echo " "
-echo " "
-echo " === Starting 2drive-incremental-2tape test ==="
-echo " === Starting 2drive-incremental-2tape test ===" >>working/log
-echo " "
-
-# Write out bconsole commands
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=1 Pool=Default drive=0
-label storage=DDS-4 volume=TestVolume002 slot=2 Pool=Default drive=1
-@#setdebug level=100 storage=DDS-4
-run job=NightlySave yes
-run job=NightlySave yes
-run job=NightlySave yes
-run job=NightlySave yes
-run job=NightlySave yes
-@sleep 3
-status storage=DDS-4
-wait
-list volumes
-list jobs
-status storage=DDS-4
-messages
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-echo "ficheriro1.txt" >${cwd}/tmp/build/ficheriro1.txt
-echo "ficheriro2.txt" >${cwd}/tmp/build/ficheriro2.txt
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-@# Force Incremental on the second Volume
-update volume=TestVolume001 VolStatus=Used
-status storage=DDS-4
-@#setdebug level=400 storage=DDS-4
-run level=Incremental job=NightlySave yes
-wait
-list volumes
-status storage=DDS-4
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores
-7
-<${cwd}/tmp/restore-list
-
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-#
-# Delete .c files because we will only restored the txt files
-#
-rm -f tmp/build/*.c
-diff -r tmp/build tmp/bacula-restores${cwd}/tmp/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! 2drive-incremental-2tape test Bacula source failed!!! !!!!! "
- echo " !!!!! 2drive-incremental-2tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== 2drive-incremental-2tape test Bacula source OK ===== "
- echo " ===== 2drive-incremental-2tape test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Test if Bacula can automatically create a Volume label.
-#
-
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-cp ${cwd}/bin/bacula-dir.conf ${cwd}/tmp/1
-sed "s%# Label Format% Label Format%" ${cwd}/tmp/1 >${cwd}/bin/bacula-dir.conf
-
-echo " "
-echo " "
-echo " === Starting auto-label-test at `date +%R:%S` ==="
-echo " === Starting auto-label-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-status all
-status all
-list pools
-messages
-@$out tmp/log1.out
-run job=CompressedTest storage=File yes
-list pools
-list volumes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File
-unmark *
-mark *
-count
-ls *
-dir *
-find Makefile
-pwd
-lsmark
-estimate
-?
-help
-done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! auto-label-test failed!!! !!!!! "
- echo " !!!!! auto-label-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== auto-label-test OK ===== "
- echo " ===== auto-label-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# to a tape then restore it, we do that twice to ensure that
-# we can correctly append to a tape.
-# We also use the purge and the relabel commands as
-# well as a pile of status storage commands.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting Backup Bacula tape test at `date +%R:%S` ==="
-echo " === Starting Backup Bacula tape test at `date +%R:%S` ===" >>working/log
-echo " "
-
-# Write out bconsole commands
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-purge volume=TestVolume001
-relabel pool=Default storage=DDS-4 oldVolume=TestVolume001 volume=TestVolume002 slot=0
-purge volume=TestVolume002
-relabel pool=Default storage=DDS-4 oldVolume=TestVolume002 volume=TestVolume001 slot=0
-run job=NightlySave yes
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-@sleep 1
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-@sleep 1
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-@sleep 1
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-@sleep 1
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-status storage=DDS-4
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all storage=DDS-4 done
-yes
-wait
-list volumes
-messages
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-bin/bacula stop 2>&1 >/dev/null
-#
-# Now do a second backup after making a few changes
-#
-touch ${cwd}/build/src/dird/*.c
-echo "test test" > ${cwd}/build/src/dird/xxx
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-list volumes
-run job=NightlySave yes
-wait
-list volumes
-messages
-@#
-@# now do a second restore
-@#
-@$out tmp/log2.out
-list volumes
-restore where=${cwd}/tmp/bacula-restores select all storage=DDS-4 done
-yes
-wait
-list volumes
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-scripts/check_for_zombie_jobs storage=DDS-4
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Backup Bacula tape test failed!!! !!!!! "
- echo " !!!!! Backup Bacula tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== Backup Bacula tape test OK ===== "
- echo " ===== Backup Bacula tape test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# then restore it.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-confs
-scripts/cleanup
-
-echo " "
-echo " "
-echo " === Starting Backup Bacula Test at `date +%R:%S` ==="
-echo " === Starting Backup Bacula Test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label volume=TestVolume001
-@#setdebug level=100 storage=File
-run job=Client1 yes
-status storage=File
-status storage=File
-status storage=File
-status storage=File
-status storage=File
-status storage=File
-sleep 1
-status storage=File
-status storage=File
-status storage=File
-status storage=File
-status storage=File
-sleep 1
-status storage=File
-status storage=File
-status storage=File
-status storage=File
-status storage=File
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Backup Bacula Test failed!!! !!!!! "
- echo " !!!!! Backup Bacula Test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== Backup Bacula Test OK ===== "
- echo " ===== Backup Bacula Test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory but
-# split the archive into two volumes, then build a BSR with
-# the restore command and use bextract to restore the files.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting bextract-test at `date +%R:%S` ==="
-echo " === Starting bextract-test at `date +%R:%S` ===" >working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=File1 volume=TestVolume001
-label storage=File1 volume=TestVolume002
-update Volume=TestVolume001 MaxVolBytes=3000000
-run job=NightlySave storage=File1 yes
-wait
-messages
-@#
-@# now build the bsr file
-@#
-@$out tmp/log2.out
-restore bootstrap=${cwd}/working/restore.bsr where=${cwd}/tmp/bacula-restores select all storage=File1 done
-no
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File1
-bin/bacula stop 2>&1 >/dev/null
-mkdir -p ${cwd}/tmp/bacula-restores
-bin/bextract -b working/restore.bsr -c bin/bacula-sd.conf ${cwd}/tmp ${cwd}/tmp/bacula-restores 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! bextract-test Bacula source failed!!! !!!!! "
- echo " !!!!! bextract-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== bextract-test Bacula source OK ===== "
- echo " ===== bextract-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the compressed option
-# then backup four times, each with incremental then
-# do a bscan and restore.
-# It should require at least 4 different bsrs.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "${cwd}/build" >/tmp/file-list
-
-cp ${cwd}/bin/bacula-sd.conf ${cwd}/tmp/1
-sed "s%# Maximum File Size% Maximum File Size%" ${cwd}/tmp/1 >${cwd}/bin/bacula-sd.conf
-
-echo " "
-echo " "
-echo " === Starting bscan-tape at `date +%R:%S` ==="
-echo " === Starting bscan-tape at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-estimate job=NightlySave listing
-estimate job=NightlySave listing
-estimate job=NightlySave listing
-messages
-@$out tmp/log1.out
-setdebug level=2 storage=DDS-4
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-run job=NightlySave yes
-wait
-run job=NightlySave level=Full yes
-wait
-run job=NightlySave level=Full yes
-wait
-messages
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "Backup 1 done"
-# make some files for the incremental to pick up
-touch ${cwd}/build/src/dird/*.c ${cwd}/build/src/dird/*.o
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-
-#
-# run a second job
-#
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-run job=NightlySave level=Incremental yes
-wait
-messages
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "Backup 2 done"
-touch ${cwd}/build/src/dird/*.c
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-#
-# run a third job
-#
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-run job=NightlySave level=Incremental yes
-wait
-messages
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "Backup 3 done"
-# make some files for the incremental to pick up
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-#echo "abc" > ${cwd}/build/src/lib/dummy
-#
-# run a fourth job
-#
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-run job=NightlySave level=Incremental yes
-wait
-messages
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "Backup 4 done"
-#
-# now drop and recreate the database
-#
-cd bin
-./drop_bacula_tables >/dev/null 2>&1
-./make_bacula_tables >/dev/null 2>&1
-./grant_bacula_privileges 2>&1 >/dev/null
-cd ..
-
-bin/bacula stop 2>&1 >/dev/null
-echo "volume=TestVolume001" >tmp/bscan.bsr
-bin/bscan -w working -m -s -v -b tmp/bscan.bsr -c bin/bacula-sd.conf DDS-4 2>&1 >/dev/null
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@$out /dev/null
-messages
-@$out tmp/log2.out
-@#
-@# now do a restore
-@#
-restore where=${cwd}/tmp/bacula-restores select all storage=DDS-4 done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=DDS-4
-rm -f ${cwd}/build/src/lib/dummy
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! bscan-tape Bacula source failed!!! !!!!! "
- echo " !!!!! bscan-tape failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== bscan-tape Bacula source OK ===== "
- echo " ===== bscan-tape OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory but
-# split the archive into two volumes then bscan it
-# into the catalog after the backup. It also to a limited
-# extent tests the purge volume and delete volume commands.
-#
-
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting bscan-test at `date +%R:%S` ==="
-echo " === Starting bscan-test at `date +%R:%S` ===" >working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=File1
-TestVolume001
-label storage=File1
-TestVolume002
-update Volume=TestVolume001 MaxVolBytes=3000000
-run job=NightlySave storage=File1
-yes
-wait
-messages
-@$out /dev/null
-@#
-@# now purge the Volume
-@#
-purge volume=TestVolume001
-purge volume=TestVolume002
-delete volume=TestVolume001
-yes
-delete volume=TestVolume002
-yes
-messages
-quit
-END_OF_DATA
-
-# run backup
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File1
-bin/bacula stop 2>&1 >/dev/null
-echo "volume=TestVolume001|TestVolume002" >tmp/bscan.bsr
-
-if test "$debug" -eq 1 ; then
- bin/bscan -w working -m -s -v -b tmp/bscan.bsr -c bin/bacula-sd.conf ${cwd}/tmp
-else
- bin/bscan -w working -m -s -v -b tmp/bscan.bsr -c bin/bacula-sd.conf ${cwd}/tmp 2>&1 >/dev/null
-fi
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log2.out
-@#
-@# now do a restore
-@#
-@#setdebug level=400 storage=File1
-restore bootstrap=${cwd}/tmp/kern.bsr where=${cwd}/tmp/bacula-restores select all storage=File1 done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-# now run restore
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-
-scripts/check_for_zombie_jobs storage=File1
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! bscan-test Bacula source failed!!! !!!!! "
- echo " !!!!! bscan-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== bscan-test Bacula source OK ===== "
- echo " ===== bscan-test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory but
-# split the archive into two volumes, then restore
-# files on only one of the volumes and ensure that
-# the other volume is not used. I.e. bsr optimization
-# works.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting bsr-opt-test at `date +%R:%S` ==="
-echo " === Starting bsr-opt-test at `date +%R:%S` ===" >working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=File1 volume=TestVolume001
-label storage=File1 volume=TestVolume002
-update Volume=TestVolume001 MaxVolBytes=3000000
-run job=NightlySave storage=File1 yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore bootstrap=${cwd}/working/restore.bsr where=${cwd}/tmp/bacula-restores select storage=File1
-unmark *
-cd ${cwd}/build/src/cats
-mark *
-ls
-done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File1
-bin/bacula stop 2>&1 >/dev/null
-#
-# This test is not really reliable. What we want to do is
-# to select files on only one Volume, then insure here
-# that only one Volume is chosen.
-#
-grep TestVolume002 working/restore.bsr 2>&1 >/dev/null
-bsrstat=$?
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build/src/cats tmp/bacula-restores${cwd}/build/src/cats 2>&1 >/dev/null
-if [ $? != 0 -o $bsrstat != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! bsr-opt-test Bacula source failed!!! !!!!! "
- echo " !!!!! bsr-opt-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- elif [ $bsrstat != 0 ] ; then
- echo " !!!!! Volume selection error !!!!! "
- echo " !!!!! Volume selection error !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== bsr-opt-test Bacula source OK ===== "
- echo " ===== bsr-opt-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Test the fill command in btape
-#
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-
-echo " "
-echo " "
-echo " === Starting btape fill test at `date +%R:%S` ==="
-echo " === Starting btape fill test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/btape -c bin/bacula-sd.conf DDS-4 <<END_OF_DATA 2>&1 >tmp/log1.out
-fill
-s
-
-quit
-END_OF_DATA
-
-
-grep "^The last block on the tape matches\. Test succeeded\." tmp/log1.out 2>&1 >/dev/null
-if [ $? != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! btape fill test failed!!! !!!!! "
- echo " !!!!! btape fill test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== btape fill test OK ===== "
- echo " ===== btape fill test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Test the fill command in btape
-#
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-
-echo "${cwd}/build" >/tmp/file-list
-
-cp ${cwd}/bin/bacula-sd.conf ${cwd}/tmp/1
-sed "s%# MaximumVolumeSize% MaximumVolumeSize%" ${cwd}/tmp/1 >${cwd}/bin/bacula-sd.conf
-
-echo " "
-echo " "
-echo " === Starting btape fill test at `date +%R:%S` ==="
-echo " === Starting btape fill test at `date +%R:%S` ===" >>working/log
-echo " "
-exit
-bin/btape -c bin/bacula-sd.conf DDS-4 <<END_OF_DATA 2>&1 >tmp/log1.out
-fill
-s
-
-quit
-END_OF_DATA
-
-
-grep "^The last block on the tape matches\. Test succeeded\." tmp/log1.out 2>&1 >/dev/null
-if [ $? != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! btape fill test failed!!! !!!!! "
- echo " !!!!! btape fill test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== btape fill test OK ===== "
- echo " ===== btape fill test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the compressed option
-# then restore it.
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting compressed-test at `date +%R:%S` ==="
-echo " === Starting compressed-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-exit
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@tee /dev/null
-status all
-status all
-messages
-@tee tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest storage=File yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@tee tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File
-unmark *
-mark *
-done
-yes
-wait
-messages
-@tee
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! compressed-test Bacula source failed!!! !!!!! "
- echo " !!!!! compressed-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== compressed-test Bacula source OK ===== "
- echo " ===== compressed-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run two jobs at the same time
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/tmp/largefile" >/tmp/file-list
-if test -c /dev/urandom ; then
-# Create 56MB file with random data
- echo "Creating a 56MB file with random data ..."
- dd if=/dev/urandom of=${cwd}/tmp/largefile bs=1024 count=55000
-else
- echo "Creating a 56MB file with bacula-dir data ..."
- dd if=bin/bacula-dir of=${cwd}/tmp/1 bs=1024 count=1000
- cat ${cwd}/tmp/1 ${cwd}/tmp/1 ${cwd}/tmp/1 ${cwd}/tmp/1 ${cwd}/tmp/1 >${cwd}/tmp/2
- rm -f ${cwd}/tmp/1
- cat ${cwd}/tmp/2 ${cwd}/tmp/2 ${cwd}/tmp/2 ${cwd}/tmp/2 ${cwd}/tmp/2 >>${cwd}/tmp/3
- rm -f ${cwd}/tmp/2
- cat ${cwd}/tmp/3 ${cwd}/tmp/3 ${cwd}/tmp/3 ${cwd}/tmp/3 ${cwd}/tmp/3 >${cwd}/tmp/largefile
- rm -f ${cwd}/tmp/3
-fi
-
-echo "largefile created"
-
-echo " "
-echo " "
-echo " === Starting concurrent-jobs-test at `date +%R:%S` ==="
-echo " === Starting concurrent-jobs-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest level=Full yes
-run job=CompressedTest level=Full yes
-run job=CompressedTest level=Full yes
-run job=CompressedTest level=Full yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff tmp/largefile tmp/bacula-restores${cwd}/tmp/largefile 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! concurrent-jobs-test Bacula source failed!!! !!!!! "
- echo " !!!!! concurrent-jobs-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== concurrent-jobs-test Bacula source OK ===== "
- echo " ===== concurrent-jobs-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# then restore it.
-#
-MUID=`/usr/bin/id -u`
-if [ $MUID != 0 ] ; then
- echo " "
- echo "You must be root to run this test."
- echo " ===== !!!! dev-test-root not run !!!! ===== "
- echo " ===== !!!! dev-test-root not run !!!! ===== " >>test.out
- echo " "
- exit 1
-fi
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "/dev" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting dev-test-root test ==="
-echo " "
-echo " === Note, this test can fail for trivial ==="
-echo " === reasons on non-Linux systems. ==="
-echo " "
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-bin/bacula stop 2>&1 >/dev/null
-cd /
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-etc-test dev >${cwd}/tmp/original
-cd ${cwd}/tmp/bacula-restores
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-etc-test dev >${cwd}/tmp/restored
-cd ${cwd}/tmp
-#
-# Use sed to cut out parts that *always* change
-#
-cat >sed.scr <<END_OF_DATA
-s%.*dev$%dev%
-s%.*[0-9][0-9]:[0-9][0-9]:[0-9][0-9] dev/ptmx%dev/ptmx%
-s%.*[0-9][0-9]:[0-9][0-9]:[0-9][0-9] dev/ttyp1%dev/ttyp1%
-s%.*[0-9][0-9]:[0-9][0-9]:[0-9][0-9] dev/null%dev/null%
-END_OF_DATA
-
-# strip file system change messages then sed and sort
-grep -v "Skip: File system change prohibited." original >1
-sed -f sed.scr 1 | sort >original
-#
-mv -f restored 1
-sed -f sed.scr 1 | sort >restored
-rm -f sed.scr
-#
-cd ${cwd}
-diff tmp/original tmp/restored 2>&1 1>/dev/null
-if [ $? != 0 ] ; then
- echo " "
- echo " "
- echo " ===== !!!! dev-test-root failed !!!! ===== "
- echo " ===== !!!! dev-test-root failed !!!! ===== " >>test.out
- echo " "
-else
- echo " ===== dev-test-root OK ===== "
- echo " ===== dev-test-root OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory then create some
-# new files, do a differential and restore those two files.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/tmp/build" >/tmp/file-list
-mkdir ${cwd}/tmp/build
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-cd ${cwd}/tmp
-echo "${cwd}/tmp/build/ficheriro1.txt" >restore-list
-echo "${cwd}/tmp/build/ficheriro2.txt" >>restore-list
-cd ${cwd}
-
-echo " "
-echo " "
-echo " === Starting differential-test at `date +%R:%S` ==="
-echo " === Starting differential-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=File volume=TestVolume002
-label storage=File volume=TestVolume001
-run job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File
-echo "ficheriro1.txt" >${cwd}/tmp/build/ficheriro1.txt
-echo "ficheriro2.txt" >${cwd}/tmp/build/ficheriro2.txt
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-@# Force differential on the second Volume
-update volume=TestVolume002 VolStatus=Used
-run level=differential job=CompressedTest yes
-wait
-messages
-@$out
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File
-echo "ficheriro2.txt" >${cwd}/tmp/build/ficheriro2.txt
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-run level=incremental job=CompressedTest yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores storage=File file=<${cwd}/tmp/restore-list
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-#
-# Delete .c files because we will only restore the txt files
-#
-rm -f tmp/build/*.c
-diff -r tmp/build tmp/bacula-restores${cwd}/tmp/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! differential-test Bacula source failed!!! !!!!! "
- echo " !!!!! differential-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== differential-test Bacula source OK ===== "
- echo " ===== differential-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# to two tapes where the maximum tape file size is set to 1M
-#
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-
-echo "${cwd}/build" >/tmp/file-list
-
-out="tmp/sed_tmp"
-echo "s%# Maximum File Size% Maximum File Size%g" >${out}
-cp -f ${cwd}/bin/bacula-sd.conf ${cwd}/tmp/1
-sed -f ${out} ${cwd}/tmp/1 >${cwd}/bin/bacula-sd.conf
-
-echo " "
-echo " "
-echo " === Starting eot-fail-tape test at `date +%R:%S` ==="
-echo " === Starting eot-fail-tape test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start -d100
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@tee /dev/null
-messages
-@tee tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-update Volume=TestVolume001 MaxVolBytes=3000000
-run job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@tee tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all storage=DDS-4 done
-yes
-wait
-messages
-@tee
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=DDS-4
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! eot-fail-tape test Bacula source failed!!! !!!!! "
- echo " !!!!! eot-fail-tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== eot-fail-tape test Bacula source OK ===== "
- echo " ===== eot-fail-tape test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# then restore it.
-#
-MUID=`/usr/bin/id -u`
-if [ $MUID != 0 ] ; then
- echo " "
- echo "You must be root to run this test."
- echo " ===== !!!! etc-test-root not run !!!! ===== "
- echo " ===== !!!! etc-test-root not run !!!! ===== " >>test.out
- echo " "
- exit 1
-fi
-echo " "
-echo " "
-echo " === Starting /etc save/restore test ==="
-echo " "
-echo " "
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "/etc" >/tmp/file-list
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File
-TestVolume001
-run job=NightlySave
-yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-bin/bacula stop 2>&1 >/dev/null
-cd /
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-etc-test etc >${cwd}/tmp/1
-cd ${cwd}/tmp/bacula-restores
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-etc-test etc >${cwd}/tmp/2
-sort <${cwd}/tmp/1 >${cwd}/tmp/original
-sort <${cwd}/tmp/2 >${cwd}/tmp/restored
-rm -f ${cwd}/tmp/1 ${cwd}/tmp/2
-cd ${cwd}
-diff tmp/original tmp/restored 2>&1 1>/dev/null
-if [ $? != 0 ] ; then
- echo " "
- echo " "
- echo " ===== !!!! etc-test-root failed !!!! ===== "
- echo " ===== !!!! etc-test-root failed !!!! ===== " >>test.out
- echo " "
-else
- echo " ===== etc-test-root OK ===== "
- echo " ===== etc-test-root OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# to a tape where we set the minimum and maximum block
-# sizes.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "${cwd}/build" >/tmp/file-list
-
-cp ${cwd}/bin/bacula-sd.conf ${cwd}/tmp/1
-echo "s%# Maximum Block Size% Maximum Block Size%" >${cwd}/tmp/2
-echo "s%# Minimum Block Size% Minimum Block Size%" >>${cwd}/tmp/2
-sed -f ${cwd}/tmp/2 ${cwd}/tmp/1 >${cwd}/bin/bacula-sd.conf
-if [ $? != 0 ] ; then
- echo " "
- echo " "
- echo "!!!! sed problem in Fixed Block Size test !!!!!"
- echo " "
- exit 1
-fi
-rm -f ${cwd}/tmp/1 ${cwd}/tmp/2
-
-echo " "
-echo " "
-echo " === Starting Fixed Block Size test at `date +%R:%S` ==="
-echo " === Starting Fixed Block Size test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-run job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=DDS-4
-unmark *
-mark *
-done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-scripts/check_for_zombie_jobs storage=DDS-4
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Fixed Block Size test Bacula source failed!!! !!!!! "
- echo " !!!!! Fixed Block Size test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== Fixed Block Size test Bacula source OK ===== "
- echo " ===== Fixed Block Size test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run two jobs at the same time
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting four-concurrent-jobs-tape at `date +%R:%S` ==="
-echo " === Starting four-concurrent-jobs-tape at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-run job=NightlySave level=Full Storage=DDS-4 yes
-run job=NightlySave level=Full Storage=DDS-4 yes
-run job=NightlySave level=Full Storage=DDS-4 yes
-run job=NightlySave level=Full Storage=DDS-4 yes
-@sleep 2
-status dir
-@sleep 5
-status dir
-status storage=DDS-4
-messages
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=DDS-4
-unmark *
-mark *
-done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! four-concurrent-jobs-tape Bacula source failed!!! !!!!! "
- echo " !!!!! four-concurrent-jobs-tape failed!!! !!!!! " >>test.out
- echo " "
- exit 1
-else
- echo " ===== four-concurrent-jobs-tape Bacula source OK ===== "
- echo " ===== four-concurrent-jobs-tape OK ===== " >>test.out
- scripts/cleanup
- exit 0
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run two jobs at the same time
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting four-concurrent-jobs-test at `date +%R:%S` ==="
-echo " === Starting four-concurrent-jobs-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File1
-TestVolume001
-label storage=File1
-TestVolume002
-update Volume=TestVolume001 MaxVolBytes=100000000
-@#50000000
-@#12
-run job=NightlySave level=Full Storage=File1
-yes
-reload
-run job=NightlySave level=Full Storage=File1
-yes
-reload
-run job=NightlySave level=Full Storage=File1
-yes
-reload
-run job=NightlySave level=Full Storage=File1
-yes
-reload
-reload
-reload
-reload
-@sleep 2
-status dir
-reload
-@sleep 5
-messages
-reload
-reload
-wait
-reload
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File1
-unmark *
-mark *
-done
-yes
-wait
-reload
-reload
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File1
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! four-concurrent-jobs-test Bacula source failed!!! !!!!! "
- echo " !!!!! four-concurrent-jobs-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
- exit 1
-else
- echo " ===== four-concurrent-jobs-test Bacula source OK ===== "
- echo " ===== four-concurrent-jobs-test OK ===== " >>test.out
-# scripts/cleanup
- exit 0
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the compressed option
-# then backup four times, each with incremental then finally restore.
-# It should require at least 4 different bsrs.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting four-jobs-tape at `date +%R:%S` ==="
-echo " === Starting four-jobs-tape at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-estimate job=NightlySave listing
-estimate job=NightlySave listing
-estimate job=NightlySave listing
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-run job=NightlySave yes
-wait
-messages
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "Backup 1 done"
-# make some files for the incremental to pick up
-touch ${cwd}/build/src/dird/*.c ${cwd}/build/src/dird/*.o
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-
-#
-# run a second job
-#
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-run job=NightlySave level=Incremental yes
-wait
-messages
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "Backup 2 done"
-touch ${cwd}/build/src/dird/*.c
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-#
-# run a third job
-#
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-run job=NightlySave level=Incremental yes
-wait
-messages
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "Backup 3 done"
-# make some files for the incremental to pick up
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-#
-# run a fourth job
-#
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-run job=NightlySave level=Incremental yes
-wait
-messages
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "Backup 4 done"
-#
-# now do several restores to ensure we cleanup between jobs
-#
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-wait
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-wait
-@$out tmp/log2.out
-@#
-@# now unmount the tape and start two restores
-@# at the same time
-@#
-unmount storage=DDS-4
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-restore where=${cwd}/tmp/bacula-restores select
-unmark *
-mark *
-done
-yes
-mount storage=DDS-4
-wait
-messages
-@$out
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! four-jobs-tape Bacula source failed!!! !!!!! "
- echo " !!!!! four-jobs-tape failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== four-jobs-tape Bacula source OK ===== "
- echo " ===== four-jobs-tape OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the compressed option
-# then backup four times, each with incremental then finally restore.
-# It should require at least 4 different bsrs.
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting four-jobs-test at `date +%R:%S` ==="
-echo " === Starting four-jobs-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-estimate job=CompressedTest listing
-estimate job=CompressedTest listing
-estimate job=CompressedTest listing
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-echo "Backup 1 done"
-# make some files for the incremental to pick up
-touch ${cwd}/build/src/dird/*.c ${cwd}/build/src/dird/*.o
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-
-#
-# run a second job
-#
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run job=CompressedTest
-yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-echo "Backup 2 done"
-touch ${cwd}/build/src/dird/*.c
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-#
-# run a third job
-#
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-echo "Backup 3 done"
-# make some files for the incremental to pick up
-touch ${cwd}/build/src/lib/*.c ${cwd}/build/src/lib/*.o
-#
-# run a fourth job
-#
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-echo "Backup 4 done"
-#
-# now do several restores to ensure we cleanup between jobs
-#
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-restore where=${cwd}/tmp/bacula-restores select all storage=File done
-yes
-wait
-restore where=${cwd}/tmp/bacula-restores select all storage=File done
-yes
-wait
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! four-jobs-test Bacula source failed!!! !!!!! "
- echo " !!!!! four-jobs-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== four-jobs-test Bacula source OK ===== "
- echo " ===== four-jobs-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory then create some
-# new files, do an Incremental and restore those two files.
-#
-# This script uses the autochanger and two tapes
-#
-. config.out
-if test x${AUTOCHANGER} = x/dev/null ; then
- echo "incremental-2tape test skipped. No autochanger."
- exit
-fi
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-cwd=`pwd`
-bin/bacula stop 2>&1 >/dev/null
-cd bin
-./drop_bacula_tables >/dev/null 2>&1
-./make_bacula_tables >/dev/null 2>&1
-./grant_bacula_privileges 2>&1 >/dev/null
-cd ..
-
-scripts/copy-2tape-confs
-scripts/cleanup-2tape
-echo "${cwd}/tmp/build" >/tmp/file-list
-if test ! -d ${cwd}/tmp/build ; then
- mkdir ${cwd}/tmp/build
-fi
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-cd ${cwd}/tmp
-echo "${cwd}/tmp/build/ficheriro1.txt" >restore-list
-echo "${cwd}/tmp/build/ficheriro2.txt" >>restore-list
-cd ${cwd}
-
-echo " "
-echo " "
-echo " === Starting incremental-2tape test ==="
-echo " === Starting incremental-2tape test ===" >>working/log
-echo " "
-
-# Write out bconsole commands
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=1 Pool=Default drive=0
-label storage=DDS-4 volume=TestVolume002 slot=2 Pool=Default drive=0
-run job=NightlySave yes
-wait
-messages
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-echo "ficheriro1.txt" >${cwd}/tmp/build/ficheriro1.txt
-echo "ficheriro2.txt" >${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA
-@$out /dev/null
-messages
-@$out tmp/log1.out
-@# Force Incremental on the second Volume
-update volume=TestVolume001 VolStatus=Used
-run level=Incremental job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores
-7
-<${cwd}/tmp/restore-list
-
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-#
-# Delete .c files because we will only restored the txt files
-#
-rm -f tmp/build/*.c
-diff -r tmp/build tmp/bacula-restores${cwd}/tmp/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! incremental-2tape test Bacula source failed!!! !!!!! "
- echo " !!!!! incremental-2tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== incremental-2tape test Bacula source OK ===== "
- echo " ===== incremental-2tape test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory then create some
-# new files, do an Incremental and restore those two files.
-#
-cwd=`pwd`
-
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "${cwd}/tmp/build" >/tmp/file-list
-if test ! -d ${cwd}/tmp/build ; then
- mkdir ${cwd}/tmp/build
-fi
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-cd ${cwd}/tmp
-echo "${cwd}/tmp/build/ficheriro1.txt" >restore-list
-echo "${cwd}/tmp/build/ficheriro2.txt" >>restore-list
-cd ${cwd}
-
-echo " "
-echo " "
-echo " === Starting incremental-tape test at `date +%R:%S` ==="
-echo " === Starting incremental-tape test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-run job=NightlySave yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=DDS-4
-echo "ficheriro1.txt" >${cwd}/tmp/build/ficheriro1.txt
-echo "ficheriro2.txt" >${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run level=Incremental job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores storage=DDS-4
-7
-<${cwd}/tmp/restore-list
-
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=DDS-4
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-#
-# Delete .c files because we will only restored the txt files
-#
-rm -f tmp/build/*.c
-diff -r tmp/build tmp/bacula-restores${cwd}/tmp/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! incremental-tape test Bacula source failed!!! !!!!! "
- echo " !!!!! incremental-tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== incremental-tape test Bacula source OK ===== "
- echo " ===== incremental-tape test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory then create some
-# new files, do an Incremental and restore those two files.
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/tmp/build" >/tmp/file-list
-mkdir ${cwd}/tmp/build
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-cd ${cwd}/tmp
-echo "${cwd}/tmp/build/ficheriro1.txt" >restore-list
-echo "${cwd}/tmp/build/ficheriro2.txt" >>restore-list
-cd ${cwd}
-
-echo " "
-echo " "
-echo " === Starting incremental-test at `date +%R:%S` ==="
-echo " === Starting incremental-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-label storage=File volume=TestVolume002
-run job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-#
-# Now create two new files to be restored later
-#
-sleep 1
-echo "ficheriro1.txt" >${cwd}/tmp/build/ficheriro1.txt
-cp -f ${cwd}/tmp/build/dird.c ${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-@# Force Incremental on the second Volume
-update volume=TestVolume001 VolStatus=Used
-run level=Differential job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-sleep 1
-touch ${cwd}/tmp/build/ficheriro1.txt
-touch ${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run level=Incremental job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-sleep 1
-cd ${cwd}/tmp/build
-cp -f ficheriro2.txt 1
-sed "s%a%b%g" 1 >ficheriro2.txt
-rm -f 1
-cd ${cwd}
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run level=Differential job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-sleep 1
-touch ${cwd}/tmp/build/ficheriro1.txt
-touch ${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run level=Incremental job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-sleep 1
-touch ${cwd}/tmp/build/ficheriro1.txt
-touch ${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run level=Incremental job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-sleep 1
-touch ${cwd}/tmp/build/ficheriro1.txt
-touch ${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run level=Incremental job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-sleep 1
-touch ${cwd}/tmp/build/ficheriro1.txt
-touch ${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run level=Incremental job=CompressedTest yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores storage=File file=<${cwd}/tmp/restore-list
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-#
-# Delete .c files because we will only restored the txt files
-#
-rm -f tmp/build/*.c
-diff -r tmp/build tmp/bacula-restores${cwd}/tmp/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! incremental-test Bacula source failed!!! !!!!! "
- echo " !!!!! incremental-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== incremental-test Bacula source OK ===== "
- echo " ===== incremental-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the /lib directory
-# then restore it.
-#
-cwd=`pwd`
-bin/bacula stop 2>&1 >/dev/null
-cd bin
-./drop_bacula_tables >/dev/null 2>&1
-./make_bacula_tables >/dev/null 2>&1
-./grant_bacula_privileges 2>&1 >/dev/null
-cd ..
-
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "/lib" >/tmp/file-list
-echo " "
-echo " "
-echo " === Starting lib-tape-root test ==="
-echo " "
-echo " "
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=DDS-4 Volume=TestVolume001 slot=0
-run job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-bin/bacula stop 2>&1 >/dev/null
-cd /
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-usr-test lib >${cwd}/tmp/original
-cd ${cwd}/tmp/bacula-restores
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-usr-test lib >${cwd}/tmp/restored
-cd ${cwd}/tmp
-sed s%.*lib/kbd/consolefonts$%lib/kbd/consolefonts% original >1
-sort <1 >original
-#
-sed s%.*lib/kbd/consolefonts$%lib/kbd/consolefonts% restored >1
-sort <1 >restored
-rm -f 1
-#
-cd ${cwd}
-diff tmp/original tmp/restored 2>&1 1>/dev/nul
-if [ $? != 0 ] ; then
- echo " "
- echo " "
- echo " ===== lib-tape-root failed!!! ===== "
- echo " ===== lib-tape-root failed!!! ===== " >>test.out
- echo " "
-else
- echo " ===== lib-tape-root OK ===== "
- echo " ===== lib-tape-root OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# then restore it.
-#
-MUID=`/usr/bin/id -u`
-if [ $MUID != 0 ] ; then
- echo " "
- echo "You must be root to run this test."
- echo " ===== !!!! lib-test-root not run !!!! ===== "
- echo " ===== !!!! lib-test-root not run !!!! ===== " >>test.out
- echo " "
- exit 1
-fi
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "/lib" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting /lib save/restore test ==="
-echo " "
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File
-TestVolume001
-run job=NightlySave
-yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-bin/bacula stop 2>&1 >/dev/null
-cd /
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-lib-test lib >${cwd}/tmp/original
-cd ${cwd}/tmp/bacula-restores
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-lib-test lib >${cwd}/tmp/restored
-cd ${cwd}/tmp
-sed s%.*lib/kbd/consolefonts$%lib/kbd/consolefonts% original >1
-sort <1 >original
-#
-sed s%.*lib/kbd/consolefonts$%lib/kbd/consolefonts% restored >1
-sort <1 >restored
-rm -f 1
-#
-cd ${cwd}
-diff tmp/original tmp/restored 2>&1 1>/dev/nul
-if [ $? != 0 ] ; then
- echo " "
- echo " "
- echo " ===== !!!! lib-test-root failed !!!! ===== "
- echo " ===== !!!! lib-test-root failed !!!! ===== " >>test.out
- echo " "
-else
- echo " ===== lib-test-root OK ===== "
- echo " ===== lib-test-root OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory then create some
-# new files, do a Decremental then a bunch of query commands
-# and finally restore the two files.
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/tmp/build" >/tmp/file-list
-mkdir ${cwd}/tmp/build
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-cd ${cwd}/tmp
-echo "${cwd}/tmp/build/ficheriro1.txt" >restore-list
-echo "${cwd}/tmp/build/ficheriro2.txt" >>restore-list
-cd ${cwd}
-
-echo " "
-echo " "
-echo " === Starting query-test at `date +%R:%S` ==="
-echo " === Starting query-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-label storage=File volume=TestVolume002
-run job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-echo "ficheriro1.txt" >${cwd}/tmp/build/ficheriro1.txt
-echo "ficheriro2.txt" >${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-@# Force decremental on the second Volume
-update volume=TestVolume001 VolStatus=Used
-run level=decremental job=CompressedTest yes
-wait
-messages
-@output
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-echo "ficheriro2.txt" >${cwd}/tmp/build/ficheriro2.txt
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run level=incremental job=CompressedTest yes
-wait
-messages
-@#
-@# Now do the queries
-@#
-query
-1
-query
-2
-ficheriro1.txt
-query
-3
-${cwd}/tmp/build/
-ficheriro1.txt
-Client1
-query
-7
-TestVolume001
-query
-8
-1
-query
-9
-Client1
-query
-10
-Default
-query
-11
-query
-12
-query
-13
-1
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores storage=File file=<${cwd}/tmp/restore-list
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-#
-# Delete .c files because we will only restored the txt files
-#
-rm -f tmp/build/*.c
-diff -r tmp/build tmp/bacula-restores${cwd}/tmp/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! query-test Bacula source failed!!! !!!!! "
- echo " !!!!! query-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== query-test Bacula source OK ===== "
- echo " ===== query-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory but
-# create three volumes and do six backups causing the
-# volumes to be recycled, and cycling through the volumes
-# twice. Tests maxvoljobs and volretention.
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting recycle-test at `date +%R:%S` ==="
-echo " === Starting recycle-test at `date +%R:%S` ===" >working/log
-echo " "
-
-#bin/bacula start 2>&1 >/dev/null
-bin/bacula start
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-label storage=File1 volume=TestVolume001
-label storage=File1 volume=TestVolume002
-label storage=File1 volume=TestVolume003
-update Volume=TestVolume001 volretention=10s
-update Volume=TestVolume001 maxvoljobs=1
-update Volume=TestVolume002 volretention=10s
-update Volume=TestVolume002 maxvoljobs=1
-update Volume=TestVolume003 volretention=10s
-update Volume=TestVolume003 maxvoljobs=1
-list volumes
-run job=NightlySave storage=File1 level=full yes
-wait
-messages
-list volumes
-run job=NightlySave storage=File1 level=full yes
-wait
-messages
-list volumes
-run job=NightlySave storage=File1 level=full yes
-wait
-messages
-list volumes
-@sleep 10
-run job=NightlySave storage=File1 level=full yes
-wait
-messages
-list volumes
-run job=NightlySave storage=File1 level=full yes
-wait
-messages
-@output tmp/log1.out
-list volumes
-run job=NightlySave storage=File1 level=full yes
-wait
-messages
-list volumes
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File1
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File1
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! recycle-test Bacula source failed!!! !!!!! "
- echo " !!!!! recycle-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== recycle-test Bacula source OK ===== "
- echo " ===== recycle-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the compressed option
-# then backup four times, each with incremental then finally restore.
-# It should require at least 4 different bsrs.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting relabel-tape at `date +%R:%S` ==="
-echo " === Starting relabel-tape at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-run job=NightlySave level=Full yes
-wait
-messages
-add pool=Default
-0
-TestVolume002
-@# set status to append
-update volume=TestVolume001
-1
-.
-run job=NightlySave level=Full yes
-@sleep 20
-unmount
-unmount
-purge volume=TestVolume001
-relabel oldvolume=TestVolume001 volume=TestVolume003 slot=0 pool=Default
-list volumes
-mount
-messages
-wait
-run job=NightlySave level=Full yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-echo "Backup done"
-#
-# now do several restores to ensure we cleanup between jobs
-#
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-wait
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-wait
-@$out tmp/log2.out
-@#
-@# now unmount the tape and start two restores
-@# at the same time
-@#
-unmount storage=DDS-4
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-restore where=${cwd}/tmp/bacula-restores select
-unmark *
-mark *
-done
-yes
-mount storage=DDS-4
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-dstat=$?
-if [ $dstat != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! relabel-tape Bacula source failed!!! !!!!! "
- echo " !!!!! relabel-tape failed!!! !!!!! " >>test.out
- echo "diff=$dstat backup=$bstat restore=$rstat"
- echo " "
-else
- echo " ===== relabel-tape Bacula source OK ===== "
- echo " ===== relabel-tape OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# to a tape where the maximum tape file size is set to 1M
-# then restore a few files from it. Note, by setting the maximum
-# file size to 1M, it runs very slow. There are about 64 files that
-# are created during each of the two backups.
-#
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "${cwd}/build" >/tmp/file-list
-sed s%\^%${cwd}% ${cwd}/scripts/flist >${cwd}/tmp/restore2-list
-
-cp ${cwd}/bin/bacula-sd.conf ${cwd}/tmp/1
-sed "s%# Maximum File Size% Maximum File Size%" ${cwd}/tmp/1 >${cwd}/bin/bacula-sd.conf
-
-echo " "
-echo " "
-echo " === Starting restore-by-file-tape test at `date +%R:%S` ==="
-echo " === Starting restore-by-file-tape test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-run job=NightlySave level=Full yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores storage=DDS-4 file=<${cwd}/tmp/restore2-list
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-dstat=0
-scripts/check_for_zombie_jobs storage=DDS-4
-#
-# We need to stop and start Bacula to
-# test appending to a previously written tape
-#
-bin/bacula stop 2>&1 >/dev/null
-bin/bacula start 2>&1 >/dev/null
-for i in `cat ${cwd}/tmp/restore2-list`; do
- diff $i ${cwd}/tmp/bacula-restores$i
- if [ $? != 0 ] ; then
- dstat=1
- fi
-done
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-if [ $dstat != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! restore-by-file-tape test Bacula source failed!!! !!!!! "
- echo " !!!!! restore-by-file-tape test failed!!! !!!!! " >>test.out
- echo " "
- bin/bacula stop 2>&1 >/dev/null
- exit 1
-else
- echo " First of two restores OK "
-fi
-rm -rf ${cwd}/tmp/bacula-restores
-#
-# Now do a second backup and restore
-#
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run job=NightlySave level=Full yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores
-7
-<${cwd}/tmp/restore2-list
-
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=DDS-4
-bin/bacula stop 2>&1 >/dev/null
-for i in `cat ${cwd}/tmp/restore2-list`; do
- diff $i ${cwd}/tmp/bacula-restores$i
- if [ $? != 0 ] ; then
- dstat=1
- fi
-done
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-if [ $dstat != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! restore-by-file-tape test Bacula source failed!!! !!!!! "
- echo " !!!!! restore-by-file-tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== restore-by-file-tape test Bacula source OK ===== "
- echo " ===== restore-by-file-tape test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the compressed option
-# then restore it.
-#
-
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/tmp/build" >/tmp/file-list
-mkdir ${cwd}/tmp/build
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-cd ${cwd}/tmp/build
-ls >../1
-cd ..
-sed s%\^%${cwd}/tmp/build/% 1 >restore-list
-rm -f 1
-cd ${cwd}
-
-echo " "
-echo " "
-echo " === Starting restore-by-file-test at `date +%R:%S` ==="
-echo " === Starting restore-by-file-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores storage=File
-7
-<${cwd}/tmp/restore-list
-
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r tmp/build tmp/bacula-restores${cwd}/tmp/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! restore-by-file-test Bacula source failed!!! !!!!! "
- echo " !!!!! restore-by-file-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== restore-by-file-test Bacula source OK ===== "
- echo " ===== restore-by-file-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a backup of the full bacula build directory, but with the
-# Maximum File Size set. Then do a restore of a few files to kick in
-# disk seeking (not yet enabled), and ensure that the restored files
-# match. Even though disk seeking is not yet enabled, this is a good test,
-# and once it is enabled, this will test it.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/tmp/build" >/tmp/file-list
-rm -rf ${cwd}/tmp/build
-mkdir ${cwd}/tmp/build
-# Copy only the .c files (to be restored)
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-cd ${cwd}/tmp/build
-ls >../1
-cd ..
-sed s%\^%${cwd}/tmp/build/% 1 >restore-list
-# At this point restore-list contains the list
-# of files we will restore
-rm -f 1
-cd ${cwd}
-# Now backup *everything*
-rm -rf ${cwd}/tmp/build
-mkdir ${cwd}/tmp/build
-cp -fp ${cwd}/build/src/dird/* ${cwd}/tmp/build
-# Enable MaximumFileSize
-cp ${cwd}/bin/bacula-sd.conf ${cwd}/tmp/1
-sed "s%# Maximum File Size% Maximum File Size%" ${cwd}/tmp/1 >${cwd}/bin/bacula-sd.conf
-
-echo " "
-echo " "
-echo " === Starting restore-disk-seek-test at `date +%R:%S` ==="
-echo " === Starting restore-disk-seek-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-sql
-select * from JobMedia;
-
-restore bootstrap=${cwd}/tmp/kern.bsr where=${cwd}/tmp/bacula-restores storage=File
-7
-<${cwd}/tmp/restore-list
-
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-# Now setup a control directory of only what we *should* restore
-rm -rf ${cwd}/tmp/build
-mkdir ${cwd}/tmp/build
-cp -p ${cwd}/build/src/dird/*.c ${cwd}/tmp/build
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r tmp/build tmp/bacula-restores${cwd}/tmp/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! restore-disk-seek-test Bacula source failed!!! !!!!! "
- echo " !!!!! restore-disk-seek-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== restore-disk-seek-test Bacula source OK ===== "
- echo " ===== restore-disk-seek-test OK ===== " >>test.out
-# scripts/cleanup
-# rm -rf ${cwd}/tmp/build
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the compressed option
-# then restore a few selected files.
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-sed s%\^%${cwd}% ${cwd}/scripts/flist >${cwd}/tmp/restore2-list
-
-echo " "
-echo " "
-echo " === Starting restore2-by-file-test at `date +%R:%S` ==="
-echo " === Starting restore2-by-file-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores storage=File file=<${cwd}/tmp/restore2-list
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-dstat=0
-for i in `cat ${cwd}/tmp/restore2-list`; do
- diff $i ${cwd}/tmp/bacula-restores$i
- if [ $? != 0 ] ; then
- dstat=1
- fi
-done
-if [ $dstat != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! restore2-by-file-test Bacula source failed!!! !!!!! "
- echo " !!!!! restore2-by-file-test failed!!! !!!!! " >>test.out
- echo " "
- echo "Backup term status = $bstat"
- echo "Restore term status = $rstat"
- echo "Diff status = $dstat"
- echo " "
-else
- echo " ===== restore2-by-file-test Bacula source OK ===== "
- echo " ===== restore2-by-file-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Create a 60MB file with random bytes. Back it up to 6 Volumes
-# each constrained to 10MB using the automatic labeling feature.
-#
-
-if test ! -c /dev/urandom ; then
- echo "No random device. Test skipped.\n"
- exit 0
-fi
-cwd=`pwd`
-scripts/copy-testa-confs
-scripts/cleanup
-echo "${cwd}/tmp/largefile" >/tmp/file-list
-# Create 56MB file with random data
-echo "Creating a 56MB file with random data ..."
-dd if=/dev/urandom of=${cwd}/tmp/largefile bs=1024 count=55000
-echo "largefile created"
-
-echo " "
-echo " "
-echo " === Starting six-vol-test at `date +%R:%S` ==="
-echo " === Starting six-vol-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run job=MultiVol storage=File yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff tmp/largefile tmp/bacula-restores${cwd}/tmp/largefile 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! six-vol-test Bacula source failed!!! !!!!! "
- echo " !!!!! six-vol-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== six-vol-test Bacula source OK ===== "
- echo " ===== six-vol-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# to a tape where the maximum tape file size is set to 1M
-#
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-
-echo "${cwd}/build" >/tmp/file-list
-cp ${cwd}/bin/bacula-sd.conf ${cwd}/tmp/1
-sed "s%# Maximum File Size% Maximum File Size%" ${cwd}/tmp/1 >${cwd}/bin/bacula-sd.conf
-
-echo " "
-echo " "
-echo " === Starting Small File Size test at `date +%R:%S` ==="
-echo " === Starting Small File Size test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-setdebug level=2 storage=DDS-4
-run job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=DDS-4
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=DDS-4
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Small File Size test Bacula source failed!!! !!!!! "
- echo " !!!!! Small File Size test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== Small File Size test Bacula source OK ===== "
- echo " ===== Small File Size test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory but
-# split the archive into four volumes, two of which are
-# totally full. I.e. make sure that bsr selects all tapes
-# including those fully spanned.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting span-vol-test at `date +%R:%S` ==="
-echo " === Starting span-vol-test at `date +%R:%S` ===" >working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=File1 volume=TestVolume004
-label storage=File1 volume=TestVolume003
-label storage=File1 volume=TestVolume002
-label storage=File1 volume=TestVolume001
-update Volume=TestVolume004 MaxVolBytes=3000000
-update Volume=TestVolume003 MaxVolBytes=3000000
-update Volume=TestVolume002 MaxVolBytes=3000000
-run job=NightlySave storage=File1 yes
-wait
-list volumes
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File1
-unmark *
-mark *
-done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File1
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! span-vol-test Bacula source failed!!! !!!!! "
- echo " !!!!! span-vol-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== span-vol-test Bacula source OK ===== "
- echo " ===== span-vol-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the Sparse option
-# then restore it.
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-bin/bacula stop 2>&1 >/dev/null
-
-echo " "
-echo " "
-echo " === Starting sparse-compressed-test at `date +%R:%S` ==="
-echo " === Starting sparse-compressed-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=SparseCompressedTest yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all storage=File done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! sparse-compressed-test Bacula source failed!!! !!!!! "
- echo " !!!!! sparse-compressed-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== sparse-compressed-test Bacula source OK ===== "
- echo " ===== sparse-compressed-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the Sparse option
-# then restore it.
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting sparse-test at `date +%R:%S` ==="
-echo " === Starting sparse-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=SparseTest yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! sparse-test Bacula source failed!!! !!!!! "
- echo " !!!!! sparse-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== sparse-test Bacula source OK ===== "
- echo " ===== sparse-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-echo " " >test.out
-rm -f bin/working/*
+++ /dev/null
-#!/bin/sh
-#
-# Test for a tape truncation bug.
-#
-cwd=`pwd`
-
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting truncate-bug-tape test at `date +%R:%S` ==="
-echo " === Starting truncate-bug-tape test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default
-@# do a bunch of saves so we have 12 files on the tape
-run job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-run level=Full job=NightlySave yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=DDS-4
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-@#
-@# now do a restore
-@#
-restore where=${cwd}/tmp/bacula-restores storage=DDS-4
-3
-@# select JobId=4 (i.e. file five on the tape)
-4
-cd ${cwd}/build
-@# mark a single file
-mark configure
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=DDS-4
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log2.out
-run level=Full job=NightlySave yes
-wait
-messages
-quit
-END_OF_DATA
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Backup OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! truncate-bug-tape test Bacula source failed!!! !!!!! "
- echo " !!!!! truncate-bug-tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== truncate-bug-tape test Bacula source OK ===== "
- echo " ===== truncate-bug-tape test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory using the compressed option
-# then backup a second time and finally restore it
-#
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting two-jobs-test at `date +%R:%S` ==="
-echo " === Starting two-jobs-test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-setdebug level=15 storage=File
-@output /dev/null
-messages
-@output /dev/null
-estimate job=CompressedTest listing
-estimate job=CompressedTest listing
-estimate job=CompressedTest listing
-messages
-@output tmp/log1.out
-label storage=File volume=TestVolume001
-run job=CompressedTest yes
-wait
-messages
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-echo "Backup 1 done"
-touch ${cwd}/build/src/dird/*.c
-#
-# run a second job
-#
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA 2>&1 >/dev/null
-@output /dev/null
-messages
-@output tmp/log1.out
-run job=CompressedTest
-yes
-wait
-messages
-@#
-@# now do several restores to ensure we cleanup between jobs
-@#
-@output /dev/null
-restore where=${cwd}/tmp/bacula-restores select all storage=File done
-yes
-wait
-restore where=${cwd}/tmp/bacula-restores select all storage=File done
-yes
-wait
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File
-unmark *
-mark *
-done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! two-jobs-test Bacula source failed!!! !!!!! "
- echo " !!!!! two-jobs-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== two-jobs-test Bacula source OK ===== "
- echo " ===== two-jobs-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# This is Arno's test. It uses two pools, two tapes, and
-# an autochanger. Note, the Director has three Pools in its
-# conf: Default, Full, and Inc. Default is used in the
-# NightlySave job by default. What is backed up is what
-# is in /tmp/file-list, which is by default the Bacula
-# source code (i.e. the build directory).
-#
-. config.out
-if test x${AUTOCHANGER} = x/dev/null ; then
- echo "two-pool-tape test skipped. No autochanger."
- exit
-fi
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-cwd=`pwd`
-scripts/cleanup
-scripts/copy-2tape-confs
-scripts/prepare-two-tapes
-
-# Make a relatively large backup set 5 x source code directory
-echo "${cwd}/build" >/tmp/file-list
-echo "${cwd}/build" >>/tmp/file-list
-echo "${cwd}/build" >>/tmp/file-list
-echo "${cwd}/build" >>/tmp/file-list
-echo "${cwd}/build" >>/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting Two Pool Tape test at `date +%R:%S` ==="
-echo " === Starting Two Pool Tape test at `date +%R:%S` ===" >>working/log
-echo " "
-# Write out bconsole commands to a file
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=1 pool=Full drive=0
-label storage=DDS-4 volume=TestVolume002 slot=2 pool=Default drive=0
-list volumes
-@# Start job with Client run before and sleep
-run job=NightlySave1 level=Full pool=Default yes
-run job=NightlySave1 level=Full pool=Default yes
-run job=NightlySave1 level=Full pool=Default yes
-@# wait between starting jobs
-@sleep 60
-@#setdebug level=100 storage=DDS-4
-run job=NightlySave2 level=Full pool=Full yes
-run job=NightlySave2 level=Full pool=Full yes
-run job=NightlySave2 level=Full pool=Full yes
-@sleep 10
-messages
-@sleep 10
-messages
-@sleep 10
-status storage=DDS-4
-messages
-wait
-list volumes
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all storage=DDS-4 done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-# start Bacula and run bconsole commands
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-# Make sure no jobs are still running (debug check)
-scripts/check_for_zombie_jobs storage=DDS-4
-
-# stop Bacula
-bin/bacula stop 2>&1 >/dev/null
-# Check if backup done correctly
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Two Pool Tape test Bacula source failed!!! !!!!! "
- echo " !!!!! Two Pool Tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== Two Pool Tape test Bacula source OK ===== "
- echo " ===== Two Pool Tape test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory but
-# split the archive into two volumes
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting two-vol-test at `date +%R:%S` ==="
-echo " === Starting two-vol-test at `date +%R:%S` ===" >working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=File1 volume=TestVolume002
-label storage=File1 volume=TestVolume001
-update Volume=TestVolume002 MaxVolBytes=3000000
-run job=NightlySave storage=File1 yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all storage=File1 done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=File1
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! two-vol-test Bacula source failed!!! !!!!! "
- echo " !!!!! two-vol-test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== two-vol-test Bacula source OK ===== "
- echo " ===== two-vol-test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# to two tapes where the maximum tape file size is set to 1M
-# Note, this test simulates the tape filling and writing to
-# the next tape.
-#
-. config.out
-if test x${AUTOCHANGER} = x/dev/null ; then
- echo "two-volume-tape test skipped. No autochanger."
- exit
-fi
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-cwd=`pwd`
-scripts/cleanup
-scripts/copy-2tape-confs
-scripts/prepare-two-tapes
-
-echo "${cwd}/build" >/tmp/file-list
-
-outf="tmp/sed_tmp"
-echo "s%# Maximum File Size% Maximum File Size%g" >${outf}
-cp ${cwd}/bin/bacula-sd.conf ${cwd}/tmp/1
-sed -f ${outf} ${cwd}/tmp/1 >${cwd}/bin/bacula-sd.conf
-
-echo " "
-echo " "
-echo " === Starting Two Volume Tape test at `date +%R:%S` ==="
-echo " === Starting Two Volume Tape test at `date +%R:%S` ===" >>working/log
-echo " "
-# Write out bconsole commands
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=1 pool=Default drive=0
-label storage=DDS-4 volume=TestVolume002 slot=2 pool=Default drive=0
-update Volume=TestVolume001 MaxVolBytes=3000000 pool=Default drive=0
-@#setdebug level=1000 client
-run job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all storage=DDS-4 done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-scripts/check_for_zombie_jobs storage=DDS-4
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff -r build tmp/bacula-restores${cwd}/build 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Two Volume Tape test Bacula source failed!!! !!!!! "
- echo " !!!!! Two Volume Tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== Two Volume Tape test Bacula source OK ===== "
- echo " ===== Two Volume Tape test OK ===== " >>test.out
-# scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the /usr directory
-# then restore it.
-#
-cwd=`pwd`
-scripts/copy-tape-confs
-scripts/cleanup-tape
-echo "/usr" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting usr-tape-root test ==="
-echo " "
-echo " "
-
-bin/bacula start 2>&1 >/dev/null
-bin/bconsole -c bin/bconsole.conf <<END_OF_DATA
-@output /dev/null
-messages
-@output tmp/log1.out
-label storage=DDS-4 Volume=TestVolume001 slot=0
-run job=NightlySave yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@output tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select all done
-yes
-wait
-messages
-@output
-quit
-END_OF_DATA
-bin/bacula stop 2>&1 >/dev/null
-cd /
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-usr-test lib >${cwd}/tmp/original
-cd ${cwd}/tmp/bacula-restores
-${cwd}/bin/testls -e ${cwd}/scripts/exclude-usr-test lib >${cwd}/tmp/restored
-cd ${cwd}/tmp
-sed s%.*lib/kbd/consolefonts$%lib/kbd/consolefonts% original >1
-sort <1 >original
-#
-sed s%.*lib/kbd/consolefonts$%lib/kbd/consolefonts% restored >1
-sort <1 >restored
-rm -f 1
-#
-cd ${cwd}
-diff tmp/original tmp/restored 2>&1 1>/dev/nul
-if [ $? != 0 ] ; then
- echo " "
- echo " "
- echo " ===== !!!! usr-tape-root failed !!!! ===== "
- echo " ===== !!!! usr-tape-root failed !!!! ===== " >>test.out
- echo " "
-else
- echo " ===== usr-tape-root OK ===== "
- echo " ===== usr-tape-root OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# then verify the catalog.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting verify Volume Test at `date +%R:%S` ==="
-echo " === Starting verify Volume Test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-setdebug level=1 storage=File sd
-label storage=File volume=TestVolume001
-run job=NightlySave yes
-wait
-messages
-@#
-@# now do a verify volume
-@#
-@$out ${cwd}/tmp/original
-run job=VerifyVolume
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-sleep 2
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Verify OK" ${cwd}/tmp/original 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Verify Volume failed!!! !!!!! "
- echo " !!!!! Verify Volume failed!!! !!!!! " >>test.out
- if [ $bstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== Verify Volume Test OK ===== "
- echo " ===== Verify Volume Test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# then restore it.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-if test ! -d weird-files ; then
- echo " "
- echo "Weird files not configured. Test not run."
- exit 0
-fi
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-#
-# Note, we save the weird-files directory twice on purpose
-# because this causes problems with hard linked files
-# that are only saved once. In 1.33, Bacula now deals
-# with this situation.
-#
-echo "${cwd}/weird-files" >/tmp/file-list
-echo "${cwd}/weird-files" >>/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting weird filenames test at `date +%R:%S` ==="
-echo " === Starting weird filenames test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=File
-TestVolume001
-run job=NightlySave
-yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=${cwd}/tmp/bacula-restores select storage=File
-unmark *
-mark *
-done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-${cwd}/bin/testls weird-files | sort >${cwd}/tmp/original
-cd tmp/bacula-restores${cwd}
-${cwd}/bin/testls weird-files | sort >${cwd}/tmp/restored
-cd ${cwd}
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff ${cwd}/tmp/original ${cwd}/tmp/restored 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Weird files test failed!!! !!!!! "
- echo " !!!!! Weird files test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== Weird files test OK ===== "
- echo " ===== Weird files test OK ===== " >>test.out
- cd ${cwd}
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# then restore it.
-#
-debug=0
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-if test ! -d weird-files ; then
- echo " "
- echo "weird files not configured. Test not run."
- exit 0
-fi
-cwd=`pwd`
-scripts/copy-test-confs
-scripts/cleanup
-rm -rf weird-files2
-cp -Rp weird-files weird-files2
-echo "${cwd}/weird-files2" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting weird-files2 test at `date +%R:%S` ==="
-echo " === Starting weird-file2 test at `date +%R:%S` ===" >>working/log
-echo " "
-
-bin/testls weird-files2 >${cwd}/tmp/original
-
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@$out tmp/log1.out
-label storage=File volume=TestVolume001
-run job=NightlySave yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-scripts/check_for_zombie_jobs storage=File
-#
-# Now mess up the a hard link, and a soft link
-#
-cd weird-files2
-rm -f hard-file2
-ln hard-file3 hard-file2
-rm -f soft-file2
-ln -s soft-file3 soft-file2
-cd ${cwd}
-cat <<END_OF_DATA >tmp/bconcmds
-@$out /dev/null
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where= storage=File
-5
-unmark *
-mark *
-done
-yes
-wait
-messages
-@$out
-quit
-END_OF_DATA
-
-if test "$debug" -eq 1 ; then
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-scripts/check_for_zombie_jobs storage=File
-bin/bacula stop 2>&1 >/dev/null
-bin/testls weird-files2 >${cwd}/tmp/restored
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-diff ${cwd}/tmp/original ${cwd}/tmp/restored 2>&1 >/dev/null
-if [ $? != 0 -o $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Weird files2 test failed!!! !!!!! "
- echo " !!!!! Weird files2 test failed!!! !!!!! " >>test.out
- if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " !!!!! Bad Job termination status !!!!! "
- echo " !!!!! Bad Job termination status !!!!! " >>test.out
- else
- echo " !!!!! Restored files differ !!!!! "
- echo " !!!!! Restored files differ !!!!! " >>test.out
- fi
- echo " "
-else
- echo " ===== Weird files2 test OK ===== "
- echo " ===== Weird files2 test OK ===== " >>test.out
- cd ${cwd}
- scripts/cleanup
- rm -rf weird-files2
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# to a tape then restore it, we do that twice to ensure that
-# we can correctly append to a tape.
-#
-debug=1
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-win32-confs
-scripts/cleanup-tape
-
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting Win32 Backup tape test at `date +%R:%S` ==="
-echo " === Starting Win32 Backup tape test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default drive=0
-run job=NightlySave yes
-@sleep 10
-status storage=DDS-4
-@sleep 30
-messages
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=c:/tmp/bacula-restores select all storage=DDS-4 done
-yes
-wait
-messages
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Win32 Backup tape test failed!!! !!!!! "
- echo " !!!!! Win32 Backup tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== Win32 Backup tape test OK ===== "
- echo " ===== Win32 Backup tape test OK ===== " >>test.out
- scripts/cleanup
-fi
+++ /dev/null
-#!/bin/sh
-#
-# Run a simple backup of the Bacula build directory
-# to a tape then restore it, we do that twice to ensure that
-# we can correctly append to a tape.
-#
-debug=1
-if test "$debug" -eq 1 ; then
- out="tee"
-else
- out="output"
-fi
-
-cwd=`pwd`
-scripts/copy-win32-confs
-scripts/cleanup-tape
-
-echo "${cwd}/build" >/tmp/file-list
-
-echo " "
-echo " "
-echo " === Starting Win32 Backup tape test at `date +%R:%S` ==="
-echo " === Starting Win32 Backup tape test at `date +%R:%S` ===" >>working/log
-echo " "
-
-cat <<END_OF_DATA >tmp/bconcmds
-@output /dev/null
-messages
-@$out tmp/log1.out
-label storage=DDS-4 volume=TestVolume001 slot=0 pool=Default drive=0
-run job=NightlySave yes
-@sleep 10
-status storage=DDS-4
-@sleep 30
-messages
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out tmp/log2.out
-restore where=c:/tmp/bacula-restores client=Tibs select all storage=DDS-4 done
-yes
-wait
-messages
-END_OF_DATA
-if test "$debug" -eq 1 ; then
- bin/bacula start
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf
-else
- bin/bacula start 2>&1 >/dev/null
- cat tmp/bconcmds | bin/bconsole -c bin/bconsole.conf 2>&1 >/dev/null
-fi
-
-scripts/check_for_zombie_jobs storage=DDS-4
-
-bin/bacula stop 2>&1 >/dev/null
-grep "^ Termination: *Backup OK" tmp/log1.out 2>&1 >/dev/null
-bstat=$?
-grep "^ Termination: *Restore OK" tmp/log2.out 2>&1 >/dev/null
-rstat=$?
-if [ $bstat != 0 -o $rstat != 0 ] ; then
- echo " "
- echo " "
- echo " !!!!! Win32 Backup tape test failed!!! !!!!! "
- echo " !!!!! Win32 Backup tape test failed!!! !!!!! " >>test.out
- echo " "
-else
- echo " ===== Win32 Backup tape test OK ===== "
- echo " ===== Win32 Backup tape test OK ===== " >>test.out
-# scripts/cleanup
-fi