--- /dev/null
+#
+# Default Bacula Director Configuration file
+#
+# The only thing that MUST be changed is to add one or more
+# file or directory names in the Include directive of the
+# FileSet resource.
+#
+# For Bacula release 1.39.27 (24 October 2006) -- debian testing/unstable
+#
+# You might also want to change the default email address
+# from root to your address. See the "mail" and "operator"
+# directives in the Messages resource.
+#
+
+Director { # define myself
+ Name = @hostname@-dir
+ DIRport = 8101 # where we listen for UA connections
+ QueryFile = "@scriptdir@/query.sql"
+ WorkingDirectory = "@working_dir@"
+ PidDirectory = "@piddir@"
+ SubSysDirectory = "@subsysdir@"
+ Maximum Concurrent Jobs = 4
+ Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password
+ Messages = Daemon
+}
+
+JobDefs {
+ Name = "DefaultJob"
+ Type = Backup
+ Level = Incremental
+ Client = @hostname@-fd
+ FileSet = FS_TESTJOB
+ Storage = File
+ Messages = Standard
+ Pool = Default
+ Priority = 10
+ Maximum Concurrent Jobs = 16
+}
+
+FileSet {
+ Name = FS_TESTJOB
+ Include {
+ File=</tmp/file-list
+ }
+}
+
+#dir: BeforeJob: run command "/bin/echo RunBeforeJob"
+#fd: ClientRunBeforeJob: ClientRunBeforeJob
+#fd: ClientAfterJob: run command "/bin/echo ClientRunAfterJob"
+#dir: AfterJob: run command "/bin/echo RunAfterJob"
+
+Job {
+ Name = "RUN_ALL_OK"
+ JobDefs = DefaultJob
+ FileSet = FS_TESTJOB
+ RunBeforeJob = "/bin/echo RunBeforeJob"
+ ClientRunBeforeJob = "/bin/echo ClientRunBeforeJob"
+ Run After Job = "/bin/echo RunAfterJob"
+ ClientRunAfterJob = "/bin/echo ClientRunAfterJob"
+}
+
+# After-job scripts run after before-job scripts.
+Job {
+ Name = "RUN_DIR_FAILED_BUG"
+ FileSet = FS_TESTJOB
+ JobDefs = DefaultJob
+ Run After Failed Job = "/bin/echo RunAfterFailedJob"
+ RunBeforeJob = "/bin/false"
+}
+
+#@hostname@-dir: BeforeJob: run command "/bin/false"
+#@hostname@-dir: BeforeJob: RunAfterFailedJob
+Job {
+ Name = "RUN_DIR_FAILED"
+ FileSet = FS_TESTJOB
+ JobDefs = DefaultJob
+ RunBeforeJob = "/bin/false"
+ Run After Failed Job = "/bin/echo RunAfterFailedJob"
+}
+
+#@hostname@-fd: ClientBeforeJob: run command "/bin/false"
+#@hostname@-fd: ClientBeforeJob: run command "/bin/false 2"
+#@hostname@-dir: AfterJob: run command "/bin/echo RunAfterFailedJob"
+Job {
+ Name = "RUN_FD_FAILED"
+ FileSet = FS_TESTJOB
+ JobDefs = DefaultJob
+ RunScript {
+ Command = "/bin/false"
+ abortjobonerror = no
+ RunsWhen = Before
+ }
+ RunScript {
+ Command = "/bin/false 2"
+ abortjobonerror = yes
+ RunsWhen = Before
+ }
+
+ Run After Failed Job = "/bin/echo RunAfterFailedJob"
+}
+
+#@hostname@-fd: ClientBeforeJob: run command "/bin/false"
+#Backup OK -- with warnings
+Job {
+ Name = "RUN_FD_WARNING"
+ FileSet = FS_TESTJOB
+ JobDefs = DefaultJob
+ RunScript {
+ Command = "/bin/false After False"
+ abortjobonerror = no
+ RunsWhen = Before
+ }
+ Run After Failed Job = "/bin/echo RunAfterFailedJob"
+}
+
+# Client (File Services) to backup
+Client {
+ Name = @hostname@-fd
+ Address = @hostname@
+ FDPort = 8102
+ Catalog = MyCatalog
+ Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
+ File Retention = 30d # 30 days
+ Job Retention = 180d # six months
+ AutoPrune = yes # Prune expired Jobs/Files
+ Maximum Concurrent Jobs = 4
+}
+
+# Definition of file storage device
+Storage {
+ Name = File
+ Address = @hostname@ # N.B. Use a fully qualified name here
+ SDPort = 8103
+ Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+ Device = FileStorage
+ Media Type = File
+ Maximum Concurrent Jobs = 4
+}
+
+# Generic catalog service
+Catalog {
+ Name = MyCatalog
+ dbname = bacula; user = bacula; password = ""
+}
+
+
+Messages {
+ Name = Standard
+ mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
+  operatorcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: Intervention needed for %j\" %r"
+ MailOnError = @job_email@ = all
+ operator = @job_email@ = mount
+ console = all, !skipped, !terminate, !restored
+#
+# WARNING! the following will create a file that you must cycle from
+# time to time as it will grow indefinitely. However, it will
+# also keep all your messages if they scroll off the console.
+#
+ append = "@working_dir@/log" = all, !skipped
+}
+
+Messages {
+ Name = NoEmail
+ mailcommand = "@sbindir@/bsmtp -h localhost -f \"\(Bacula Regression\) %r\" -s \"Bacula: %t %e of %c %l\" %r"
+ console = all, !skipped, !terminate
+#
+# WARNING! the following will create a file that you must cycle from
+# time to time as it will grow indefinitely. However, it will
+# also keep all your messages if they scroll off the console.
+#
+ append = "@working_dir@/log" = all, !skipped
+}
+
+
+# Message delivery for daemon messages (no job).
+Messages {
+ Name = Daemon
+ mailcommand = "@sbindir@/bsmtp -h @smtp_host@ -f \"\(Bacula\) %r\" -s \"Bacula daemon message\" %r"
+ mail = @job_email@ = all, !skipped
+ console = all, !skipped, !saved
+ append = "@working_dir@/log" = all, !skipped
+}
+
+# Default pool definition
+Pool {
+ Name = Default
+ Pool Type = Backup
+ Recycle = yes # Bacula can automatically recycle Volumes
+ AutoPrune = yes # Prune expired volumes
+ Volume Retention = 365d # one year
+}
--- /dev/null
+#!/bin/sh
+#
+# Test if Bacula can automatically create a Volume label.
+#
+
+TestName="runscript-test"
+
+. scripts/functions
+set_debug 0
+copy_test_confs
+
+rm -f bin/bacula-dir.conf
+/bin/cp -f scripts/bacula-dir.conf.testrunscript bin/bacula-dir.conf
+
+echo "${cwd}/build" >/tmp/file-list
+
+start_test
+
+cat <<END_OF_DATA >tmp/bconcmds
+@output /dev/null
+messages
+label volume=TestVolume001
+@#setdebug level=100 storage=File
+@output tmp/log.RUN_ALL_OK.out
+run job=RUN_ALL_OK yes
+wait
+@sleep 5
+messages
+@output tmp/log.RUN_DIR_FAILED.out
+run job=RUN_DIR_FAILED yes
+wait
+@sleep 5
+messages
+@output tmp/log.RUN_FD_FAILED.out
+run job=RUN_FD_FAILED yes
+wait
+@sleep 5
+messages
+@output tmp/log.RUN_FD_WARNING.out
+run job=RUN_FD_WARNING yes
+wait
+@sleep 5
+messages
+st dir
+quit
+END_OF_DATA
+
+run_bacula
+stop_bacula
+
+if grep -q 'dir: BeforeJob: run command "/bin/echo RunBeforeJob"' tmp/log.RUN_ALL_OK.out &&
+ grep -q 'fd: ClientRunBeforeJob: ClientRunBeforeJob' tmp/log.RUN_ALL_OK.out &&
+ grep -q 'fd: ClientAfterJob: run command "/bin/echo ClientRunAfterJob' tmp/log.RUN_ALL_OK.out &&
+ grep -q 'dir: AfterJob: run command "/bin/echo RunAfterJob' tmp/log.RUN_ALL_OK.out
+then
+ [ "$debug" -eq 1 ] && echo RUN_ALL_OK ok
+else
+ echo "RUN_ALL_OK in error"
+ rstat=1
+fi
+
+if grep -q 'dir: BeforeJob: run command "/bin/false"' tmp/log.RUN_DIR_FAILED.out &&
+ grep -q 'dir: BeforeJob: RunAfterFailedJob' tmp/log.RUN_DIR_FAILED.out &&
+ true # grep -q 'Backup OK -- with warnings' tmp/log.RUN_DIR_FAILED.out
+then
+ [ "$debug" -eq 1 ] && echo RUN_DIR_FAILED ok
+else
+ echo "RUN_DIR_FAILED in error"
+ rstat=1
+fi
+
+if grep -q 'fd: ClientBeforeJob: run command "/bin/false"' tmp/log.RUN_FD_FAILED.out &&
+ grep -q 'fd: ClientBeforeJob: run command "/bin/false 2"' tmp/log.RUN_FD_FAILED.out &&
+ grep -q 'dir: AfterJob: run command "/bin/echo RunAfterFailedJob"' tmp/log.RUN_FD_FAILED.out
+then
+ [ "$debug" -eq 1 ] && echo RUN_FD_FAILED ok
+else
+ echo "RUN_FD_FAILED in error"
+ rstat=1
+fi
+
+if grep -q 'fd: ClientBeforeJob: run command "/bin/false"' tmp/log.RUN_FD_WARNING.out &&
+ grep -q 'Backup OK -- with warnings' tmp/log.RUN_FD_WARNING.out
+then
+ [ "$debug" -eq 1 ] && echo RUN_FD_WARNING ok
+else
+ echo "RUN_FD_WARNING in error"
+ rstat=1
+fi
+