#
# Default Bacula Director Configuration file
#
#  The only thing that MUST be changed is to add one or more
#   file or directory names in the Include directive of the
#   FileSet resource.
#
#  For Bacula release 1.39.27 (24 October 2006) -- debian testing/unstable
#
#  You might also want to change the default email address
#   from root to your address.  See the "mail" and "operator"
#   directives in the Messages resource.
#

Director {                            # define myself
  Name = @hostname@-dir
  DIRPort = @dirport@                 # where we listen for UA connections
  QueryFile = "@scriptdir@/query.sql"
  WorkingDirectory = "@working_dir@"
  PidDirectory = "@piddir@"
  SubSysDirectory = "@subsysdir@"
  Maximum Concurrent Jobs = 4
  Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3"   # Console password
  Messages = Standard
}

JobDefs {
  Name = "DefaultJob"
  Type = Backup
  Level = Incremental
  Client = @hostname@-fd
  FileSet = FS_TESTJOB
  Storage = File
  Messages = Standard
  Pool = Default
  Priority = 10
  Maximum Concurrent Jobs = 16
}

FileSet {
  Name = FS_TESTJOB
  Include {
    File=<@tmpdir@/file-list
  }
}

#dir: BeforeJob: run command "/bin/echo RunBeforeJob"
#fd: ClientRunBeforeJob: ClientRunBeforeJob
#fd: ClientAfterJob: run command "/bin/echo ClientRunAfterJob"
#dir: AfterJob: run command "/bin/echo RunAfterJob"
Job {
  Name = "RUN_ALL_OK"
  JobDefs = DefaultJob
  FileSet = FS_TESTJOB
  RunBeforeJob = "/bin/echo RunBeforeJob"
  ClientRunBeforeJob = "/bin/echo ClientRunBeforeJob"
  Run After Job = "/bin/echo RunAfterJob"
  ClientRunAfterJob = "/bin/echo ClientRunAfterJob"
}

# After-job commands run after before-job commands
Job {
  Name = "RUN_DIR_FAILED_BUG"
  FileSet = FS_TESTJOB
  JobDefs = DefaultJob
  Run After Failed Job = "/bin/echo RunAfterFailedJob"
  RunBeforeJob = "/bin/false"
}

#@hostname@-dir: BeforeJob: run command "/bin/false"
#@hostname@-dir: BeforeJob: RunAfterFailedJob
Job {
  Name = "RUN_DIR_FAILED"
  FileSet = FS_TESTJOB
  JobDefs = DefaultJob
  RunBeforeJob = "/bin/false RUN_DIR_FAILED"
  Run After Failed Job = "/bin/echo RunAfterFailedJob"
}

#@hostname@-fd: ClientBeforeJob: run command "/bin/false RUN_FD_FAILED1"
#@hostname@-fd: ClientBeforeJob: run command "/bin/false RUN_FD_FAILED2"
#@hostname@-fd: ClientBeforeJob: run command "/bin/false RUN_FD_FAILED3"
#@hostname@-dir: AfterJob: run command "/bin/echo RunAfterFailedJob"
Job {
  Name = "RUN_FD_FAILED"
  FileSet = FS_TESTJOB
  JobDefs = DefaultJob
  RunScript {
    Command = "/bin/false RUN_FD_FAILED1"
    Command = "/bin/false RUN_FD_FAILED2"
    FailJobOnError = no
    RunsWhen = Before
  }
  RunScript {
    Command = "/bin/false RUN_FD_FAILED3"
    FailJobOnError = yes
    RunsWhen = Before
  }
  Run After Failed Job = "/bin/echo RunAfterFailedJob"
  RunScript {
    Command = "/bin/echo touching @tmpdir@/RUN_FD_FAILED"
    RunsWhen = after
    RunsOnFailure = yes
  }
  RunScript {
    Command = "/bin/touch @tmpdir@/RUN_FD_FAILED"
    RunsWhen = after
    RunsOnFailure = yes
  }
}

#@hostname@-fd: ClientBeforeJob: run command "/bin/false RUN_FD_FAILED1"
# we don't execute FAILED2
#@hostname@-dir: AfterJob: run command "/bin/echo RunAfterFailedJob"
Job {
  Name = "RUN_FD_FAILED2"
  FileSet = FS_TESTJOB
  JobDefs = DefaultJob
  RunScript {
    Command = "/bin/false RUN_FD_FAILED1"
    FailJobOnError = yes
    RunsWhen = Before
  }
  RunScript {
    Command = "/bin/false RUN_FD_FAILED2"
    FailJobOnError = yes
    RunsWhen = Before
  }
  RunScript {
    Command = "/bin/false RUN_FD_FAILED3"
    FailJobOnError = yes
    RunsOnFailure = yes
    RunsWhen = Before
  }
  Run After Failed Job = "/bin/echo RunAfterFailedJob"
}

#@hostname@-fd: ClientBeforeJob: run command "/bin/false RUN_FD_WARNING"
# Backup OK -- with warnings
Job {
  Name = "RUN_FD_WARNING"
  FileSet = FS_TESTJOB
  JobDefs = DefaultJob
  RunScript {
    Command = "/bin/false RUN_FD_WARNING"
    FailJobOnError = no
    RunsWhen = Before
  }
  Run After Failed Job = "/bin/echo RunAfterFailedJob"
}
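
# Note on RUN_FD_WARNING above (expected behaviour, per the comment on that job):
# with FailJobOnError = no, the failing client-side before-script should only
# downgrade the result to "Backup OK -- with warnings" rather than fail the job.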

Job {
  Name = "RUN_CONSOLE_CMD"
  FileSet = FS_TESTJOB
  JobDefs = DefaultJob
  RunScript {
    Console = "purge volume=TestVolume001 yes"
    Console = "st dir"
    RunsWhen = Before
    RunsOnClient = no
  }
}

Job {
  Name = "Restore"
  Type = Restore
  Client = @hostname@-fd
  FileSet = FS_TESTJOB
  Storage = File
  Messages = Standard
  Pool = Default
  RunScript {
    RunsWhen = Before
    RunsOnClient = Yes
    Command = "echo ClientBeforeRestore"
  }
  RunScript {
    RunsOnFailure = No
    RunsWhen = After
    RunsOnClient = Yes
    Command = "echo ClientAfterRestore"
  }
  RunScript {
    RunsWhen = Before
    RunsOnClient = No
    Command = "echo DirBeforeRestore"
  }
  RunScript {
    RunsOnFailure = No
    RunsWhen = After
    RunsOnClient = No
    Command = "echo DirAfterRestore"
  }
}

# Client (File Services) to backup
Client {
  Name = @hostname@-fd
  Address = @hostname@
  FDPort = @fdport@
  Catalog = MyCatalog
  Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc"   # password for FileDaemon
  File Retention = 30d                # 30 days
  Job Retention = 180d                # six months
  AutoPrune = yes                     # Prune expired Jobs/Files
  Maximum Concurrent Jobs = 4
}

# Definition of file storage device
Storage {
  Name = File
  Address = @hostname@                # N.B. Use a fully qualified name here
  SDPort = @sdport@
  Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
  Device = FileStorage
  Media Type = File
  Maximum Concurrent Jobs = 4
}

# Generic catalog service
Catalog {
  Name = MyCatalog
  @libdbi@
  dbname = @db_name@; user = @db_user@; password = "@db_password@"
}

Messages {
  Name = Standard
  console = all, !skipped, !saved
}

# Default pool definition
Pool {
  Name = Default
  Pool Type = Backup
  Recycle = yes                       # Bacula can automatically recycle Volumes
  AutoPrune = yes                     # Prune expired volumes
  Volume Retention = 365d             # one year
}
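
# A minimal usage sketch (an illustration, not a definitive procedure): once the
# @...@ placeholders have been substituted by the test harness, the jobs defined
# above could be exercised from bconsole roughly as follows. "run", "wait" and
# "messages" are standard bconsole commands; the job names come from the Job
# resources in this file.
#
#   run job=RUN_ALL_OK yes
#   wait
#   messages
#   run job=RUN_FD_WARNING yes
#   wait
#   messages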