Migration Time = 1
}
+Pool {
+ Name = Special
+ Pool Type = Backup
+ Recycle = yes # Bacula can automatically recycle Volumes
+ AutoPrune = yes # Prune expired volumes
+ Volume Retention = 365d # one year
+ NextPool = Full
+ Storage = File
+  Migration High Bytes = 40M        # select when pool exceeds this (PoolOccupancy selection)
+  Migration Low Bytes = 20M         # stop when pool usage drops below this
+  Migration Time = 5h               # jobs older than this are selected (PoolTime selection)
+}
+
Pool {
Name = Full
Pool Type = Backup
Recycle = yes
AutoPrune = yes
Pool Type = Scratch
+ RecyclePool = Scratch
}
fi
}
-#get_duration()
-#{
-# LOG=$1
-# T=`awk 'BEGIN {t["secs"]=1;t["sec"]=1;t["min"]=60;t["mins"]=60}; /Elapsed time:/ { last=$3*t[$4] } END { print last }' $LOG`
-# echo $T
-#}
-
-#check_duration()
-#{
-# LOG=$1
-# TIME=$2
-# OP=${3-gt}
-
-# T=$(get_duration $LOG)
-# if [ "$T" -$OP "$TIME" ]; then
-# print_debug "Expect $OP than $TIME sec, get $T"
-# bstat=2
-# fi
-#}
+################################################################
+# Get information from logs
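+# get_mig_info: put the "Prev Backup JobId" and "New Backup JobId" of the
+# IDX-th copy/migration job found in LOG into the RET variable.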
+get_mig_info()
+{
+  # index of the job in the log (starting from 0)
+ IDX=$1
+ LOG=$2
+ RET=`awk -v idx=$IDX -F: '/Prev Backup JobId/ { pbid[j++]=$2 } /New Backup JobId/ { nbid[i++]=$2 } END { print pbid[idx] " " nbid[idx] }' $LOG`
+}
+
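+# get_duration: put the elapsed time (in seconds) of the last job reported
+# in LOG into the RET variable.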
+get_duration()
+{
+ LOG=$1
+ RET=`awk 'BEGIN {t["secs"]=1;t["sec"]=1;t["min"]=60;t["mins"]=60}; /Elapsed time:/ { last=$3*t[$4] } END { print last }' $LOG`
+}
+
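+# check_duration: flag the test as failed (bstat=2) when the duration of the
+# last job in LOG is $OP (default: gt) TIME seconds.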
+check_duration()
+{
+ LOG=$1
+ TIME=$2
+ OP=${3-gt}
+
+ get_duration $LOG
+ if [ "$RET" -$OP "$TIME" ]; then
+ print_debug "Expect $OP than $TIME sec, get $RET"
+ bstat=2
+ fi
+}
run_bacula()
{
--- /dev/null
+#!/bin/sh
+#
+# Run a simple backup of the Bacula build directory then copy it
+# to another device.
+#
+# This script uses the virtual disk autochanger
+#
+TestName="copy-job-test"
+JobName=CopyJobSave
+. scripts/functions
+
+
+scripts/cleanup
+scripts/copy-migration-confs
+scripts/prepare-disk-changer
+echo "${cwd}/build" >${cwd}/tmp/file-list
+sed -i 's/migrate/copy/g' ${cwd}/bin/bacula-dir.conf
+sed -i 's/Migrate/Copy/g' ${cwd}/bin/bacula-dir.conf
+
+
+change_jobname NightlySave $JobName
+start_test
+
+#
+# Note, we first backup into Pool Default,
+# then Copy into Pool Full.
+# Pool Default uses Storage=File
+# Pool Full uses Storage=DiskChanger
+
+# Write out bconsole commands
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@output
+messages
+@$out ${cwd}/tmp/log1.out
+setdebug level=100 storage=File
+label storage=File volume=FileVolume001 Pool=Default
+label storage=DiskChanger volume=ChangerVolume001 slot=1 Pool=Full drive=0
+label storage=DiskChanger volume=ChangerVolume002 slot=2 Pool=Full drive=0
+@# run two jobs (both will be copied)
+run job=$JobName yes
+run job=$JobName yes
+wait
+list jobs
+list volumes
+@#setdebug level=100 dir
+@# should copy two jobs
+@#setdebug level=51 storage=DiskChanger
+run job=copy-job yes
+wait
+messages
+purge volume=FileVolume001
+list jobs
+list volumes
+wait
+messages
+@$out ${cwd}/tmp/log3.out
+@#
+@# Now do another backup, but level Incremental
+@#
+run job=$JobName level=Incremental yes
+wait
+messages
+@#
+@# This final job that runs should be Incremental and
+@# not upgraded to full.
+list jobs
+@#
+@# now do a restore
+@#
+@$out ${cwd}/tmp/log2.out
+list volumes
+restore where=${cwd}/tmp/bacula-restores select storage=DiskChanger
+unmark *
+mark *
+done
+yes
+list volumes
+wait
+messages
+@output
+quit
+END_OF_DATA
+
+run_bacula
+check_for_zombie_jobs storage=File
+stop_bacula
+
+check_two_logs
+check_restore_diff
+
+grep 'Backup Level:' tmp/log3.out | grep Incremental > /dev/null
+if [ $? != 0 ]; then
+ bstat=2
+ print_debug "The incremental job must use copied jobs"
+fi
+
+end_test
--- /dev/null
+#!/bin/sh
+#
+# Run a simple backup of the Bacula build directory then copy it
+# to another device.
+#
+# This script uses the virtual disk autochanger
+#
+TestName="copy-time-test"
+JobName=CopyJobSave
+. scripts/functions
+
+
+scripts/cleanup
+scripts/copy-migration-confs
+scripts/prepare-disk-changer
+echo "${cwd}/build" >${cwd}/tmp/file-list
+sed -i 's/migrate/copy/g' ${cwd}/bin/bacula-dir.conf
+sed -i 's/Migrate/Copy/g' ${cwd}/bin/bacula-dir.conf
+
+change_jobname NightlySave $JobName
+start_test
+
+#
+# Note, we first backup into Pool Default,
+# then Copy into Pool Full.
+# Pool Default uses Storage=File
+# Pool Full uses Storage=DiskChanger
+
+# Write out bconsole commands
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@output
+messages
+@$out ${cwd}/tmp/log1.out
+label storage=File volume=FileVolume001 Pool=Default
+label storage=File volume=FileVolume002 Pool=Special
+label storage=DiskChanger volume=ChangerVolume001 slot=1 Pool=Full drive=0
+label storage=DiskChanger volume=ChangerVolume002 slot=2 Pool=Full drive=0
+list volumes
+@# run three jobs
+run job=$JobName Pool=Special level=Full yes
+run job=$JobName level=Full yes
+run job=$JobName level=Full yes
+wait
+messages
+update volume=FileVolume001 VolStatus=Used
+update volume=FileVolume002 VolStatus=Used
+llist jobid=2,3
+list jobs
+list volumes
+@#setdebug level=20 dir
+@# should copy only jobid=2 and 3
+run job=copy-time yes
+run job=copy-time pool=Special yes
+wait
+messages
+wait
+list jobs
+list volumes
+purge volume=FileVolume001
+purge volume=FileVolume002
+@#
+@# now do a restore
+@#
+@$out ${cwd}/tmp/log2.out
+restore where=${cwd}/tmp/bacula-restores select storage=DiskChanger
+unmark *
+mark *
+done
+yes
+wait
+messages
+@output
+quit
+END_OF_DATA
+
+run_bacula
+check_for_zombie_jobs storage=File
+stop_bacula
+
+print_debug "We must find 3 copy jobs into the log"
+for i in 0 1 2; do
+ get_mig_info $i ${cwd}/tmp/log1.out
+ set $RET >/dev/null
+
+ if [ -n "$1" -a "$1" != 1 ]; then
+ bstat=2
+    print_debug "The first job should not have been copied"
+ fi
+done
+
+
+check_two_logs
+check_restore_diff
+end_test
stop_bacula
-################################################################
-# Get information from logs
-get_info()
-{
- IDX=$1
- LOG=$2
- RET=`awk -v idx=$IDX -F: '/Prev Backup JobId/ { pbid[j++]=$2 } /New Backup JobId/ { nbid[i++]=$2 } END { print pbid[idx] " " nbid[idx] }' $LOG`
-}
-
-get_info 0 ${cwd}/tmp/log11.out
+get_mig_info 0 ${cwd}/tmp/log11.out
set $RET
if [ -z "$2" -o "$2" = 0 ]; then
print_debug "The first job must have been copied"
fi
-get_info 1 ${cwd}/tmp/log11.out
+get_mig_info 1 ${cwd}/tmp/log11.out
set $RET
if [ -z "$2" -o "$2" != 0 ]; then
print_debug "The second job have no files, it can't have been copied"
fi
-get_info 0 ${cwd}/tmp/log4.out
+get_mig_info 0 ${cwd}/tmp/log4.out
set $RET
if [ -z "$2" -o "$2" = 0 ]; then