From: Kern Sibbald Date: Thu, 13 Dec 2007 14:32:18 +0000 (+0000) Subject: Add new German manual X-Git-Tag: Release-3.0.0~2151 X-Git-Url: https://git.sur5r.net/?a=commitdiff_plain;h=5f26f202312d725575af3b03d2744a7dcbc4af71;p=bacula%2Fdocs Add new German manual --- diff --git a/docs/Makefile.in b/docs/Makefile.in index 451e35e2..1005e13f 100644 --- a/docs/Makefile.in +++ b/docs/Makefile.in @@ -16,6 +16,11 @@ thisdir = docs # Distribution variables # +de_dirs = manuals/de/catalog manuals/de/concepts manuals/de/console \ + manuals/de/developers manuals/de/install manuals/de/problems \ + manuals/de/utility + + en_dirs = manuals/en/catalog manuals/en/concepts manuals/en/console \ manuals/en/developers manuals/en/install manuals/en/problems \ manuals/en/utility @@ -45,6 +50,14 @@ french: echo ""; echo "";)); \ done +german: + @for I in ${de_dirs}; \ + do (cd $$I; echo "==>Entering directory `pwd`"; \ + $(MAKE) $@ || (echo ""; echo ""; echo " ====== Error in `pwd` ======"; \ + echo ""; echo "";)); \ + done + + configure: autoconf/configure.in autoconf/aclocal.m4 autoconf/acconfig.h cd $(srcdir); ${RMF} -f config.cache config.log config.out config.status src/config.h @@ -77,9 +90,8 @@ $(basedir)/$(VERNAME).lsm: LSM.in $(srcdir)/../autoconf/Make.common.in $(srcdir) clean: $(RMF) *~ 1 2 3 bacula-doc*.tar.gz - (cd manual-de; make clean) (cd bacula-web; make clean) - @for I in ${en_dirs} ${fr_dirs}; \ + @for I in ${en_dirs} ${fr_dirs} ${de_dirs}; \ do (cd $$I; echo "==>Entering directory `pwd`"; ${MAKE} $@ || exit 1); done @@ -90,9 +102,8 @@ distclean: clean $(RMF) -r CVS html-manual/CVS home-page/CVS techlogs/CVS $(RMF) -rf autom4te.cache bacula-doc-* config.log config.out $(RMF) -f config.status kernsconfig - (cd manual-de; make distclean) (cd bacula-web; make distclean) - @for I in ${en_dirs} ${fr_dirs}; \ + @for I in ${en_dirs} ${fr_dirs} ${de_dirs}; \ do (cd $$I; echo "==>Entering directory `pwd`"; ${MAKE} $@ || exit 1); done diff --git a/docs/autoconf/configure.in b/docs/autoconf/configure.in index 48e7cba6..22b7d640 100644 --- a/docs/autoconf/configure.in +++ b/docs/autoconf/configure.in @@ -90,6 +90,27 @@ AC_SUBST_FILE(MCOMMON) AC_OUTPUT([ \ autoconf/Make.common \ Makefile \ + manuals/de/catalog/Makefile \ + manuals/de/catalog/update_version \ + manuals/de/catalog/version.tex \ + manuals/de/concepts/Makefile \ + manuals/de/concepts/update_version \ + manuals/de/concepts/version.tex \ + manuals/de/console/Makefile \ + manuals/de/console/update_version \ + manuals/de/console/version.tex \ + manuals/de/developers/Makefile \ + manuals/de/developers/update_version \ + manuals/de/developers/version.tex \ + manuals/de/install/Makefile \ + manuals/de/install/update_version \ + manuals/de/install/version.tex \ + manuals/de/problems/Makefile \ + manuals/de/problems/update_version \ + manuals/de/problems/version.tex \ + manuals/de/utility/Makefile \ + manuals/de/utility/update_version \ + manuals/de/utility/version.tex \ manuals/en/catalog/Makefile \ manuals/en/catalog/update_version \ manuals/en/catalog/version.tex \ @@ -132,15 +153,19 @@ AC_OUTPUT([ \ manuals/fr/utility/Makefile \ manuals/fr/utility/update_version \ manuals/fr/utility/version.tex \ - manual-de/Makefile \ - manual-de/version.tex \ - manual-de/update_version \ bacula-web/Makefile \ bacula-web/version.tex \ $PFILES ], [ ] ) +chmod 766 manuals/de/catalog/update_version +chmod 766 manuals/de/concepts/update_version +chmod 766 manuals/de/console/update_version +chmod 766 manuals/de/developers/update_version +chmod 766 
manuals/de/install/update_version +chmod 766 manuals/de/problems/update_version +chmod 766 manuals/de/utility/update_version chmod 766 manuals/en/catalog/update_version chmod 766 manuals/en/concepts/update_version chmod 766 manuals/en/console/update_version @@ -155,7 +180,6 @@ chmod 766 manuals/fr/developers/update_version chmod 766 manuals/fr/install/update_version chmod 766 manuals/fr/problems/update_version chmod 766 manuals/fr/utility/update_version -chmod 766 manual-de/update_version echo " Configuration on `date`: diff --git a/docs/configure b/docs/configure index 994c6498..43b3278f 100755 --- a/docs/configure +++ b/docs/configure @@ -1768,7 +1768,7 @@ MCOMMON=./autoconf/Make.common - ac_config_files="$ac_config_files autoconf/Make.common Makefile manuals/en/catalog/Makefile manuals/en/catalog/update_version manuals/en/catalog/version.tex manuals/en/concepts/Makefile manuals/en/concepts/update_version manuals/en/concepts/version.tex manuals/en/console/Makefile manuals/en/console/update_version manuals/en/console/version.tex manuals/en/developers/Makefile manuals/en/developers/update_version manuals/en/developers/version.tex manuals/en/install/Makefile manuals/en/install/update_version manuals/en/install/version.tex manuals/en/problems/Makefile manuals/en/problems/update_version manuals/en/problems/version.tex manuals/en/utility/Makefile manuals/en/utility/update_version manuals/en/utility/version.tex manuals/fr/catalog/Makefile manuals/fr/catalog/update_version manuals/fr/catalog/version.tex manuals/fr/concepts/Makefile manuals/fr/concepts/update_version manuals/fr/concepts/version.tex manuals/fr/console/Makefile manuals/fr/console/update_version manuals/fr/console/version.tex manuals/fr/developers/Makefile manuals/fr/developers/update_version manuals/fr/developers/version.tex manuals/fr/install/Makefile manuals/fr/install/update_version manuals/fr/install/version.tex manuals/fr/problems/Makefile manuals/fr/problems/update_version manuals/fr/problems/version.tex manuals/fr/utility/Makefile manuals/fr/utility/update_version manuals/fr/utility/version.tex manual-de/Makefile manual-de/version.tex manual-de/update_version bacula-web/Makefile bacula-web/version.tex $PFILES" + ac_config_files="$ac_config_files autoconf/Make.common Makefile manuals/de/catalog/Makefile manuals/de/catalog/update_version manuals/de/catalog/version.tex manuals/de/concepts/Makefile manuals/de/concepts/update_version manuals/de/concepts/version.tex manuals/de/console/Makefile manuals/de/console/update_version manuals/de/console/version.tex manuals/de/developers/Makefile manuals/de/developers/update_version manuals/de/developers/version.tex manuals/de/install/Makefile manuals/de/install/update_version manuals/de/install/version.tex manuals/de/problems/Makefile manuals/de/problems/update_version manuals/de/problems/version.tex manuals/de/utility/Makefile manuals/de/utility/update_version manuals/de/utility/version.tex manuals/en/catalog/Makefile manuals/en/catalog/update_version manuals/en/catalog/version.tex manuals/en/concepts/Makefile manuals/en/concepts/update_version manuals/en/concepts/version.tex manuals/en/console/Makefile manuals/en/console/update_version manuals/en/console/version.tex manuals/en/developers/Makefile manuals/en/developers/update_version manuals/en/developers/version.tex manuals/en/install/Makefile manuals/en/install/update_version manuals/en/install/version.tex manuals/en/problems/Makefile manuals/en/problems/update_version manuals/en/problems/version.tex manuals/en/utility/Makefile 
manuals/en/utility/update_version manuals/en/utility/version.tex manuals/fr/catalog/Makefile manuals/fr/catalog/update_version manuals/fr/catalog/version.tex manuals/fr/concepts/Makefile manuals/fr/concepts/update_version manuals/fr/concepts/version.tex manuals/fr/console/Makefile manuals/fr/console/update_version manuals/fr/console/version.tex manuals/fr/developers/Makefile manuals/fr/developers/update_version manuals/fr/developers/version.tex manuals/fr/install/Makefile manuals/fr/install/update_version manuals/fr/install/version.tex manuals/fr/problems/Makefile manuals/fr/problems/update_version manuals/fr/problems/version.tex manuals/fr/utility/Makefile manuals/fr/utility/update_version manuals/fr/utility/version.tex bacula-web/Makefile bacula-web/version.tex $PFILES" ac_config_commands="$ac_config_commands default" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure @@ -2326,6 +2326,27 @@ do # Handling of arguments. "autoconf/Make.common" ) CONFIG_FILES="$CONFIG_FILES autoconf/Make.common" ;; "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "manuals/de/catalog/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/de/catalog/Makefile" ;; + "manuals/de/catalog/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/de/catalog/update_version" ;; + "manuals/de/catalog/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/de/catalog/version.tex" ;; + "manuals/de/concepts/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/de/concepts/Makefile" ;; + "manuals/de/concepts/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/de/concepts/update_version" ;; + "manuals/de/concepts/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/de/concepts/version.tex" ;; + "manuals/de/console/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/de/console/Makefile" ;; + "manuals/de/console/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/de/console/update_version" ;; + "manuals/de/console/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/de/console/version.tex" ;; + "manuals/de/developers/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/de/developers/Makefile" ;; + "manuals/de/developers/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/de/developers/update_version" ;; + "manuals/de/developers/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/de/developers/version.tex" ;; + "manuals/de/install/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/de/install/Makefile" ;; + "manuals/de/install/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/de/install/update_version" ;; + "manuals/de/install/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/de/install/version.tex" ;; + "manuals/de/problems/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/de/problems/Makefile" ;; + "manuals/de/problems/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/de/problems/update_version" ;; + "manuals/de/problems/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/de/problems/version.tex" ;; + "manuals/de/utility/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/de/utility/Makefile" ;; + "manuals/de/utility/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/de/utility/update_version" ;; + "manuals/de/utility/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/de/utility/version.tex" ;; "manuals/en/catalog/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/catalog/Makefile" ;; "manuals/en/catalog/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/catalog/update_version" ;; "manuals/en/catalog/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/en/catalog/version.tex" ;; @@ -2368,9 +2389,6 @@ 
do "manuals/fr/utility/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/fr/utility/Makefile" ;; "manuals/fr/utility/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/fr/utility/update_version" ;; "manuals/fr/utility/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/fr/utility/version.tex" ;; - "manual-de/Makefile" ) CONFIG_FILES="$CONFIG_FILES manual-de/Makefile" ;; - "manual-de/version.tex" ) CONFIG_FILES="$CONFIG_FILES manual-de/version.tex" ;; - "manual-de/update_version" ) CONFIG_FILES="$CONFIG_FILES manual-de/update_version" ;; "bacula-web/Makefile" ) CONFIG_FILES="$CONFIG_FILES bacula-web/Makefile" ;; "bacula-web/version.tex" ) CONFIG_FILES="$CONFIG_FILES bacula-web/version.tex" ;; "$PFILES" ) CONFIG_FILES="$CONFIG_FILES $PFILES" ;; @@ -2866,6 +2884,13 @@ if test "$no_create" != yes; then fi +chmod 766 manuals/de/catalog/update_version +chmod 766 manuals/de/concepts/update_version +chmod 766 manuals/de/console/update_version +chmod 766 manuals/de/developers/update_version +chmod 766 manuals/de/install/update_version +chmod 766 manuals/de/problems/update_version +chmod 766 manuals/de/utility/update_version chmod 766 manuals/en/catalog/update_version chmod 766 manuals/en/concepts/update_version chmod 766 manuals/en/console/update_version @@ -2880,7 +2905,6 @@ chmod 766 manuals/fr/developers/update_version chmod 766 manuals/fr/install/update_version chmod 766 manuals/fr/problems/update_version chmod 766 manuals/fr/utility/update_version -chmod 766 manual-de/update_version echo " Configuration on `date`: diff --git a/docs/manual-de/console.tex b/docs/manual-de/console.tex index 19d4f653..57b235ff 100644 --- a/docs/manual-de/console.tex +++ b/docs/manual-de/console.tex @@ -5,8 +5,8 @@ \label{_ConsoleChapter} \index[general]{Console!Bacula} \index[general]{Bacula Console} -\index[console]{Console!Bacula} -\index[console]{Bacula Console} +\index[general]{Console!Bacula} +\index[general]{Bacula Console} Die {\bf Bacula Console} (manchmal auch das BenutzerInterface genannt) ist ein Programm, dass es dem Anwender oder System Aministrator erlaub, @@ -34,8 +34,8 @@ eines neuen Tapes mittels der Console, wird Bacula weiterarbeiten k\"{o}nnen. \section{Console Konfiguration} \index[general]{Console Konfiguration} \index[general]{Konfiguration!Console} -\index[console]{Console Konfiguration} -\index[console]{Konfiguration!Console} +\index[general]{Console Konfiguration} +\index[general]{Konfiguration!Console} Wenn Sie die Bacula-Console starten, liest sie ihre Standard-Konfigurations-Datei namens {\bf bconsole.conf}, bzw. {\bf bgnome-console.conf} f\"{u}r die GNOME-Console, ein. @@ -46,8 +46,8 @@ F\"{u}r weitere Informationen zu dieser Datei, lesen Sie bitte das Kapitel \"{u} \section{Benutzung des Console-Programms} \index[general]{Benutzung des Console-Programms} \index[general]{Programm!Benutzung des Console-} -\index[console]{Benutzung des Console-Programms} -\index[console]{Programm!Benutzungs des Console-} +\index[general]{Benutzung des Console-Programms} +\index[general]{Programm!Benutzungs des Console-} Das Console-Programm kann mit den folgenden Optionen gestartet werden: \footnotesize @@ -112,8 +112,8 @@ dass Sie dieses Limit nicht \"{u}berschreiten. 
\section{Beenden des Console-Programs} \index[general]{Programm!Beenden des Console-} \index[general]{Beenden des Console-Programms} -\index[console]{Programm!Beenden des Console-} -\index[console]{Beenden des Console-Programms} +\index[general]{Programm!Beenden des Console-} +\index[general]{Beenden des Console-Programms} Normalerweise beenden Sie das Console-Programm durch die Eingabe von {\bf quit} oder {\bf exit}. Allerdings wartet die Console bis der Director-Dienst das Kommando best\"{a}tigt. Wenn der @@ -133,8 +133,8 @@ das Kommando abzubrechen. \section{Alphabetische Liste der Console-Schl\"{u}sselw\"{o}rter} \index[general]{Schl\"{u}sselw\"{o}rter!Alphabetische Liste der Console} \index[general]{Alphabetische Liste der Console-Schl\"{u}sselw\"{o}rter} -\index[console]{Schl\"{u}sselw\"{o}rter!Alphabetische Liste der Console} -\index[console]{Alphabetische Liste der Console-Schl\"{u}sselw\"{o}rter} +\index[general]{Schl\"{u}sselw\"{o}rter!Alphabetische Liste der Console} +\index[general]{Alphabetische Liste der Console-Schl\"{u}sselw\"{o}rter} Wenn es nicht anders angegeben ist, ben\"{o}tigt jedes der folgenden Schl\"{u}sselw\"{o}rter (Parameter der Console-Befehle) ein Argument, welches dem Schl\"{u}sselwort, getrennt durch ein Gleichheitszeichen, folgt. @@ -289,15 +289,15 @@ Reihenfolge sein kann. \section{Alphabetische Liste der Console-Kommandos} \index[general]{Kommandos!Alphabetische Liste der Console-} \index[general]{Alphabetische Liste der Console-Kommandos} -\index[console]{Kommandos!Alphabetische Liste der Console-} -\index[console]{Alphabetische Liste der Console-Kommandos} +\index[general]{Kommandos!Alphabetische Liste der Console-} +\index[general]{Alphabetische Liste der Console-Kommandos} Die folgenden Kommandos sind derzeit verf\"{u}gbar: \begin{description} \item [{add [pool=\lt{}pool-name\gt{} storage=\lt{}storage\gt{} jobid=\lt{}JobId\gt{}]} ] - \index[console]{add} + \index[general]{add} Das add-Kommando wird benutzt um Volumes zu einem bestehenden Pool hinzuzuf\"{u}gen. Dazu wird der Volume-Eintrag in der Datenbank erzeugt und das Volume dem Pool zugeordnet. Dabei erfolgt kein physikalischer Zugriff @@ -321,7 +321,7 @@ Die folgenden Kommandos sind derzeit verf\"{u}gbar: in der Beschreibung des label-Kommandos. \item [autodisplay on/off] - \index[console]{autodisplay on/off} + \index[general]{autodisplay on/off} Das autodisplay-Kommando kennt zwei Parameter: {\bf on} und {\bf off}, wodurch die automatische Anzeige von Nachrichten in der Console entsprechend ein- oder ausgeschaltet wird. Der Standardwert ist {\bf off}, was bedeutet, dass @@ -337,7 +337,7 @@ Die folgenden Kommandos sind derzeit verf\"{u}gbar: empfangen hat. \item [automount on/off] - \index[console]{automount on/off} + \index[general]{automount on/off} Das automount-Kommando kennt zwei Parameter: {\bf on} und {\bf off}, die entsprechend das automatische mounten nach dem labeln ({\bf label}-Kommando) an- oder ausschalten. Der Standardwert ist on. Wenn automount ausgeschaltet ist, @@ -345,7 +345,7 @@ Die folgenden Kommandos sind derzeit verf\"{u}gbar: um es benutzen zu k\"{o}nnen. \item [{cancel [jobid=\lt{}number\gt{} job=\lt{}job-name\gt{} ujobid=\lt{}unique-jobid\gt{}]}] - \index[console]{cancel jobid} + \index[general]{cancel jobid} Das cancel-Kommande wird benutzt um einen Job abzubrechen und kennt die Parameter {\bf jobid=nnn} or {\bf job=xxx}, wober jobid die numerische JobID ist und job der Job-Name. 
Wenn Sie weder job noch jobid angeben, listet die Console @@ -357,7 +357,7 @@ Die folgenden Kommandos sind derzeit verf\"{u}gbar: Dise Zeit ist aber abh\"{a}ngig davon, was der Job gerade tut. \item [{create [pool=\lt{}pool-name\gt{}]}] - \index[console]{create pool} + \index[general]{create pool} Das create-Kommando wird normalerweise nicht benutzt, da die Pool-Eintr\"{a}ge im Katalog automatisch angelegt werden, wenn der Director-Dienst startet und er seine Pool-Konfiguration aus den Konfigurations-Dateien einliest. Falls ben\"{o}tigt, @@ -380,7 +380,7 @@ Die folgenden Kommandos sind derzeit verf\"{u}gbar: \item [{delete [volume=\lt{}vol-name\gt{} pool=\lt{}pool-name\gt{} job jobid=\lt{}id\gt{}]}] - \index[console]{delete} + \index[general]{delete} Das delete-Kommando wird benutzt um ein Volume, einen Pool oder einen Job-Eintrag, sowie jeweils alle dazugeh\"{o}rigen Datenbank-Eintr\"{a}ge, aus dem Katalog zu entfernen. Das Kommando \"{a}ndert nur die Katalog-Datenbank, es hat keine @@ -417,14 +417,14 @@ delete Job JobId=n,m,o-r,t ... (z.B. o-r) verarbeiten. \item [disable job\lt{}job-name\gt{}] - \index[console]{disable} + \index[general]{disable} Das disable-Kommando erlaubt es Ihnen, zu verhindern das ein Job automatisch durch den Director-Dienst ausgef\"{u}hrt wird. Wenn Sie den Director-Dienst neu starten, wird der Status des Jobs wieder auf den Wert gesetzt, der im Job-Eintrag der Director-Konfiguration eingetragen ist. \item [enable job\lt{}job-name\gt{}] - \index[console]{enable} + \index[general]{enable} Das enable-Kommando erlaubt es Ihnen, einen Job der durch das disable-Kommando aus der automatischen Job-Planung entfernt wurde, wieder zu aktivieren. Wenn Sie den Director-Dienst neu starten, @@ -433,7 +433,7 @@ delete Job JobId=n,m,o-r,t ... \label{estimate} \item [estimate] - \index[console]{estimate} + \index[general]{estimate} Mit dem estimate-Kommando k\"{o}nnen Sie sich anzeigen lassen, welche Dateien durch einen bestimmten Job gesichert werden, ohne diesen Job ausf\"{u}hren zu m\"{u}ssen. Standardm\"{a}{\ss}ig wird dabei ein Voll-Backup @@ -489,12 +489,12 @@ Zum Beispiel k\"{o}nnen Sie folgendes eingeben: f\"{u}r ein FileSet anzuzeigen, bei dem die sparse-Option gesetzt ist. \item [help] - \index[console]{help} + \index[general]{help} Das help-Kommando zeigt alle verf\"{u}gbaren Kommandos mit einer kurzen Beschreibung an. \item [label] - \index[console]{label} - \index[console]{relabel} + \index[general]{label} + \index[general]{relabel} \index[general]{label} \index[general]{relabel} Das label-Kommando wird benutzt um physikalische Volumes zu labeln. @@ -607,7 +607,7 @@ label storage=xxx pool=yyy slots=1-5,10 barcodes \normalsize \item [list] - \index[console]{list} + \index[general]{list} Das list-Kommando zeigt den angegebenen Inhalt der Katalog-Datenbank an. Die verschiedenen Felder jedes Eintrags werden in einer Zeile ausgegeben. Die verschiedenen M\"{o}glichkeiten sind: @@ -623,8 +623,8 @@ label storage=xxx pool=yyy slots=1-5,10 barcodes list jobname= (identisch mit dem oberen) - Im oberen Beispiel k\"{o}nnen Sie auch den Parameter limit=nn - hinzuf\"{u}gen, um die Ausgabe des Kommandos auf nn Jobs zu begrenzen + Im oberen Beispiel k\"{o}nnen Sie auch den Parameter limit=nn + hinzuf\"{u}gen, um die Ausgabe des Kommandos auf nn Jobs zu begrenzen list jobmedia @@ -722,7 +722,7 @@ label storage=xxx pool=yyy slots=1-5,10 barcodes file, you use the Console command {\bf show clients}. 
\item [llist] - \index[console]{llist} + \index[general]{llist} The llist or "long list" command takes all the same arguments that the list command described above does. The difference is that the llist command list the full contents of each database record selected. It @@ -771,12 +771,12 @@ label storage=xxx pool=yyy slots=1-5,10 barcodes \normalsize \item [messages] - \index[console]{messages} + \index[general]{messages} This command causes any pending console messages to be immediately displayed. \item [mount] - \index[console]{mount} + \index[general]{mount} The mount command is used to get Bacula to read a volume on a physical device. It is a way to tell Bacula that you have mounted a tape and that Bacula should examine the tape. This command is normally @@ -798,7 +798,7 @@ mount [ jobid=\lt{}id\gt{} | job=\lt{}job-name\gt{} ] program. \item[python] - \index[console]{python} + \index[general]{python} The python command takes a single argument {\bf restart}: python restart @@ -812,7 +812,7 @@ python restart \label{ManualPruning} \item [prune] - \index[console]{prune} + \index[general]{prune} The Prune command allows you to safely remove expired database records from Jobs and Volumes. This command works only on the Catalog database and does not affect data written to Volumes. In all cases, the Prune @@ -828,7 +828,7 @@ volume=\lt{}volume-name\gt{} Append, otherwise the pruning will not take place. \item [purge] - \index[console]{purge} + \index[general]{purge} The Purge command will delete associated Catalog database records from Jobs and Volumes without considering the retention period. {\bf Purge} works only on the Catalog database and does not affect data written to @@ -849,7 +849,7 @@ For the {\bf purge} command to work on Volume Catalog database records the The actual data written to the Volume will be unaffected by this command. \item [relabel] - \index[console]{relabel} + \index[general]{relabel} \index[general]{relabel} This command is used to label physical volumes. The full form of this command is: @@ -867,7 +867,7 @@ relabel storage=\lt{}storage-name\gt{} oldvolume=\lt{}old-volume-name\gt{} on the Volume is lost and cannot be recovered. \item [release] - \index[console]{release} + \index[general]{release} This command is used to cause the Storage daemon to rewind (release) the current tape in the drive, and to re-read the Volume label the next time the tape is used. @@ -884,7 +884,7 @@ release storage=\lt{}storage-name\gt{} command to cause Bacula to completely release (close) the device. \item [reload] - \index[console]{reload} + \index[general]{reload} The reload command causes the Director to re-read its configuration file and apply the new values. The new values will take effect immediately for all new jobs. However, if you change schedules, @@ -909,7 +909,7 @@ release storage=\lt{}storage-name\gt{} \label{restore_command} \item [restore] - \index[console]{restore} + \index[general]{restore} The restore command allows you to select one or more Jobs (JobIds) to be restored using various methods. Once the JobIds are selected, the File records for those Jobs are placed in an internal Bacula directory tree, @@ -937,7 +937,7 @@ restore storage=\lt{}storage-name\gt{} client=\lt{}backup-client-name\gt{} to that client. \item [run] - \index[console]{run} + \index[general]{run} This command allows you to schedule jobs to be run immediately. The full form of the command is: @@ -1012,11 +1012,11 @@ time. Use the {\bf mod} option and select {\bf When} (no. 6). 
Then enter the desired start time in YYYY-MM-DD HH:MM:SS format. \item [setdebug] - \index[console]{setdebug} - \index[dir]{setdebug} - \index[dir]{debugging} - \index[dir]{debugging Win32} - \index[dir]{Windows!debugging} + \index[general]{setdebug} + \index[general]{setdebug} + \index[general]{debugging} + \index[general]{debugging Win32} + \index[general]{Windows!debugging} This command is used to set the debug level in each daemon. The form of this command is: @@ -1033,8 +1033,8 @@ setdebug level=nn [trace=0/1 client=\lt{}client-name\gt{} | dir | director | you are done. \item [show] - \index[console]{show} - \index[dir]{show} + \index[general]{show} + \index[general]{show} The show command will list the Director's resource records as defined in the Director's configuration file (normally {\bf bacula-dir.conf}). This command is used mainly for debugging purposes by developers. @@ -1045,7 +1045,7 @@ setdebug level=nn [trace=0/1 client=\lt{}client-name\gt{} | dir | director | with the {\bf list}, which displays the contents of the catalog. \item [sqlquery] - \index[console]{sqlquery} + \index[general]{sqlquery} The sqlquery command puts the Console program into SQL query mode where each line you enter is concatenated to the previous line until a semicolon (;) is seen. The semicolon terminates the command, which is @@ -1065,7 +1065,7 @@ setdebug level=nn [trace=0/1 client=\lt{}client-name\gt{} | dir | director | SQLite documentation. \item [status] - \index[dir]{status} + \index[general]{status} This command will display the status of the next jobs that are scheduled during the next 24 hours as well as the status of currently running jobs. The full form of this command is: @@ -1197,7 +1197,7 @@ using the "File" device is that the device is blocked waiting for media -- that is Bacula needs you to label a Volume. \item [unmount] - \index[console]{unmount} + \index[general]{unmount} This command causes the indicated Bacula Storage daemon to unmount the specified device. The forms of the command are the same as the mount command: \footnotesize @@ -1219,7 +1219,7 @@ unmount [ jobid= | job= ] \label{UpdateCommand} \item [update] - \index[console]{update} + \index[general]{update} This command will update the catalog for either a specific Pool record, a Volume record, or the Slots in an autochanger with barcode capability. In the case of updating a Pool record, the new information will be automatically taken @@ -1295,7 +1295,7 @@ wish to change. The following Volume parameters may be changed: \normalsize \item [use] - \index[console]{use} + \index[general]{use} This command allows you to specify which Catalog database to use. Normally, you will be using only one database so this will be done automatically. In the case that you are using more than one database, you can use this command @@ -1305,7 +1305,7 @@ use \lt{}database-name\gt{} \item [var] \label{var} - \index[console]{var name} + \index[general]{var name} This command takes a string or quoted string and does variable expansion on it the same way variable expansion is done on the {\bf LabelFormat} string. Thus, for the most part, you can test your LabelFormat strings. The @@ -1315,11 +1315,11 @@ use \lt{}database-name\gt{} good idea of what is going to happen in the real case. \item [version] - \index[console]{version} + \index[general]{version} The command prints the Director's version. \item [quit] - \index[console]{quit} + \index[general]{quit} This command terminates the console program. 
The console program sends the {\bf quit} request to the Director and waits for acknowledgment. If the Director is busy doing a previous command for you that has not terminated, it @@ -1327,7 +1327,7 @@ use \lt{}database-name\gt{} command (i.e. quit preceded by a period). \item [query] - \index[console]{query} + \index[general]{query} This command reads a predefined SQL query from the query file (the name and location of the query file is defined with the QueryFile resource record in the Director's configuration file). You are prompted to select a query from @@ -1354,11 +1354,11 @@ Choose a query (1-9): \normalsize \item [exit] - \index[console]{exit} + \index[general]{exit} This command terminates the console program. \item [wait] - \index[console]{wait} + \index[general]{wait} The wait command causes the Director to pause until there are no jobs running. This command is useful in a batch situation such as regression testing where you wish to start a job and wait until that job completes @@ -1423,11 +1423,11 @@ the tty console program and not in the GNOME Console. These commands are: \begin{description} \item [@input \lt{}filename\gt{}] - \index[console]{@input \lt{}filename\gt{}} + \index[general]{@input \lt{}filename\gt{}} Read and execute the commands contained in the file specified. \item [@output \lt{}filename\gt{} w/a] - \index[console]{@output \lt{}filename\gt{} w/a} + \index[general]{@output \lt{}filename\gt{} w/a} Send all following output to the filename specified either overwriting the file (w) or appending to the file (a). To redirect the output to the terminal, simply enter {\bf @output} without a filename specification. @@ -1444,32 +1444,32 @@ regression test might be: \normalsize \item [@tee \lt{}filename\gt{} w/a] - \index[console]{@tee \lt{}filename\gt{} w/a} + \index[general]{@tee \lt{}filename\gt{} w/a} Send all subsequent output to both the specified file and the terminal. It is turned off by specifying {\bf @tee} or {\bf @output} without a filename. \item [@sleep \lt{}seconds\gt{}] - \index[console]{@sleep \lt{}seconds\gt{}} + \index[general]{@sleep \lt{}seconds\gt{}} Sleep the specified number of seconds. \item [@time] - \index[console]{@time} + \index[general]{@time} Print the current time and date. \item [@version] - \index[console]{@version} + \index[general]{@version} Print the console's version. \item [@quit] - \index[console]{@quit} + \index[general]{@quit} quit \item [@exit] - \index[console]{@exit} + \index[general]{@exit} quit \item [@\# anything] - \index[console]{anything} + \index[general]{anything} Comment \end{description} diff --git a/docs/manuals/de/catalog/Makefile b/docs/manuals/de/catalog/Makefile new file mode 100644 index 00000000..7f8c78fa --- /dev/null +++ b/docs/manuals/de/catalog/Makefile @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... 
+# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=catalog + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Catalog Database Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Catalo*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/catalog/Makefile.in b/docs/manuals/de/catalog/Makefile.in new file mode 100644 index 00000000..7f8c78fa --- /dev/null +++ b/docs/manuals/de/catalog/Makefile.in @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems 
getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=catalog + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Catalog Database Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Catalo*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/catalog/catalog.css b/docs/manuals/de/catalog/catalog.css new file mode 
100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/de/catalog/catalog.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/de/catalog/catalog.tex b/docs/manuals/de/catalog/catalog.tex new file mode 100644 index 00000000..4a6ad9f5 --- /dev/null +++ b/docs/manuals/de/catalog/catalog.tex @@ -0,0 +1,81 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Catalog Database Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". 
+} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{catmaintenance} +\include{mysql} +\include{postgresql} +\include{sqlite} +\include{internaldb} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] + +\end{document} diff --git a/docs/manuals/de/catalog/catmaintenance.tex b/docs/manuals/de/catalog/catmaintenance.tex new file mode 100644 index 00000000..c5c175ea --- /dev/null +++ b/docs/manuals/de/catalog/catmaintenance.tex @@ -0,0 +1,730 @@ +%% +%% + +\chapter{Katalog Verwaltung} +\label{CatMaintenanceChapter} +\index[general]{Verwaltung!Katalog } +\index[general]{Katalog Verwaltung} + +Ohne eine ordnungsgem\"{a}{\ss}e Einrichtung und Verwaltung kann es sein, +dass Ihr Katalog immer gr\"{o}{\ss}er wird wenn Jobs laufen und Daten gesichert werden. +Zudem kann der Katalog ineffizient und langsam werden. Wie schnell der Katalog w\"{a}chst, +h\"{a}ngt von der Anzahl der Jobs und der Menge der dabei gesicherten Dateien ab. +Durch das L\"{o}schen von Eintr\"{a}gen im Katalog kann Platz geschaffen werden f\"{u}r +neue Eintr\"{a}ge der folgenden Jobs. Durch regelm\"{a}{\ss}iges l\"{o}schen alter abgelaufener +Daten (\"{a}lter als durch die Aufbewahrungszeitr\"{a}ume (Retention Periods) angegeben), +wird daf\"{u}r gesorgt, dass die Katalog-Datenbank eine konstante Gr\"{o}{\ss}e beibeh\"{a}lt. + +Sie k\"{o}nnen mit der vorgegebenen Konfiguration beginnen, sie enth\"{a}lt bereits +sinnvolle Vorgaben f\"{u}r eine kleine Anzahl von Clients (kleiner 5), in diesem Fall +wird die Katalogwartung, wenn Sie einige hundert Megabyte freien Plattenplatz haben, +nicht dringlich sein. Was aber auch immer der Fall ist, einiges Wissen \"{u}ber +die Retention Periods/Aufbewahrungszeitr\"{a}ume der Daten im Katalog und auf den Volumes ist hilfreich. + +\section{Einstellung der Aufbewahrungszeitr\"{a}ume} +\label{Retention} +\index[general]{Einstellung der Aufbewahrungszeitr\"{a}ume } +\index[general]{Zeitr\"{a}ume!Einstellung der Aufbewahrungs- } + +Bacula benutzt drei verschiedene Aufbewahrungszeitr\"{a}ume: +die {\bf File Retention}: der Aufbewahrungszeitraum f\"{u}r Dateien, +die {\bf Job Retention}: der Aufbewahrungszeitraum f\"{u}r Jobs und +die {\bf Volume Retention}: der Aufbewahrungszeitraum f\"{u}r Volumes. +Von diesen drei ist der Aufbewahrungszeitraum f\"{u}r Dateien der entscheidende, +wenn es darum geht, wie gro{\ss} die Datenbank werden wird. + +Die {\bf File Retention} und die {\bf Job Retention} werden in der Client-Konfiguration, +wie unten gezeigt, angegeben. Die {\bf Volume Retention} wird in der Pool-Konfiguration +angegeben, genauere Informationen dazu finden Sie im n\"{a}chsten Kapitel dieses Handbuchs. + +\begin{description} + +\item [File Retention = \lt{}time-period-specification\gt{}] + \index[dir]{File Retention } + Der Aufbewahrungszeitraum f\"{u}r Dateien gibt die Zeitspanne an, die die +Datei-Eintr\"{a}ge in der Katalog-Datenbank aufbewahrt werden. +Wenn {\bf AutoPrune} in der Client-Konfiguration auf {\bf yes} gesetzt ist, +wird Bacula die Katalog-Eintr\"{a}ge der Dateien l\"{o}schen, die \"{a}lter als +dieser Zeitraum sind. Das L\"{o}schen erfolgt nach Beendigung eines Jobs des entsprechenden Clients. 
+Bitte beachten Sie, dass die Client-Datenbank-Eintr\"{a}ge eine Kopie der Aufbewahrungszeitr\"{a}ume +f\"{u}r Dateien und Jobs enthalten, Bacula aber die Zeitr\"{a}ume aus der aktuellen Client-Konfiguration +des Director-Dienstes benutzt um alte Katalog-Eintr\"{a}ge zu l\"{o}schen. + +Da die Datei-Eintr\"{a}ge ca. 80 Prozent der Katalog-Datenbankgr\"{o}{\ss}e ausmachen, +sollten Sie sorgf\"{a}lltig ermitteln \"{u}ber welchen Zeitraum Sie die Eintr\"{a}ge aufbewahren wollen. +Nachdem die Datei-Eintr\"{a}ge gel\"{o}scht wurden, ist es nicht mehr m\"{o}glich einzelne dieser Dateien +mit einem R\"{u}cksicherungs-Job wiederherzustellen, aber die Bacula-Versionen 1.37 und sp\"{a}ter +sind in der Lage, aufgrund des Job-Eintrags im Katalog, alle Dateien des Jobs zur\"{u}ckzusichern +solange der Job-Eintrag im Katalog vorhanden ist. + +Aufbewahrungszeitr\"{a}ume werden in Sekunden angegeben, aber der Einfachheit halber sind auch +eine Reihe von Hilfsangaben m\"{o}glich, so dass man Minuten, Stunden, Tage, Wochen, +Monate, Quartale und Jahre konfigurieren kann. Lesen Sie bitte das \ilink{Konfigurations-Kapitel}{Time} +dieses Handbuchs um mehr \"{u}ber diese Hilfsangaben zu erfahren. + +Der Standardwert der Aufbewahrungszeit f\"{u}r Dateien ist 60 Tage. + +\item [Job Retention = \lt{}time-period-specification\gt{}] + \index[dir]{Job Retention } + Der Aufbewahrungszeitraum f\"{u}r Jobs gibt die Zeitspanne an, die die +Job-Eintr\"{a}ge in der Katalog-Datenbank aufbewahrt werden. +Wenn {\bf AutoPrune} in der Client-Konfiguration auf {\bf yes} gesetzt ist, +wird Bacula die Katalog-Eintr\"{a}ge der Jobs l\"{o}schen, die \"{a}lter als +dieser Zeitraum sind. Beachten Sie, dass wenn ein Job-Eintrag gel\"{o}scht wird, +auch alle zu diesem Job geh\"{o}renden Datei- und JobMedia-Eintr\"{a}ge aus dem +Katalog gel\"{o}scht werden. Dies passiert unabh\"{a}ngig von der Aufbewahrungszeit f\"{u}r Dateien, +infolge dessen wird die Aufbewahrungszeit f\"{u}r Dateien normalerweise k\"{u}rzer sein als f\"{u}r Jobs. + +Wie oben erw\"{a}hnt, sind Sie nicht mehr in der Lage einzelne Dateien eines Jobs zur\"{u}ckzusichern, +wenn die Datei-Eintr\"{a}ge aus der Katalog-Datenbank entfernt wurden. Jedoch, solange der Job-Eintrag +im Katalog vorhanden ist, k\"{o}nnen Sie immer noch den kompletten Job mit allen Dateien wiederherstellen +(ab Bacula-Version 1.37 und gr\"{o}{\ss}er). Daher ist es eine gute Idee, die Job-Eintr\"{a}ge im Katalog +l\"{a}nger als die Datei-Eintr\"{a}ge aufzubewahren. + +Aufbewahrungszeitr\"{a}ume werden in Sekunden angegeben, aber der Einfachheit halber sind auch +eine Reihe von Hilfsangaben m\"{o}glich, so dass man Minuten, Stunden, Tage, Wochen, +Monate, Quartale und Jahre konfigurieren kann. Lesen Sie bitte das \ilink{Konfigurations-Kapitel}{Time} +dieses Handbuchs um mehr \"{u}ber diese Hilfsangaben zu erfahren. + +Der Standardwert der Aufbewahrungszeit f\"{u}r Jobs ist 180 Tage. + +\item [AutoPrune = \lt{}yes/no\gt{}] + \index[dir]{AutoPrune } + Wenn AutoPrune auf {\bf yes} (Standard) gesetzt ist, wird Bacula nach jedem Job +automatisch \"{u}berpr\"{u}fen, ob die Aufbewahrungszeit f\"{u}r bestimmte Dateien und/oder Jobs +des gerade gesicherten Clients abgelaufen ist und diese aus dem Katalog entfernen. +Falls Sie AutoPrune durch das Setzen auf {\bf no} ausschalten, wird Ihre Katalog-Datenbank mit jedem +gelaufenen Job immer gr\"{o}{\ss}er werden. 
+\end{description} + +\label{CompactingMySQL} +\section{Komprimieren Ihrer MySQL Datenbank} +\index[general]{Datenbank!Komprimieren Ihrer MySQL } +\index[general]{Komprimieren Ihrer MySQL Datenbank } + +Mit der Zeit, wie oben schon angemerkt, wird Ihre Datenbank dazu neigen zu wachsen. +Auch wenn Bacula regelm\"{a}{\ss}ig Datei-Eintr\"{a}ge l\"{o}scht, wird die {\bf MySQL}-Datenbank +st\"{a}ndig gr\"{o}{\ss}er werden. Um dies zu vermeiden, muss die Datenbank komprimiert werden. +Normalerweise kennen gro{\ss}e kommerzielle Datenbanken, wie Oracle, bestimmte Kommandos +um den verschwendeten Festplattenplatz wieder freizugeben. MySQL hat das {\bf OPTIMIZE TABLE} +Kommando und bei SQLite (Version 2.8.4 und gr\"{o}{\ss}er) k\"{o}nnen Sie das {\bf VACUUM} +Kommando zu diesem Zweck benutzen. Wir \"{u}berlassen es Ihnen, die N\"{u}tzlichkeit von +{\bf OPTIMIZE TABLE} oder {\bf VACUUM} zu ermitteln. + +Alle Datenbanken haben Hilfsmittel, um die enthaltenen Daten im ASCII-Format in eine Datei zu schreiben +und diese Datei dann auch wieder einzulesen. Wenn man das tut, wird die Datenbank erneut erzeugt, was ein +sehr kompaktes Datenbank-Format als Ergebnis hat. Weiter unten zeigen wir Ihnen, wie Sie das bei +MySQL, SQLite und PostgreSQL durchf\"{u}hren k\"{o}nnen. + +Bei einer {\bf MySQL} Datenbank k\"{o}nnen Sie den Inhalt der Katalog-Datenbank mit den folgenden Kommandos +in eine ASCII-Datei (bacula.sql) schreiben und neu in die Datenbank importieren: + +\footnotesize +\begin{verbatim} +mysqldump -f --opt bacula > bacula.sql +mysql bacula < bacula.sql +rm -f bacula.sql +\end{verbatim} +\normalsize + +Abh\"{a}ngig von der Gr\"{o}{\ss}e Ihrer Datenbank, wird dies mehr oder weniger Zeit und auch Festplattenplatz +ben\"{o}tigen. Zum Beispiel, wenn ich in das Verzeichnis wechsle, wo meine MySQL-Datenbank liegt (typischerweise +/var/lib/mysql) und dieses Kommando ausf\"{u}hre: + +\footnotesize +\begin{verbatim} +du bacula +\end{verbatim} +\normalsize + +bekomme ich die Ausgabe {\bf 620,644}, was bedeutet dass das Verzeichnis bacula 620.644 Bl\"{o}cke +von 1024 Bytes auf der Festplatte belegt, meine Datenbank enth\"{a}lt also ca. 635 MB an Daten. +Nachdem ich das {\bf mysqldump} ausgef\"{u}hrt habe, ist die dabei entstandene Datei bacula.sql +{\bf 174.356} Bl\"{o}cke gro{\ss}, wenn diese Datei mit dem Kommando {\bf mysql bacula < bacula.sql} +wieder in die Datenbank importiert wird, ergibt sich eine Datenbankgr\"{o}{\ss}e von nur noch {\bf 210.464} +Bl\"{o}cken. Mit anderen Worten, die komprimierte Version meiner Datenbank, die seit ca. 1 Jahr +in Benutzung ist, ist ungef\"{a}hr nur noch ein Drittel so gro{\ss} wie vorher. + +Als Konsequenz wird empfohlen, auf die Gr\"{o}{\ss}e der Datenbank zu achten und sie von Zeit zu Zeit +(alle sechs Monate oder j\"{a}hrlich) zu komprimieren. + +\label{DatabaseRepair} +\label{RepairingMySQL} +\section{Reparatur Ihrer MySQL Datenbank} +\index[general]{Datenbank!Reparatur Ihrer MySQL } +\index[general]{Reparatur Ihrer MySQL Datenbank } + +Wenn Sie bemerken, dass das Schreiben der MySQL-Datenbank zu Fehlern f\"{u}hrt, +oder das der Director-Dienst h\"{a}ngt, wenn er auf die Datenbank zugreift, +sollten Sie sich die MySQL Datenbank\"{u}berpr\"{u}fungs- und Reparaturprogramme ansehen. +Welches Programm Sie laufen lassen sollten, h\"{a}ngt mit der von Ihnen benutzten Datenbank- +Indizierung zusammen. Wenn Sie das Standardverfahren nutzen, werden Sie vermutlich {\bf myisamchk} +laufen lassen. 
F\"{a}r n\"{a}here Information lesen Sie bitte auch: +\elink{http://dev.mysql.com/doc/refman/5.1/de/client-utility-programs.html} +{http://dev.mysql.com/doc/refman/5.1/de/client-utility-programs.html}. + +Falls die auftretenden Fehler einfache SQL-Warnungen sind, sollten Sie zuerst das von Bacula mitgelieferte +dbcheck-Programm ausf\"{u}hren, bevor Sie die MySQL-Datenbank-Reparaturprogramme nutzen. +Dieses Programm kann verwaiste Datenbankeintr\"{a}ge finden und andere Inkonsistenzen in der +Katalog-Datenbank beheben. + +Eine typische Ursache von Datenbankproblemen ist das Volllaufen einer Partition. +In solch einem Fall muss entweder zus\"{a}tzlicher Platz geschaffen werden, oder +belegter Platz freigegeben werden, bevor die Datenbank mit {\bf myisamchk} repariert werden kann. + +Hier ist ein Beispiel, wie man eine korrupte Datenbank reparieren k\"{o}nnte, falls nach dem Vollaufen +einer Partition die Datenbankprobleme mit {\bf myisamchk -r} nicht behoben werden k\"{o}nnen: + +kopieren Sie folgende Zeilen in ein Shell-Script names {\bf repair}: +\footnotesize +\begin{verbatim} +#!/bin/sh +for i in *.MYD ; do + mv $i x${i} + t=`echo $i | cut -f 1 -d '.' -` + mysql bacula <bacula.db +select * from sqlite_master where type='index' and tbl_name='File'; +\end{verbatim} +\normalsize + +Falls ein Index fehlt, im besonderen der {\bf JobId}-Index, k\"{o}nnen Sie ihn mit den folgenden Befehlen erstellen: + +\footnotesize +\begin{verbatim} +mysql bacula +CREATE INDEX file_jobid_idx on File (JobId); +CREATE INDEX file_jfp_idx on File (Job, FilenameId, PathId); +\end{verbatim} +\normalsize + + + +\label{CompactingPostgres} +\section{Komprimieren Ihrer PostgreSQL Datenbank} +\index[general]{Datenbank!Komprimieren Ihrer PostgreSQL } +\index[general]{Komprimieren Ihrer PostgreSQL Datenbank } + +\"{U}ber die Zeit, wie schon oben angemerkt, wird Ihre Datenbank wachsen. +Auch wenn Bacula regelm\"{a}{\ss}ig alte Daten l\"{o}scht, wird das PostgreSQL Kommando {\bf VACUUM} +Ihnen helfen die Datenbank zu komprimieren. Alternativ wollen Sie eventuell das {\bf vacuumdb}-Kommando nutzen, +das vom cron-Dienst gestartet werden kann. + +Alle Datenbanken haben Hilfsmittel, um die Daten in eine ASCII-Datei zu schreiben um sie dann erneut einzulesen. +Wenn Sie das tun, wird die Datenbank komplett neu aufgebaut und so eine kompaktere Version entstehen. +Wie Sie so etwas tun k\"{o}nnen, zeigt Ihnen das folgende PostgreSQL Beispiel. + +Bei einer PostgreSQL-Datenbank lassen Sie die Daten in eine ASCII-Datei schreiben und neu einlesen, +wenn Sie diese Kommandos ausf\"{u}hren: + +\footnotesize +\begin{verbatim} +pg_dump -c bacula > bacula.sql +cat bacula.sql | psql bacula +rm -f bacula.sql +\end{verbatim} +\normalsize + +Abh\"{a}gig von Ihrer Datenabnkgr\"{o}{\ss}e wird dieser Vorgang mehr oder +weniger Zeit und Festplattenplatz in Anspruch nehmen. Sie sollten vorher +in das Arbeitsverzeichnis Ihrer Datenbank wechseln (typischerweise +/var/lib/postgres/data) und die Gr\"{o}{\ss}e \"{u}berpr\"{u}fen. + +Bestimmte PostgreSQL-Nutzer empfehlen nicht die oben genannte Prozedur, sie sind der Meinung: +bei PostgreSQL ist es nicht notwendig, die Daten zu exportieren um sie dann wieder einzulesen. +Das normale Ausf\"{u}hren des {\bf VACUUM}-Kommandos reicht, um die Datenbank performant zu halten. +Wenn Sie es ganz genau machen wollen, benutzen Sie speziellen Kommandos {\bf VACUUM FULL, REINDEX} und {\bf CLUSTER} +um sich den Umweg \"{u}ber das exportieren und wiedereinlesen der Daten zu ersparen. 
+ +Zum Schlu{\ss} wollen Sie vielleicht noch einen Blick auf die zugeh\"{o}rige PostgreSQL-Dokumentation werfen, +Sie finden sie (auf englisch) unter: +\elink{http://www.postgresql.org/docs/8.2/interactive/maintenance.html} +{http://www.postgresql.org/docs/8.2/interactive/maintenance.html}. + +\section{Komprimieren Ihrer SQLite Datenbank} +\index[general]{Komprimieren Ihrer SQLite Datenbank} +\index[general]{Datenbank!Komprimieren Ihrer SQLite } + +Lesen Sie bitte zuerst die vorherigen Abschnitte die erkl\"{a}ren, warum es erforderlich ist, eine Datenbank zu komprimieren. +SQLite-Versionen gr\"{o}{\ss}er 2.8.4 haben das {\bf Vacuum}-Kommando um die Datenbank zu komprimieren: + +\footnotesize +\begin{verbatim} +cd {\bf working-directory} +echo 'vacuum;' | sqlite bacula.db +\end{verbatim} +\normalsize + +Als Alternative k\"{o}nnen Sie auch die folgenden Kommandos (auf Ihr System angepasst) benutzen: + +\footnotesize +\begin{verbatim} +cd {\bf working-directory} +echo '.dump' | sqlite bacula.db > bacula.sql +rm -f bacula.db +sqlite bacula.db < bacula.sql +rm -f bacula.sql +\end{verbatim} +\normalsize + +Wobei {\bf working-directory} das Verzeichnis ist, dass Sie in Ihrer Director-Dienst-Konfiguration angegeben haben. +Beachten Sie bitte, dass es im Fall von SQLite erforderlich ist, die alte Datenbank komplett zu l\"{o}schen, +bevor die komprimierte Version angelegt werden kann. + +\section{Migration von SQLite zu MySQL} +\index[general]{MySQL!Migration von SQLite zu } +\index[general]{Migration von SQLite zu MySQL } + +Wenn Sie Bacula anfangs mit SQLite zusammen benutzt haben, gibt es sp\"{a}ter +eine Reihe von Gr\"{u}nden, weshalb Sie eventuell auf MySQL umsteigen wollen: +SQLite belegt mehr Festplattenplatz f\"{u}r dieselbe Datenmenge als MySQL; +falls die Datenbank besch\"{a}digt wird, ist es mit SQLite problematischer als +bei MySQL oder PostgreSQL, sie wiederherzustellen. Viele Benutzer sind erfolgreich +von SQLite auf MySQL umgestiegen, indem sie zuerst die Daten exportiert haben und sie dann +mit einem z.B. Perl-Script in ein passendes Format konvertiert haben, um sie in die +MySQL-Datenbank zu importieren. Dies ist aber kein sehr einfacher Vorgang. + +\label{BackingUpBacula} +\section{Sichern Ihrer Bacula Datenbank} +\index[general]{Sichern Ihrer Bacula Datenbank} +\index[general]{Datenbank!Sichern Ihrer Bacula } + +Falls jemals der Rechner auf dem Ihre Bacula-Installation l\"{a}uft abst\"{u}rzt, +und Sie diesen wiederherstellen m\"{u}ssen, wird es einer der ersten Schritte sein, +die Datenbank zur\"{u}ckzusichern. Obwohl Bacula fr\"{o}hlich die Datenbank sichert, +wenn sie im FileSet angegeben ist, ist das kein sehr guter Weg, da Bacula die Datenbank +\"{a}ndert, w\"{a}hrend sie gesichert wird. Dadurch ist die gesicherte Datenbank wahrscheinlich +in einem inkonsistenten Zustand. Noch schlimmer ist, dass die Datenbank gesichert wird, +bevor Bacula alle Aktualisierungen durchf\"{u}hren kann. + +Um diese Problem zu umgehen, m\"{u}ssen Sie die Datenbank sichern nachdem alle Backup-Jobs +gelaufen sind. Zus\"{a}tzlich werden Sie wohl eine Kopie der Datenbank erstellen wollen, +w\"{a}hrend Bacula keine Aktualisierungen vornimmt. Um das zu erreichen, k\"{o}nnen Sie +die beiden Scripte {\bf make\_catalog\_backup} und {\bf delete\_catalog\_backup} benutzen, +die Ihrer Bacula-Version beiliegen. Diese Dateien werden, zusammen mit den anderen Bacula-Scripts, +automatisch erzeugt. 
Das erste Script erzeugt eine ASCII-Kopie Ihrer Datenbank namens {\bf bacula.sql}
+in dem Arbeitsverzeichnis, das Sie in der Konfiguration angegeben haben. Das zweite Script
+l\"{o}scht die Datei {\bf bacula.sql} wieder.
+
+Die grundlegenden Arbeitsschritte, damit alles korrekt funktioniert, sind folgende:
+
+\begin{itemize}
+\item alle Backup-Jobs laufen lassen
+\item wenn alle Jobs beendet sind, einen Catalog-Backup-Job starten
+\item der Catalog-Backup-Job muss nach den anderen Backup-Jobs laufen
+\item benutzen Sie {\bf RunBeforeJob}, um die ASCII-Sicherungsdatei zu erstellen, und
+  {\bf RunAfterJob}, um sie wieder zu l\"{o}schen
+\end{itemize}
+
+Angenommen, Sie starten alle Ihre Backup-Jobs nachts um 01:05, dann k\"{o}nnen Sie das Catalog-Backup
+mit der folgenden zus\"{a}tzlichen Director-Dienst-Konfiguration ausf\"{u}hren lassen:
+
+\footnotesize
+\begin{verbatim}
+# Catalog-Datenbank-Backup (nach der naechtlichen Sicherung)
+Job {
+  Name = "BackupCatalog"
+  Type = Backup
+  Client = rufus-fd
+  FileSet = "Catalog"
+  Schedule = "WeeklyCycleAfterBackup"
+  Storage = DLTDrive
+  Messages = Standard
+  Pool = Default
+  # Achtung!!! Das Passwort auf der Kommandozeile zu uebergeben ist nicht sicher.
+  # Lesen Sie bitte die Kommentare in der Datei make_catalog_backup.
+  RunBeforeJob = "/home/kern/bacula/bin/make_catalog_backup"
+  RunAfterJob  = "/home/kern/bacula/bin/delete_catalog_backup"
+  Write Bootstrap = "/home/kern/bacula/working/BackupCatalog.bsr"
+}
+# Dieser Schedule startet das Catalog-Backup nach den anderen Sicherungen
+Schedule {
+  Name = "WeeklyCycleAfterBackup"
+  Run = Level=Full sun-sat at 1:10
+}
+# Das FileSet fuer die ASCII-Kopie der Datenbank
+FileSet {
+  Name = "Catalog"
+  Include {
+    Options {
+      signature = MD5
+    }
+    File = <working_directory>/bacula.sql
+  }
+}
+\end{verbatim}
+\normalsize
+
+Stellen Sie sicher, dass wie in dem Beispiel eine Bootstrap-Datei geschrieben wird,
+und bewahren Sie vorzugsweise eine Kopie dieser Bootstrap-Datei auf einem anderen Computer auf.
+Dies erlaubt eine schnelle Wiederherstellung der Datenbank, falls erforderlich. Wenn Sie
+keine Bootstrap-Datei haben, ist die Wiederherstellung trotzdem m\"{o}glich, sie erfordert aber mehr Arbeit und dauert l\"{a}nger.
+
+\label{BackingUpBaculaSecurityConsiderations}
+\section{Sicherheitsaspekte}
+\index[general]{Sicherung der Bacula Datenbank - Sicherheitsaspekte }
+\index[general]{Datenbank!Sicherung der Bacula Datenbank - Sicherheitsaspekte }
+
+Das Script make\_catalog\_backup wird als Beispiel daf\"{u}r bereitgestellt, wie Sie Ihre
+Bacula-Datenbank sichern k\"{o}nnen. Wir erwarten, dass Sie entsprechend Ihrer Situation
+Vorsichtsma{\ss}nahmen treffen.
+make\_catalog\_backup ist so ausgelegt, dass das Passwort auf der Kommandozeile \"{u}bergeben wird.
+Das ist in Ordnung, solange sich nur vertrauensw\"{u}rdige Benutzer am System anmelden k\"{o}nnen,
+ansonsten ist es inakzeptabel. Die meisten Datenbanksysteme bieten eine alternative Methode an,
+um das Passwort nicht auf der Kommandozeile \"{u}bergeben zu m\"{u}ssen.
+
+Das Script make\_catalog\_backup enth\"{a}lt einige Warnungen zu diesem Thema. Bitte lesen Sie
+die Kommentare im Script.
+
+Bei PostgreSQL k\"{o}nnen Sie z.B. eine Passwort-Datei verwenden, siehe
+\elink{.pgpass}{http://www.postgresql.org/docs/8.2/static/libpq-pgpass.html},
+und MySQL kennt daf\"{u}r die Datei \elink{.my.cnf}{http://dev.mysql.com/doc/refman/5.1/de/password-security.html}.
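+
+Wie solche Passwort-Dateien aussehen k\"{o}nnen, zeigt die folgende Skizze. Benutzername, Passwort
+und Rechnername sind hier frei erfundene Beispiele und m\"{u}ssen durch Ihre eigenen Werte ersetzt
+werden; beide Dateien sollten au{\ss}erdem nur f\"{u}r den jeweiligen Benutzer lesbar sein
+(z.B. mit chmod 600):
+
+\footnotesize
+\begin{verbatim}
+# ~/.pgpass (PostgreSQL), Format: Rechner:Port:Datenbank:Benutzer:Passwort
+localhost:5432:bacula:bacula:geheimes-passwort
+
+# ~/.my.cnf (MySQL):
+[client]
+user     = bacula
+password = geheimes-passwort
+\end{verbatim}
+\normalsize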
+
+Wir hoffen, dass wir Ihnen damit etwas helfen konnten,
+aber nur Sie k\"{o}nnen beurteilen, was in Ihrer Situation erforderlich ist.
+
+\label{BackingUPOtherDBs}
+\section{Sicherung anderer Datenbanken}
+\index[general]{Sicherung anderer Datenbanken }
+\index[general]{Datenbanken!Sicherung anderer }
+
+Wie oben schon erw\"{a}hnt wurde, f\"{u}hrt das Sichern von Datenbank-Dateien im laufenden Betrieb
+dazu, dass sich die gesicherten Dateien wahrscheinlich in einem inkonsistenten Zustand befinden.
+
+Die beste L\"{o}sung daf\"{u}r ist, die Datenbank vor der Sicherung zu stoppen
+oder datenbankspezifische Hilfsprogramme zu verwenden, um eine g\"{u}ltige Sicherungsdatei zu erstellen,
+die Bacula dann auf die Volumes schreiben kann. Wenn Sie unsicher sind, wie Sie das am besten mit der
+von Ihnen benutzten Datenbank erreichen k\"{o}nnen, hilft Ihnen eventuell die Webseite von Backup Central
+weiter. Auf \elink{Free Backup and Recovery Software}{http://www.backupcentral.com/toc-free-backup-software.html}
+finden Sie Links zu Scripts, die zeigen, wie man die meisten gr\"{o}{\ss}eren Datenbanken sichern kann.
+
+\label{Size}
+\section{Datenbank Gr\"{o}{\ss}e}
+\index[general]{Gr\"{o}{\ss}e!Datenbank }
+\index[general]{Datenbank Gr\"{o}{\ss}e }
+
+Wenn Sie nicht automatisch alte Datens\"{a}tze aus Ihrer Katalog-Datenbank l\"{o}schen lassen,
+wird Ihre Datenbank mit jedem gelaufenen Backup-Job wachsen (siehe auch weiter oben).
+Normalerweise sollten Sie sich entscheiden, wie lange Sie die Datei-Eintr\"{a}ge im Katalog
+aufbewahren wollen, und die {\bf File Retention} entsprechend konfigurieren. Dann k\"{o}nnen Sie
+entweder abwarten, wie gro{\ss} Ihre Katalog-Datenbank wird, oder es ungef\"{a}hr
+berechnen. Dazu m\"{u}ssen Sie wissen, dass f\"{u}r jede gesicherte Datei in etwa 154 Bytes in der
+Katalog-Datenbank belegt werden, und absch\"{a}tzen, wieviele Dateien Sie auf wievielen Computern sichern werden.
+
+Ein Beispiel: Angenommen, Sie sichern zwei Computer, jeder mit 100.000 Dateien.
+Weiterhin angenommen, Sie machen ein w\"{o}chentliches Full-Backup und ein
+inkrementelles jeden Tag, wobei bei einem inkrementellen Backup typischerweise 10.000 Dateien
+gesichert werden. Die ungef\"{a}hre Gr\"{o}{\ss}e Ihrer Datenbank nach einem Monat
+kann dann so berechnet werden:
+
+\footnotesize
+\begin{verbatim}
+   Groesse = 154 * Anzahl Computer * (100.000 * 4 + 10.000 * 26)
+\end{verbatim}
+\normalsize
+
+Dabei wird ein Monat mit 30 Tagen angenommen, in dem neben den 4 Full-Backups also 26 inkrementelle
+Backups laufen. Das ergibt die folgende Rechnung:
+\footnotesize
+\begin{verbatim}
+   Groesse = 154 * 2 * (100.000 * 4 + 10.000 * 26)
+oder
+   Groesse = 308 * (400.000 + 260.000)
+oder
+   Groesse = 203.280.000 Bytes
+\end{verbatim}
+\normalsize
+
+F\"{u}r die beiden oben angenommenen Computer k\"{o}nnen wir also davon ausgehen, dass die Datenbank
+in etwa 200 Megabytes gro{\ss} wird. Nat\"{u}rlich h\"{a}ngt dieser Wert davon ab, wieviele
+Dateien wirklich gesichert werden.
+
+Unten sehen Sie ein paar Statistiken f\"{u}r eine MySQL-Datenbank, die
+Job-Eintr\"{a}ge f\"{u}r 5 Clients \"{u}ber 8,5 Monate und
+Datei-Eintr\"{a}ge \"{u}ber 80 Tage enth\"{a}lt (\"{a}ltere
+Datei-Eintr\"{a}ge wurden schon gel\"{o}scht). Bei diesen 5 Clients wurden
+nur die Benutzer- und System-Dateien gesichert, die sich st\"{a}ndig
+\"{a}ndern. Bei allen anderen Dateien wird angenommen, dass sie leicht aus
+den Software-Paketen des Betriebssystems wiederherstellbar sind.
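+
+Welche Gr\"{o}{\ss}e die einzelnen Tabellen bei Ihrer Installation tats\"{a}chlich haben,
+k\"{o}nnen Sie sich zum Beispiel direkt von MySQL anzeigen lassen; der Datenbankname
+{\bf bacula} ist hier nur eine Annahme und muss gegebenenfalls angepasst werden:
+
+\footnotesize
+\begin{verbatim}
+mysql bacula
+show table status;
+\end{verbatim}
+\normalsize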
+
+In der folgenden Liste (eine Datei pro MySQL-Tabelle) enthalten die Dateien mit der Endung .MYD
+die eigentlichen Daten, die Dateien mit der Endung .MYI die Indexe.
+
+Sie werden bemerken, dass die Datei File.MYD (sie enth\"{a}lt die Datei-Attribute)
+die meisten Eintr\"{a}ge hat und auch den
+meisten Platz auf der Festplatte belegt. Die {\bf File Retention} (der Aufbewahrungszeitraum
+f\"{u}r Dateien) ist also im Wesentlichen daf\"{u}r verantwortlich, wie gro{\ss} die Datenbank wird.
+Eine kurze Berechnung zeigt, dass die Datenbank mit jeder gesicherten Datei ungef\"{a}hr um
+154 Bytes w\"{a}chst.
+
+\footnotesize
+\begin{verbatim}
+      Groesse
+     in Bytes   Eintraege   Dateiname
+ ============   =========   ===========
+          168           5   Client.MYD
+        3,072               Client.MYI
+  344,394,684   3,080,191   File.MYD
+  115,280,896               File.MYI
+    2,590,316     106,902   Filename.MYD
+    3,026,944               Filename.MYI
+          184           4   FileSet.MYD
+        2,048               FileSet.MYI
+       49,062       1,326   JobMedia.MYD
+       30,720               JobMedia.MYI
+      141,752       1,378   Job.MYD
+       13,312               Job.MYI
+        1,004          11   Media.MYD
+        3,072               Media.MYI
+    1,299,512      22,233   Path.MYD
+      581,632               Path.MYI
+           36           1   Pool.MYD
+        3,072               Pool.MYI
+            5           1   Version.MYD
+        1,024               Version.MYI
+\end{verbatim}
+\normalsize
+
+Die Datenbank hat eine Gr\"{o}{\ss}e von ca. 450 Megabytes.
+
+H\"{a}tten wir SQLite genommen, w\"{a}re die Bestimmung der Datenbankgr\"{o}{\ss}e
+viel einfacher gewesen, da SQLite alle Daten in einer einzigen Datei speichert;
+dann aber h\"{a}tten wir nicht so einfach erkennen k\"{o}nnen, welche der Tabellen
+den meisten Speicherplatz ben\"{o}tigt.
+
+SQLite-Datenbanken k\"{o}nnen bis zu 50 \% gr\"{o}{\ss}er sein als MySQL-Datenbanken
+(bei gleichem Datenbestand), weil bei SQLite alle Daten als ASCII-Zeichenketten gespeichert werden.
+Sogar bin\"{a}re Daten werden als ASCII-Zeichenkette dargestellt, und das scheint den Speicherverbrauch
+zu erh\"{o}hen.
diff --git a/docs/manuals/de/catalog/check_tex.pl b/docs/manuals/de/catalog/check_tex.pl
new file mode 100755
index 00000000..e12d51be
--- /dev/null
+++ b/docs/manuals/de/catalog/check_tex.pl
@@ -0,0 +1,152 @@
+#!/usr/bin/perl -w
+# Finds potential problems in tex files, and issues warnings to the console
+# about what it finds. Takes a list of files as its arguments,
+# and does checks on all the files listed. The assumption is that these are
+# valid (or close to valid) LaTeX files. It follows \include statements
+# recursively to pick up any included tex files.
+#
+# Currently the following checks are made:
+#
+# -- Multiple hyphens not inside a verbatim environment (or \verb). These
+#    should be placed inside a \verb{} construct so they will not be converted
+#    to a single hyphen by latex and latex2html.
+#
+# Original creation 3-8-05 by Karl Cunningham  karlc -at- keckec -dot- com
+#
+
+use strict;
+
+# The following builds the test string to identify and change multiple
+# hyphens in the tex files. Several constructs are identified but only
+# multiple hyphens are changed; the others are fed to the output
+# unchanged.
+my $b = '\\\\begin\\*?\\s*\\{\\s*';  # \begin{
+my $e = '\\\\end\\*?\\s*\\{\\s*';    # \end{
+my $c = '\\s*\\}';                   # closing curly brace
+
+# This captures entire verbatim environments. These are passed to the output
+# file unchanged.
+my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c;
+
+# This captures \verb constructs (with any delimiter). They are passed to the output unchanged.
+my $verb = '\\\\verb\\*?(.).*?\\1';
+
+# This captures multiple hyphens with a leading and trailing space. These are not changed.
+my $hyphsp = '\\s\\-{2,}\\s';
+
+# This identifies other multiple hyphens.
+my $hyphens = '\\-{2,}';
+
+# This identifies \hyperpage{..} commands, which should be ignored.
+my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}';
+
+# This builds the actual test string from the above strings.
+#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens";
+my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens";
+
+
+sub get_includes {
+  # Get a list of include files from the top-level tex file. The first
+  # argument is a pointer to the list of files found. The rest of the
+  # arguments is a list of filenames to check for includes.
+  my $files = shift;
+  my ($fileline,$includefile,$includes);
+
+  while (my $filename = shift) {
+    # Get a list of all the html files in the directory.
+    open my $if,"<$filename" or die "Cannot open input file $filename\n";
+    $fileline = 0;
+    $includes = 0;
+    while (<$if>) {
+      chomp;
+      $fileline++;
+      # If a file is found in an include, process it.
+      if (($includefile) = /\\include\s*\{(.*?)\}/) {
+        $includes++;
+        # Append .tex to the filename
+        $includefile .= '.tex';
+
+        # If the include file has already been processed, issue a warning
+        # and don't do it again.
+        my $found = 0;
+        foreach (@$files) {
+          if ($_ eq $includefile) {
+            $found = 1;
+            last;
+          }
+        }
+        if ($found) {
+          print "$includefile found at line $fileline in $filename was previously included\n";
+        } else {
+          # The file has not been previously found. Save it and
+          # recursively process it.
+          push (@$files,$includefile);
+          get_includes($files,$includefile);
+        }
+      }
+    }
+    close $if;
+  }
+}
+
+
+sub check_hyphens {
+  my (@files) = @_;
+  my ($filedata,$this,$linecnt,$before);
+
+  # Build the test string to check for the various environments.
+  # We only do the conversion if the multiple hyphens are outside of a
+  # verbatim environment (either \begin{verbatim}...\end{verbatim} or
+  # \verb{--}). Capture those environments and pass them to the output
+  # unchanged.
+
+  foreach my $file (@files) {
+    # Open the file and load the whole thing into $filedata. A bit wasteful but
+    # easier to deal with, and we don't have a problem with speed here.
+    $filedata = "";
+    open IF,"<$file" or die "Cannot open input file $file";
+    while (<IF>) {
+      $filedata .= $_;
+    }
+    close IF;
+
+    # Set up to process the file data.
+    $linecnt = 1;
+
+    # Go through the file data from beginning to end. For each match, save what
+    # came before it and what matched. $filedata now becomes only what came
+    # after the match.
+    # Check the match to see if it starts with a multiple-hyphen. If so,
+    # warn the user. Keep track of line numbers so they can be output
+    # with the warning message.
+    while ($filedata =~ /$teststr/os) {
+      $this = $&;
+      $before = $`;
+      $filedata = $';
+      $linecnt += $before =~ tr/\n/\n/;
+
+      # Check if the multiple hyphen is present outside of one of the
+      # acceptable constructs.
+      if ($this =~ /^\-+/) {
+        print "Possible unwanted multiple hyphen found in line ",
+          "$linecnt of file $file\n";
+      }
+      $linecnt += $this =~ tr/\n/\n/;
+    }
+  }
+}
+##################################################################
+#                            MAIN                             ####
+##################################################################
+
+my (@includes,$cnt);
+
+# Examine the file pointed to by the first argument to get a list of
+# includes to test.
+get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/de/catalog/do_echo b/docs/manuals/de/catalog/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/de/catalog/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/de/catalog/fdl.tex b/docs/manuals/de/catalog/fdl.tex new file mode 100644 index 00000000..b46cd990 --- /dev/null +++ b/docs/manuals/de/catalog/fdl.tex @@ -0,0 +1,485 @@ +% TODO: maybe get rid of centering + +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} + +\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. 
+ +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. 
(Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. 
+ +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] 
+ Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. 
COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. 
Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/de/catalog/fix_tex.pl b/docs/manuals/de/catalog/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/de/catalog/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. 
+ $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . '}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. 
+ my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/de/catalog/index.perl b/docs/manuals/de/catalog/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/de/catalog/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. 
We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. +# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. 
+ $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. + # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. + s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. 
In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/de/catalog/internaldb.tex b/docs/manuals/de/catalog/internaldb.tex new file mode 100644 index 00000000..65cd0ea0 --- /dev/null +++ b/docs/manuals/de/catalog/internaldb.tex @@ -0,0 +1,76 @@ +%% +%% + +\chapter{The internal database is not supported, please do not +use it.} +\label{InternalDbChapter} +\index[general]{Use it!The internal database is not supported please +do not } +\index[general]{The internal database is not supported, please do not +use it. } + +\section{Internal Bacula Database} +\index[general]{Internal Bacula Database } +\index[general]{Database!Internal Bacula } + +Previously it was intended to be used primarily by Bacula developers for +testing; although SQLite is also a good choice for this. We do not recommend +its use in general. + +This database is simplistic in that it consists entirely of Bacula's internal +structures appended sequentially to a file. Consequently, it is in most cases +inappropriate for sites with many clients or systems with large numbers of +files, or long-term production environments. + +Below, you will find a table comparing the features available with SQLite and +MySQL and with the internal Bacula database. At the current time, you cannot +dynamically switch from one to the other, but must rebuild the Bacula source +code. If you wish to experiment with both, it is possible to build both +versions of Bacula and install them into separate directories. + +\addcontentsline{lot}{table}{SQLite vs MySQL Database Comparison} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Feature } & \multicolumn{1}{c| }{\bf SQLite or MySQL + } & \multicolumn{1}{c| }{\bf Bacula } \\ + \hline +{Job Record } & {Yes } & {Yes } \\ + \hline +{Media Record } & {Yes } & {Yes } \\ + \hline +{FileName Record } & {Yes } & {No } \\ + \hline +{File Record } & {Yes } & {No } \\ + \hline +{FileSet Record } & {Yes } & {Yes } \\ + \hline +{Pool Record } & {Yes } & {Yes } \\ + \hline +{Client Record } & {Yes } & {Yes } \\ + \hline +{JobMedia Record } & {Yes } & {Yes } \\ + \hline +{List Job Records } & {Yes } & {Yes } \\ + \hline +{List Media Records } & {Yes } & {Yes } \\ + \hline +{List Pool Records } & {Yes } & {Yes } \\ + \hline +{List JobMedia Records } & {Yes } & {Yes } \\ + \hline +{Delete Pool Record } & {Yes } & {Yes } \\ + \hline +{Delete Media Record } & {Yes } & {Yes } \\ + \hline +{Update Pool Record } & {Yes } & {Yes } \\ + \hline +{Implement Verify } & {Yes } & {No } \\ + \hline +{MD5 Signatures } & {Yes } & {No } +\\ \hline + +\end{longtable} + +In addition, since there is no SQL available, the Console commands: {\bf +sqlquery}, {\bf query}, {\bf retention}, and any other command that directly +uses SQL are not available with the Internal database. diff --git a/docs/manuals/de/catalog/latex2html-init.pl b/docs/manuals/de/catalog/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/de/catalog/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. 
+1; diff --git a/docs/manuals/de/catalog/mysql.tex b/docs/manuals/de/catalog/mysql.tex new file mode 100644 index 00000000..75cc6f0e --- /dev/null +++ b/docs/manuals/de/catalog/mysql.tex @@ -0,0 +1,286 @@ +%% +%% + +\chapter{Installing and Configuring MySQL} +\label{MySqlChapter} +\index[general]{MySQL!Installing and Configuring } +\index[general]{Installing and Configuring MySQL } + +\section{Installing and Configuring MySQL -- Phase I} +\index[general]{Installing and Configuring MySQL -- Phase I } +\index[general]{Phase I!Installing and Configuring MySQL -- } + +If you use the ./configure \verb:--:with-mysql=mysql-directory statement for +configuring {\bf Bacula}, you will need MySQL version 4.1 or later installed +in the {\bf mysql-directory}. If you are using one of the new modes such as +ANSI/ISO compatibility, you may experience problems. + +If MySQL is installed in the standard system location, you need only enter +{\bf \verb:--:with-mysql} since the configure program will search all the +standard locations. If you install MySQL in your home directory or some +other non-standard directory, you will need to provide the full path to it. + +Installing and Configuring MySQL is not difficult but can be confusing the +first time. As a consequence, below, we list the steps that we used to install +it on our machines. Please note that our configuration leaves MySQL without +any user passwords. This may be an undesirable situation if you have other +users on your system. + +The notes below describe how to build MySQL from the source tar files. If +you have a pre-installed MySQL, you can return to complete the installation +of Bacula, then come back to Phase II of the MySQL installation. If you +wish to install MySQL from rpms, you will probably need to install +the following: + +\footnotesize +\begin{verbatim} +mysql-.rpm +mysql-server-.rpm +mysql-devel-.rpm +\end{verbatim} +\normalsize +The names of the packages may vary from distribution to +distribution. It is important to have the devel package loaded as +it contains the libraries and header files necessary to build +Bacula. There may be additional packages that are required to +install the above, for example, zlib and openssl. + +Once these packages are installed, you will be able to build Bacula (using +the files installed with the mysql package, then run MySQL using the +files installed with mysql-server. If you have installed MySQL by rpms, +please skip Phase I below, and return to complete the installation of +Bacula, then come back to Phase II of the MySQL installation when indicated +to do so. + +Beginning with Bacula version 1.31, the thread safe version of the +MySQL client library is used, and hence you should add the {\bf +\verb:--:enable-thread-safe-client} option to the {\bf +./configure} as shown below: + +\begin{enumerate} +\item Download MySQL source code from + \elink{www.mysql.com/downloads}{http://www.mysql.com/downloads} + +\item Detar it with something like: + + {\bf tar xvfz mysql-filename} + +Note, the above command requires GNU tar. If you do not have GNU tar, a +command such as: + +{\bf zcat mysql-filename | tar xvf - } + +will probably accomplish the same thing. + +\item cd {\bf mysql-source-directory} + + where you replace {\bf mysql-source-directory} with the directory name where + you put the MySQL source code. + +\item ./configure \verb:--:enable-thread-safe-client \verb:--:prefix=mysql-directory + + where you replace {\bf mysql-directory} with the directory name where you + want to install mysql. 
Normally for system wide use this is /usr/local/mysql. + In my case, I use \~{}kern/mysql. + +\item make + + This takes a bit of time. + +\item make install + + This will put all the necessary binaries, libraries and support files into + the {\bf mysql-directory} that you specified above. + +\item ./scripts/mysql\_install\_db + + This will create the necessary MySQL databases for controlling user access. +Note, this script can also be found in the {\bf bin} directory in the +installation directory + +\end{enumerate} + +The MySQL client library {\bf mysqlclient} requires the gzip compression +library {\bf libz.a} or {\bf libz.so}. If you are using rpm packages, these +libraries are in the {\bf libz-devel} package. On Debian systems, you will +need to load the {\bf zlib1g-dev} package. If you are not using rpms or debs, +you will need to find the appropriate package for your system. + +At this point, you should return to completing the installation of {\bf +Bacula}. Later after Bacula is installed, come back to this chapter to +complete the installation. Please note, the installation files used in the +second phase of the MySQL installation are created during the Bacula +Installation. + +\label{mysql_phase2} +\section{Installing and Configuring MySQL -- Phase II} +\index[general]{Installing and Configuring MySQL -- Phase II } +\index[general]{Phase II!Installing and Configuring MySQL -- } + +At this point, you should have built and installed MySQL, or already have a +running MySQL, and you should have configured, built and installed {\bf +Bacula}. If not, please complete these items before proceeding. + +Please note that the {\bf ./configure} used to build {\bf Bacula} will need to +include {\bf \verb:--:with-mysql=mysql-directory}, where {\bf mysql-directory} is the +directory name that you specified on the ./configure command for configuring +MySQL. This is needed so that Bacula can find the necessary include headers +and library files for interfacing to MySQL. + +{\bf Bacula} will install scripts for manipulating the database (create, +delete, make tables etc) into the main installation directory. These files +will be of the form *\_bacula\_* (e.g. create\_bacula\_database). These files +are also available in the \lt{}bacula-src\gt{}/src/cats directory after +running ./configure. If you inspect create\_bacula\_database, you will see +that it calls create\_mysql\_database. The *\_bacula\_* files are provided for +convenience. It doesn't matter what database you have chosen; +create\_bacula\_database will always create your database. + +Now you will create the Bacula MySQL database and the tables that Bacula uses. + + +\begin{enumerate} +\item Start {\bf mysql}. You might want to use the {\bf startmysql} script + provided in the Bacula release. + +\item cd \lt{}install-directory\gt{} + This directory contains the Bacula catalog interface routines. + +\item ./grant\_mysql\_privileges + This script creates unrestricted access rights for the user {\bf bacula}. + You may want to modify it to suit your situation. Please + note that none of the userids, including root, are password protected. + If you need more security, please assign a password to the root user + and to bacula. The program {\bf mysqladmin} can be used for this. + +\item ./create\_mysql\_database + This script creates the MySQL {\bf bacula} database. 
The databases you + create as well as the access databases will be located in + \lt{}install-dir\gt{}/var/ in a subdirectory with the name of the + database, where \lt{}install-dir\gt{} is the directory name that you + specified on the {\bf \verb:--:prefix} option. This can be important to + know if you want to make a special backup of the Bacula database or to + check its size. + +\item ./make\_mysql\_tables + This script creates the MySQL tables used by {\bf Bacula}. +\end{enumerate} + +Each of the three scripts (grant\_mysql\_privileges, create\_mysql\_database +and make\_mysql\_tables) allows the addition of a command line argument. This +can be useful for specifying the user and or password. For example, you might +need to add {\bf -u root} to the command line to have sufficient privilege to +create the Bacula tables. + +To take a closer look at the access privileges that you have setup with the +above, you can do: + +\footnotesize +\begin{verbatim} +mysql-directory/bin/mysql -u root mysql +select * from user; +\end{verbatim} +\normalsize + +\section{Re-initializing the Catalog Database} +\index[general]{Database!Re-initializing the Catalog } +\index[general]{Re-initializing the Catalog Database } + +After you have done some initial testing with {\bf Bacula}, you will probably +want to re-initialize the catalog database and throw away all the test Jobs +that you ran. To do so, you can do the following: + +\footnotesize +\begin{verbatim} + cd + ./drop_mysql_tables + ./make_mysql_tables +\end{verbatim} +\normalsize + +Please note that all information in the database will be lost and you will be +starting from scratch. If you have written on any Volumes, you must write an +end of file mark on the volume so that Bacula can reuse it. Do so with: + +\footnotesize +\begin{verbatim} + (stop Bacula or unmount the drive) + mt -f /dev/nst0 rewind + mt -f /dev/nst0 weof +\end{verbatim} +\normalsize + +Where you should replace {\bf /dev/nst0} with the appropriate tape drive +device name for your machine. + +\section{Linking Bacula with MySQL} +\index[general]{Linking Bacula with MySQL } +\index[general]{MySQL!Linking Bacula with } +\index[general]{Upgrading} + +After configuring Bacula with + +./configure \verb:--:enable-thread-safe-client \verb:--:prefix=\lt{}mysql-directory\gt{} +where \lt{}mysql-directory\gt{} is in my case {\bf /home/kern/mysql}, you may +have to configure the loader so that it can find the MySQL shared libraries. +If you have previously followed this procedure and later add the {\bf +\verb:--:enable-thread-safe-client} options, you will need to rerun the {\bf +ldconfig} program shown below. If you put MySQL in a standard place such as +{\bf /usr/lib} or {\bf /usr/local/lib} this will not be necessary, but in my +case it is. The description that follows is Linux specific. For other +operating systems, please consult your manuals on how to do the same thing: + +First edit: {\bf /etc/ld.so.conf} and add a new line to the end of the file +with the name of the mysql-directory. In my case, it is: + +/home/kern/mysql/lib/mysql then rebuild the loader's cache with: + +/sbin/ldconfig If you upgrade to a new version of {\bf MySQL}, the shared +library names will probably change, and you must re-run the {\bf +/sbin/ldconfig} command so that the runtime loader can find them. + +Alternatively, your system my have a loader environment variable that can be +set. 
For example, on a Solaris system where I do not have root permission, I
+use:
+
+LD\_LIBRARY\_PATH=/home/kern/mysql/lib/mysql
+
+Finally, if you have encryption enabled in MySQL, you may need to add {\bf
+-lssl -lcrypto} to the link. In that case, you can either export the
+appropriate LDFLAGS definition, or alternatively, you can include them
+directly on the ./configure line as in:
+
+\footnotesize
+\begin{verbatim}
+LDFLAGS="-lssl -lcrypto" \
+  ./configure \
+
+\end{verbatim}
+\normalsize
+
+\section{Installing MySQL from RPMs}
+\index[general]{MySQL!Installing from RPMs}
+\index[general]{Installing MySQL from RPMs}
+If you are installing MySQL from RPMs, you will need to install
+both the MySQL binaries and the client libraries. The client
+libraries are usually found in a devel package, so you must
+install:
+
+\footnotesize
+\begin{verbatim}
+  mysql
+  mysql-devel
+\end{verbatim}
+\normalsize
+
+This will be the same with most other package managers too.
+
+\section{Upgrading MySQL}
+\index[general]{Upgrading MySQL }
+\index[general]{Upgrading!MySQL }
+\index[general]{Upgrading}
+If you upgrade MySQL, you must reconfigure, rebuild, and re-install
+Bacula, otherwise you are likely to get bizarre failures. If you
+install from rpms and you upgrade MySQL, you must also rebuild Bacula.
+You can do so by rebuilding from the source rpm. To do so, you may need
+to modify the bacula.spec file to account for the new MySQL version.
diff --git a/docs/manuals/de/catalog/postgresql.tex b/docs/manuals/de/catalog/postgresql.tex
new file mode 100644
index 00000000..15be98e9
--- /dev/null
+++ b/docs/manuals/de/catalog/postgresql.tex
@@ -0,0 +1,460 @@
+%%
+%%
+
+\chapter{Installing and Configuring PostgreSQL}
+\label{PostgreSqlChapter}
+\index[general]{PostgreSQL!Installing and Configuring }
+\index[general]{Installing and Configuring PostgreSQL }
+\index[general]{Upgrading}
+
+If you are considering using PostgreSQL, you should be aware
+of the project's philosophy of upgrades, which could be
+destabilizing for a production shop. Basically, at every major version
+upgrade, you are required to dump your database in an ASCII format,
+do the upgrade, and then reload your database (or databases). This is
+because they frequently update the "data format" from version to
+version, and they supply no tools to automatically do the conversion.
+If you forget to do the ASCII dump, your database may become totally
+useless because none of the new tools can access it due to the format
+change, and the PostgreSQL server will not be able to start.
+
+If you are building PostgreSQL from source, please be sure to add
+the {\bf \verb:--:enable-thread-safety} option when doing the ./configure
+for PostgreSQL.
+
+\section{Installing PostgreSQL}
+\index[general]{PostgreSQL!Installing }
+
+If you use the {\bf ./configure \verb:--:with-postgresql=PostgreSQL-Directory}
+statement for configuring {\bf Bacula}, you will need PostgreSQL version 7.4
+or later installed. NOTE! PostgreSQL versions earlier than 7.4 do not work
+with Bacula. If PostgreSQL is installed in the standard system location, you
+need only enter {\bf \verb:--:with-postgresql} since the configure program will
+search all the standard locations. If you install PostgreSQL in your home
+directory or some other non-standard directory, you will need to provide the
+full path with the {\bf \verb:--:with-postgresql} option.
+
+Installing and configuring PostgreSQL is not difficult but can be confusing
+the first time.
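+For orientation only, a minimal from-source build might look like the
+following; the version number and installation prefix shown here are
+placeholders, not recommendations from this manual, and the thread-safety
+option discussed above is included:
+
+\footnotesize
+\begin{verbatim}
+  tar xzf postgresql-8.1.4.tar.gz
+  cd postgresql-8.1.4
+  ./configure --prefix=/usr/local/pgsql --enable-thread-safety
+  make
+  make install
+\end{verbatim}
+\normalsize
+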
If you prefer, you may want to use a package provided by your +chosen operating system. Binary packages are available on most PostgreSQL +mirrors. + +If you prefer to install from source, we recommend following the instructions +found in the +\elink{PostgreSQL documentation}{http://www.postgresql.org/docs/}. + +If you are using FreeBSD, +\elink{this FreeBSD Diary article}{http://www.freebsddiary.org/postgresql.php} +will be useful. Even if you are not using FreeBSD, the article will contain +useful configuration and setup information. + +If you configure the Batch Insert code in Bacula (attribute inserts are +10 times faster), you {\bf must} be using a PostgreSQL that was built with +the {\bf \verb:--:enable-thread-safety} option, otherwise you will get +data corruption. Most major Linux distros have thread safety turned on, but +it is better to check. One way is to see if the PostgreSQL library that +Bacula will be linked against references pthreads. This can be done +with a command such as: + +\footnotesize +\begin{verbatim} + nm /usr/lib/libpq.a | grep pthread_mutex_lock +\end{verbatim} +\normalsize + +The above command should print a line that looks like: + +\footnotesize +\begin{verbatim} + U pthread_mutex_lock +\end{verbatim} +\normalsize + +if does, then everything is OK. If it prints nothing, do not enable batch +inserts when building Bacula. + +After installing PostgreSQL, you should return to completing the installation +of {\bf Bacula}. Later, after Bacula is installed, come back to this chapter +to complete the installation. Please note, the installation files used in the +second phase of the PostgreSQL installation are created during the Bacula +Installation. You must still come back to complete the second phase of the +PostgreSQL installation even if you installed binaries (e.g. rpm, deb, +...). + + +\label{PostgreSQL_configure} +\section{Configuring PostgreSQL} +\index[general]{PostgreSQL!Configuring PostgreSQL -- } + +At this point, you should have built and installed PostgreSQL, or already have +a running PostgreSQL, and you should have configured, built and installed {\bf +Bacula}. If not, please complete these items before proceeding. + +Please note that the {\bf ./configure} used to build {\bf Bacula} will need to +include {\bf \verb:--:with-postgresql=PostgreSQL-directory}, where {\bf +PostgreSQL-directory} is the directory name that you specified on the +./configure command for configuring PostgreSQL (if you didn't specify a +directory or PostgreSQL is installed in a default location, you do not need to +specify the directory). This is needed so that Bacula can find the necessary +include headers and library files for interfacing to PostgreSQL. + +{\bf Bacula} will install scripts for manipulating the database (create, +delete, make tables etc) into the main installation directory. These files +will be of the form *\_bacula\_* (e.g. create\_bacula\_database). These files +are also available in the \lt{}bacula-src\gt{}/src/cats directory after +running ./configure. If you inspect create\_bacula\_database, you will see +that it calls create\_postgresql\_database. The *\_bacula\_* files are +provided for convenience. It doesn't matter what database you have chosen; +create\_bacula\_database will always create your database. + +Now you will create the Bacula PostgreSQL database and the tables that Bacula +uses. These instructions assume that you already have PostgreSQL running. You +will need to perform these steps as a user that is able to create new +databases. 
This can be the PostgreSQL user (on most systems, this is the pgsql +user). + +\begin{enumerate} +\item cd \lt{}install-directory\gt{} + + This directory contains the Bacula catalog interface routines. + +\item ./create\_bacula\_database + + This script creates the PostgreSQL {\bf bacula} database. + Before running this command, you should carefully think about + what encoding sequence you want for the text fields (paths, files, ...). + Ideally, the encoding should be set to UTF8. However, many Unix systems + have filenames that are not encoded in UTF8, either because you have + not set UTF8 as your default character set or because you have imported + files from elsewhere (e.g. MacOS X). For this reason, Bacula uses + SQL\_ASCII as the default encoding. If you want to change this, + please modify the script before running it, but be forewarned that + Bacula backups will fail if PostgreSQL finds any non-UTF8 sequences. + + If running the script fails, it is probably because the database is + owned by a user other than yourself. On many systems, the database + owner is {\bf pgsql} and on others such as Red Hat and Fedora it is {\bf + postgres}. You can find out which it is by examining your /etc/passwd + file. To create a new user under either your name or with say the name + {\bf bacula}, you can do the following: + +\begin{verbatim} + su + (enter root password) + su pgsql (or postgres) + createuser kern (or perhaps bacula) + Shall the new user be allowed to create databases? (y/n) y + Shall the new user be allowed to create more new users? (y/n) (choose + what you want) + exit +\end{verbatim} + + At this point, you should be able to execute the + ./create\_bacula\_database command. + +\item ./make\_bacula\_tables + + This script creates the PostgreSQL tables used by {\bf Bacula}. +\item ./grant\_bacula\_privileges + + This script creates the database user {\bf bacula} with restricted access +rights. You may want to modify it to suit your situation. Please note that +this database is not password protected. + +\end{enumerate} + +Each of the three scripts (create\_bacula\_database, make\_bacula\_tables, and +grant\_bacula\_privileges) allows the addition of a command line argument. +This can be useful for specifying the user name. For example, you might need +to add {\bf -h hostname} to the command line to specify a remote database +server. + +To take a closer look at the access privileges that you have setup with the +above, you can do: + +\footnotesize +\begin{verbatim} +PostgreSQL-directory/bin/psql --command \\dp bacula +\end{verbatim} +\normalsize + +Also, I had an authorization problem with the password. In the end, +I had to modify my {\bf pg\_hba.conf} file (in /var/lib/pgsql/data on my machine) +from: + +\footnotesize +\begin{verbatim} + local all all ident sameuser +to + local all all trust sameuser +\end{verbatim} +\normalsize + +This solved the problem for me, but it is not always a good thing +to do from a security standpoint. However, it allowed me to run +my regression scripts without having a password. + +A more secure way to perform database authentication is with md5 +password hashes. 
Begin by editing the {\bf pg\_hba.conf} file, and +just prior the the existing ``local'' and ``host'' lines, add the line: + +\footnotesize +\begin{verbatim} + local bacula bacula md5 +\end{verbatim} +\normalsize + +and restart the Postgres database server (frequently, this can be done +using "/etc/init.d/postgresql restart" or "service postgresql restart") to +put this new authentication rule into effect. + +Next, become the Postgres administrator, postgres, either by logging +on as the postgres user, or by using su to become root and then using +su - postgres to become postgres. Add a password to the bacula +database for the bacula user using: + +\footnotesize +\begin{verbatim} + \$ psql bacula + bacula=# alter user bacula with password 'secret'; + ALTER USER + bacula=# \\q +\end{verbatim} +\normalsize + +You'll have to add this password to two locations in the +bacula-dir.conf file: once to the Catalog resource and once to the +RunBeforeJob entry in the BackupCatalog Job resource. With the +password in place, these two lines should look something like: + +\footnotesize +\begin{verbatim} + dbname = bacula; user = bacula; password = "secret" + ... and ... + # WARNING!!! Passing the password via the command line is insecure. + # see comments in make_catalog_backup for details. + RunBeforeJob = "/etc/make_catalog_backup bacula bacula secret" +\end{verbatim} +\normalsize + +Naturally, you should choose your own significantly more random +password, and ensure that the bacula-dir.conf file containing this +password is readable only by the root. + +Even with the files containing the database password properly +restricted, there is still a security problem with this approach: on +some platforms, the environment variable that is used to supply the +password to Postgres is available to all users of the +local system. To eliminate this problem, the Postgres team have +deprecated the use of the environment variable password-passing +mechanism and recommend the use of a .pgpass file instead. To use +this mechanism, create a file named .pgpass containing the single +line: + +\footnotesize +\begin{verbatim} + localhost:5432:bacula:bacula:secret +\end{verbatim} +\normalsize + +This file should be copied into the home directory of all accounts +that will need to gain access to the database: typically, root, +bacula, and any users who will make use of any of the console +programs. The files must then have the owner and group set to match +the user (so root:root for the copy in ~root, and so on), and the mode +set to 600, limiting access to the owner of the file. + +\section{Re-initializing the Catalog Database} +\index[general]{Database!Re-initializing the Catalog } +\index[general]{Re-initializing the Catalog Database } + +After you have done some initial testing with {\bf Bacula}, you will probably +want to re-initialize the catalog database and throw away all the test Jobs +that you ran. To do so, you can do the following: + +\footnotesize +\begin{verbatim} + cd + ./drop_bacula_tables + ./make_bacula_tables + ./grant_bacula_privileges +\end{verbatim} +\normalsize + +Please note that all information in the database will be lost and you will be +starting from scratch. If you have written on any Volumes, you must write an +end of file mark on the volume so that Bacula can reuse it. 
Do so with: + +\footnotesize +\begin{verbatim} + (stop Bacula or unmount the drive) + mt -f /dev/nst0 rewind + mt -f /dev/nst0 weof +\end{verbatim} +\normalsize + +Where you should replace {\bf /dev/nst0} with the appropriate tape drive +device name for your machine. + +\section{Installing PostgreSQL from RPMs} +\index[general]{PostgreSQL!Installing from RPMs} +\index[general]{Installing PostgreSQL from RPMs} +If you are installing PostgreSQL from RPMs, you will need to install +both the PostgreSQL binaries and the client libraries. The client +libraries are usually found in a devel package, so you must +install: + +\footnotesize +\begin{verbatim} + postgresql + postgresql-devel + postgresql-server + postgresql-libs +\end{verbatim} +\normalsize + +These will be similar with most other package managers too. After +installing from rpms, you will still need to run the scripts that set up +the database and create the tables as described above. + + +\section{Converting from MySQL to PostgreSQL} +\index[general]{PostgreSQL!Converting from MySQL to } +\index[general]{Converting from MySQL to PostgreSQL } + +The conversion procedure presented here was worked out by Norm Dressler +\lt{}ndressler at dinmar dot com\gt{} + +This process was tested using the following software versions: + +\begin{itemize} +\item Linux Mandrake 10/Kernel 2.4.22-10 SMP +\item Mysql Ver 12.21 Distrib 4.0.15, for mandrake-linux-gnu (i586) +\item PostgreSQL 7.3.4 +\item Bacula 1.34.5 + \end{itemize} + +WARNING: Always as a precaution, take a complete backup of your databases +before proceeding with this process! + +\begin{enumerate} +\item Shutdown bacula (cd /etc/bacula;./bacula stop) +\item Run the following command to dump your Mysql database: + + \footnotesize +\begin{verbatim} + mysqldump -f -t -n >bacula-backup.dmp + +\end{verbatim} +\normalsize + +\item Make a backup of your /etc/bacula directory (but leave the original in + place). +\item Go to your Bacula source directory and rebuild it to include PostgreSQL + support rather then Mysql support. Check the config.log file for your + original configure command and replace enable-mysql with enable-postgresql. +\item Recompile Bacula with a make and if everything compiles completely, + perform a make install. +\item Shutdown Mysql. +\item Start PostgreSQL on your system. +\item Create a bacula user in Postgres with the createuser command. Depending on + your Postgres install, you may have to SU to the user who has privileges to + create a user. +\item Verify your pg\_hba.conf file contains sufficient permissions to allow + bacula to access the server. Mine has the following since it's on a secure + network: + +\footnotesize +\begin{verbatim} +local all all trust + +host all all 127.0.0.1 255.255.255.255 trust + +NOTE: you should restart your postgres server if you + made changes + +\end{verbatim} +\normalsize + +\item Change into the /etc/bacula directory and prepare the database and + tables with the following commands: + +\footnotesize +\begin{verbatim} +./create_postgresql_database + +./make_postgresql_tables + +./grant_postgresql_privileges + +\end{verbatim} +\normalsize + +\item Verify you have access to the database: + + \footnotesize +\begin{verbatim} + +psql -Ubacula bacula + +\end{verbatim} +\normalsize + +You should not get any errors. 
+\item Load your database from the MySQL database dump with:
+
+ \footnotesize
+\begin{verbatim}
+psql -Ubacula bacula <bacula-backup.dmp
+
+\end{verbatim}
+\normalsize
+
+\item Resequence your tables with the following commands:
+
+ \footnotesize
+\begin{verbatim}
+psql -Ubacula bacula
+
+SELECT SETVAL('basefiles_baseid_seq', (SELECT
+MAX(baseid) FROM basefiles));
+SELECT SETVAL('client_clientid_seq', (SELECT
+MAX(clientid) FROM client));
+SELECT SETVAL('file_fileid_seq', (SELECT MAX(fileid)
+FROM file));
+SELECT SETVAL('filename_filenameid_seq', (SELECT
+MAX(filenameid) FROM filename));
+
+SELECT SETVAL('fileset_filesetid_seq', (SELECT
+MAX(filesetid) FROM fileset));
+
+SELECT SETVAL('job_jobid_seq', (SELECT MAX(jobid) FROM job));
+SELECT SETVAL('jobmedia_jobmediaid_seq', (SELECT
+MAX(jobmediaid) FROM jobmedia));
+SELECT SETVAL('media_mediaid_seq', (SELECT MAX(mediaid) FROM media));
+SELECT SETVAL('path_pathid_seq', (SELECT MAX(pathid) FROM path));
+
+SELECT SETVAL('pool_poolid_seq', (SELECT MAX(poolid) FROM pool));
+
+\end{verbatim}
+\normalsize
+
+\item At this point, start up Bacula, verify your volume library and perform
+  a test backup to make sure everything is working properly.
+\end{enumerate}
+
+\section{Upgrading PostgreSQL}
+\index[general]{Upgrading PostgreSQL }
+\index[general]{Upgrading!PostgreSQL }
+\index[general]{Upgrading}
+If you upgrade PostgreSQL, you must reconfigure, rebuild, and re-install
+Bacula, otherwise you are likely to get bizarre failures. If you
+install from rpms and you upgrade PostgreSQL, you must also rebuild Bacula.
+You can do so by rebuilding from the source rpm. To do so, you may need
+to modify the bacula.spec file to account for the new PostgreSQL version.
+
+
+\section{Credits}
+\index[general]{Credits }
+Many thanks to Dan Langille for writing the PostgreSQL driver. This will
+surely become the most popular database that Bacula supports.
diff --git a/docs/manuals/de/catalog/setup.sm b/docs/manuals/de/catalog/setup.sm
new file mode 100644
index 00000000..7c88dc61
--- /dev/null
+++ b/docs/manuals/de/catalog/setup.sm
@@ -0,0 +1,23 @@
+/*
+ * html2latex
+ */
+
+available {
+  sun4_sunos.4
+  sun4_solaris.2
+  rs_aix.3
+  rs_aix.4
+  sgi_irix
+}
+
+description {
+  From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX
+}
+
+install {
+  bin/html2latex            /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex
+  bin/html2latex.tag        /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag
+  bin/html2latex-local.tag  /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag
+  bin/webtex2latex.tag      /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag
+  man/man1/html2latex.1     /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1
+}
diff --git a/docs/manuals/de/catalog/sqlite.tex b/docs/manuals/de/catalog/sqlite.tex
new file mode 100644
index 00000000..a5ef8790
--- /dev/null
+++ b/docs/manuals/de/catalog/sqlite.tex
@@ -0,0 +1,168 @@
+%%
+%%
+
+\chapter{Installing and Configuring SQLite}
+\label{SqlLiteChapter}
+\index[general]{Installing and Configuring SQLite }
+\index[general]{SQLite!Installing and Configuring }
+
+Please note that SQLite, both versions 2 and 3, is not network enabled,
+which means that it must be linked into the Director rather than accessed
+over the network as MySQL and PostgreSQL are. This has two consequences:
+\begin{enumerate}
+\item SQLite cannot be used in the {\bf bweb} web GUI package.
+\item If you use SQLite, and your Storage daemon is not on the same +machine as your Director, you will need to transfer your database +to the Storage daemon's machine before you can use any of the SD tools +such as {\bf bscan}, ... +\end{enumerate} + +\section{Installing and Configuring SQLite -- Phase I} +\index[general]{Phase I!Installing and Configuring SQLite -- } +\index[general]{Installing and Configuring SQLite -- Phase I } + +If you use the {\bf ./configure \verb:--:with-sqlite} statement for configuring {\bf +Bacula}, you will need SQLite version 2.8.16 or later installed. Our standard +location (for the moment) for SQLite is in the dependency package {\bf +depkgs/sqlite-2.8.16}. Please note that the version will be updated as new +versions are available and tested. + +Installing and Configuring is quite easy. + +\begin{enumerate} +\item Download the Bacula dependency packages +\item Detar it with something like: + + {\bf tar xvfz depkgs.tar.gz} + + Note, the above command requires GNU tar. If you do not have GNU tar, a + command such as: + + {\bf zcat depkgs.tar.gz | tar xvf -} + + will probably accomplish the same thing. + +\item {\bf cd depkgs} + +\item {\bf make sqlite} + +\end{enumerate} + + +Please note that the {\bf ./configure} used to build {\bf Bacula} will need to +include {\bf \verb:--:with-sqlite} or {\bf \verb:--:with-sqlite3} depending +one which version of SQLite you are using. You should not use the {\bf +\verb:--:enable-batch-insert} configuration parameter for Bacula if you +are using SQLite version 2 as it is probably not thread safe. If you +are using SQLite version 3, you may use the {\bf \verb:--:enable-batch-insert} +configuration option with Bacula, but when building SQLite3 you MUST +configure it with {\bf \verb:--:enable-threadsafe} and +{\bf \verb:--:enable-cross-thread-connections}. + +By default, SQLite3 is now run with {\bf PRAGMA synchronous=OFF} this +increases the speed by more than 30 time, but it also increases the +possibility of a corrupted database if your server crashes (power failure +or kernel bug). If you want more security, you can change the PRAGMA +that is used in the file src/version.h. + + +At this point, you should return to completing the installation of {\bf +Bacula}. + + +\section{Installing and Configuring SQLite -- Phase II} +\label{phase2} +\index[general]{Phase II!Installing and Configuring SQLite -- } +\index[general]{Installing and Configuring SQLite -- Phase II } + +This phase is done {\bf after} you have run the {\bf ./configure} command to +configure {\bf Bacula}. + +{\bf Bacula} will install scripts for manipulating the database (create, +delete, make tables etc) into the main installation directory. These files +will be of the form *\_bacula\_* (e.g. create\_bacula\_database). These files +are also available in the \lt{}bacula-src\gt{}/src/cats directory after +running ./configure. If you inspect create\_bacula\_database, you will see +that it calls create\_sqlite\_database. The *\_bacula\_* files are provided +for convenience. It doesn't matter what database you have chosen; +create\_bacula\_database will always create your database. + +At this point, you can create the SQLite database and tables: + +\begin{enumerate} +\item cd \lt{}install-directory\gt{} + + This directory contains the Bacula catalog interface routines. + +\item ./make\_sqlite\_tables + + This script creates the SQLite database as well as the tables used by {\bf + Bacula}. 
This script will be automatically setup by the {\bf ./configure} + program to create a database named {\bf bacula.db} in {\bf Bacula's} working + directory. +\end{enumerate} + +\section{Linking Bacula with SQLite} +\index[general]{SQLite!Linking Bacula with } +\index[general]{Linking Bacula with SQLite } + +If you have followed the above steps, this will all happen automatically and +the SQLite libraries will be linked into {\bf Bacula}. + +\section{Testing SQLite} +\index[general]{SQLite!Testing } +\index[general]{Testing SQLite } + +We have much less "production" experience using SQLite than using MySQL. +SQLite has performed flawlessly for us in all our testing. However, +several users have reported corrupted databases while using SQLite. For +that reason, we do not recommend it for production use. + +If Bacula crashes with the following type of error when it is started: +\footnotesize +\begin{verbatim} +Using default Catalog name=MyCatalog DB=bacula +Could not open database "bacula". +sqlite.c:151 Unable to open Database=/var/lib/bacula/bacula.db. +ERR=malformed database schema - unable to open a temporary database file +for storing temporary tables +\end{verbatim} +\normalsize + +this is most likely caused by the fact that some versions of +SQLite attempt to create a temporary file in the current directory. +If that fails, because Bacula does not have write permission on +the current directory, then you may get this errr. The solution is +to start Bacula in a current directory where it has write permission. + + +\section{Re-initializing the Catalog Database} +\index[general]{Database!Re-initializing the Catalog } +\index[general]{Re-initializing the Catalog Database } + +After you have done some initial testing with {\bf Bacula}, you will probably +want to re-initialize the catalog database and throw away all the test Jobs +that you ran. To do so, you can do the following: + +\footnotesize +\begin{verbatim} + cd + ./drop_sqlite_tables + ./make_sqlite_tables +\end{verbatim} +\normalsize + +Please note that all information in the database will be lost and you will be +starting from scratch. If you have written on any Volumes, you must write an +end of file mark on the volume so that Bacula can reuse it. Do so with: + +\footnotesize +\begin{verbatim} + (stop Bacula or unmount the drive) + mt -f /dev/nst0 rewind + mt -f /dev/nst0 weof +\end{verbatim} +\normalsize + +Where you should replace {\bf /dev/nst0} with the appropriate tape drive +device name for your machine. diff --git a/docs/manuals/de/catalog/translate_images.pl b/docs/manuals/de/catalog/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/de/catalog/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. 
+# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. +# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. + $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. + my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. +# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. 
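+    # Note: the HREF match below is case-insensitive and non-greedy, so it
+    # tolerates extra attributes and mixed-case tags; purely local anchors
+    # ("#...") reduce to an empty $dest after the substitution below and are
+    # therefore not queued for processing.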
+ while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. + if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/de/catalog/update_version b/docs/manuals/de/catalog/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/de/catalog/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/catalog/update_version.in b/docs/manuals/de/catalog/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/de/catalog/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/catalog/version.tex b/docs/manuals/de/catalog/version.tex new file mode 100644 index 00000000..82d910aa --- /dev/null +++ b/docs/manuals/de/catalog/version.tex @@ -0,0 +1 @@ +2.3.6 (04 November 2007) diff --git a/docs/manuals/de/catalog/version.tex.in b/docs/manuals/de/catalog/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/de/catalog/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/de/concepts/Makefile b/docs/manuals/de/concepts/Makefile new file mode 100644 index 00000000..61b86ed0 --- /dev/null +++ b/docs/manuals/de/concepts/Makefile @@ -0,0 +1,139 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=concepts + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + makeindex ${DOC}.ddx -o ${DOC}.dnd >/dev/null 2>/dev/null + makeindex ${DOC}.fdx -o ${DOC}.fnd >/dev/null 2>/dev/null + makeindex ${DOC}.sdx -o ${DOC}.snd >/dev/null 2>/dev/null + makeindex ${DOC}.cdx -o ${DOC}.cnd >/dev/null 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Concepts and Overview Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Concep_Overvi_Guide.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/concepts/Makefile.in b/docs/manuals/de/concepts/Makefile.in new file mode 100644 index 00000000..61b86ed0 --- /dev/null +++ b/docs/manuals/de/concepts/Makefile.in @@ -0,0 +1,139 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=concepts + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + makeindex ${DOC}.ddx -o ${DOC}.dnd >/dev/null 2>/dev/null + makeindex ${DOC}.fdx -o ${DOC}.fnd >/dev/null 2>/dev/null + makeindex ${DOC}.sdx -o ${DOC}.snd >/dev/null 2>/dev/null + makeindex ${DOC}.cdx -o ${DOC}.cnd >/dev/null 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . 
+ dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Concepts and Overview Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Concep_Overvi_Guide.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/concepts/STYLE b/docs/manuals/de/concepts/STYLE new file mode 100644 index 00000000..6cd70564 --- /dev/null +++ b/docs/manuals/de/concepts/STYLE @@ -0,0 +1,76 @@ +TODO + +maybe spell out "config" to "configuration" as appropriate + +Use American versus British spelling + +not critical, but for later consider cleaning out some use of +"there" and rewrite to not be so passive. + +make sure use of \elink shows URL in printed book + +get rid of many references of "Red Hat" -- too platform specific? + +remove references to names, like "Dan Langille shared ..." +just put their names in credits for book + +don't refer to very old software by specific version such as +"Red Hat 7" or FreeBSD 4.9 because is too old to put in book. It may be +relevant, but may be confusing. Maybe just remove the version number +if applicable. + +maybe fine, but discuss point-of-view: don't use personal "I" or +possessive "my" unless that is consistent style for book. + +replace "32 bit" and "64 bit" with "32-bit" and "64-bit" respectively. 
+It seems like more popular style standard + +be consistent with "Note" and "NOTE". maybe use tex header for this + +get rid of redundant or noisy exclamation marks + +style for "ctl-alt-del" and "ctl-d"? and be consisten with formatting + +be consistent for case for ext3, ext2, EXT3, or EXT2. + +fix spelling of "inspite" in source and in docs (maybe use "regardless +in one place where I already changed to "in spite" + +be consistent with software names, like postgres, postgresql, PostreSQL +and others + +instead of using whitehouse for examples, use example.org (as that is defined +for that usage); also check other hostnames and maybe IPs and networks + +use section numbers and cross reference by section number or page number +no underlining in book (this is not the web :) + +some big gaps between paragraphs or between section headers and paragraphs +-- due to tex -- adjust as necessary to look nice + +don't include the GPL and LGPL in book. This will save 19 (A4) pages. +For 6x9 book this will save 30 pages. (Keep GFDL though.) + +many index items are too long + +appendices not listed as appendix + +some how consolidate indexes into one? on 6x9, the indexes are over 30 pages + +don't refer to some website without including URL also +(such as "this FreeBSD Diary article") + +get rid of (R) trademark symbols -- only use on first use; for example +don't put on the RPM Packaging FAQ + +split up very long paragraphs, such as "As mentioned above, you will need ..." +(on my page 783). + +use smaller font or split up long lines (especially from +console output which is wider than printed page) + +don't assume all BSD is "FreeBSD" + +don't assume all "kernel" is Linux. If it is Linux, be clear. + + diff --git a/docs/manuals/de/concepts/ansi-labels.tex b/docs/manuals/de/concepts/ansi-labels.tex new file mode 100644 index 00000000..7769d448 --- /dev/null +++ b/docs/manuals/de/concepts/ansi-labels.tex @@ -0,0 +1,60 @@ + +\chapter{ANSI und IBM Tape Labels} +\label{AnsiLabelsChapter} +\index[general]{ANSI und IBM Tape Labels} +\index[general]{Labels!Tape} + +Wenn Bacula entsprechend konfiguriert wird, unterst\"{u}tzt es ANSI und IBM Tape Labels. +Mit der richtigen Konfiguration, kann man Bacula sogar zwingen, +nur noch ANSI oder IBM Labels zu verwenden. + +Bacula kann ein ANSI oder IBM Tape Label erstellen, aber wenn Check Labels +konfiguriert ist (siehe unten), wird Bacula versuchen ein existierendes Tape Label zu finden, +und dieses dann verwenden. +Sie k\"{o}nnen die Tape Labels also mit anderen Programmen erstellen +und Bacula wird diese Labels erkennen und damit arbeiten. + +Obwohl Bacula ANSI und IBM Tape Labels erkennen und auch schreiben kann, +wird es immer auch ein eigenes Tape Label erzeugen. + +Wenn ANSI oder IBM Tape Labels verwenden werden, +d\"{u}rfen die Volume Namen nicht mehr als 6 Zeichen beinhalten. + +Wenn Sie Ihre Volumes nicht mit Bacula gelabelt haben, dann wird +das ANSI oder IBM Tape Label nur von Bacula erkannt, wenn Sie das +HDR1 Label mit {\bf BACULA.DATA} im Dateinamen (beginnend mit +dem 5. Zeichen) erzeugt haben. Wenn Bacula das Tape Label schreibt, +werden diese Informationen genutzt um das Tape als Bacula Tape zu erkennen. +Dieses erm\"{o}glicht Tapes mit ANSI oder IBM Labels mit unterschiedlichen Backupprogrammen zu benutzen. + + +\section{Director Pool Konfiguration} + +\begin{description} +\item [ Label Type = ANSI | IBM | Bacula] + Diese Direktive ist in der Director Pool und in der SD Device Konfiguration g\"{u}ltig. 
+ Wenn sie in der SD Device Konfiguration angegeben wird, hat sie Vorrang vor dem, + was der Director dem SD \"{u}bergibt. + Der Standardwert ist Label Type = Bacula. +\end{description} + +\section{Storage Daemon Device Konfiguration} + +\begin{description} +\item [ Label Type = ANSI | IBM | Bacula] + Diese Direktive ist in der Director Pool und in der SD Device Konfiguration g\"{u}ltig. + Wenn sie in der SD Device Konfiguration angegeben wird, hat sie Vorrang vor dem, + was der Director dem SD \"{u}bergibt. + Der Standardwert ist Label Type = Bacula. + +\item [Check Labels = yes | no] + Diese Direktive ist in der SD Device Konfiguration g\"{u}ltig. + Wenn Sie beabsichtigen, ANSI oder IBM Tape Labels zu lesen, *muss* + sie auf yes gesetzt sein. Auch wenn das Volume kein ANSI oder IBM Label hat, + kann diese Direktive auf yes gesetzt werden, + Bacula wird dann den Typ des Tape Labels automatisch \"{u}berpr\"{u}fen. + Wird sie nicht auf yes gesetzt, wird Bacula annehmen, dass das Volume + mit einem Bacula Tape Label versehen ist, + eine Überpr\"{u}fung auf ANSI oder IBM Tape Labels finden dann nicht statt. + Der Standardwert ist Check Labels = no. + \end{description} diff --git a/docs/manuals/de/concepts/autochangerres.tex b/docs/manuals/de/concepts/autochangerres.tex new file mode 100644 index 00000000..4a322d4e --- /dev/null +++ b/docs/manuals/de/concepts/autochangerres.tex @@ -0,0 +1,110 @@ +\subsection*{Autochanger-Konfiguration} +\label{AutochangerRes} +\index[sd]{Autochanger-Konfiguration } +\index[sd]{Konfiguration!Autochanger } +\addcontentsline{toc}{subsection}{Autochanger-Konfiguration} + +In der Autochanger-Konfiguration k\"{o}nnen Autochanger mit einzelnen oder mehreren Laufwerken angelegt werden, +indem eine oder mehrere Ger\"{a}tekonfigurationen zu einer Einheit, die Bacula Autochanger nennt, +gruppiert werden. (Autochangerherrsteller nennen so etwas auch "Tape Library") + +Damit Ihr Autochanger korrekt funktioniert, +{\bf m\"{u}ssen} Sie eine Autochanger-Konfiguration in der Konfigurationsdatei +des Storage-Dienstes erstellen und in der Konfiguration des Director-Dienstes +{\bf muss} ein entsprechender Storage-Eintrag auf den Autochanger-Namen +in der Storage-Dienst-Konfiguration verweisen. +In fr\"{u}heren Bacula-Versionen verwies die Autochanger-Konfiguration des +Director-Dienstes direkt auf Ger\"{a}te-Konfigurationen des Storage-Dienstes. +Seit Version 1.38.0 ist es nicht mehr m\"{o}glich, aus einer Autochanger-Konfiguration des Director-Dienstes, +direkt auf die Autochanger-Ger\"{a}te zu verweisen. + +\begin{description} +\item [Name = \lt{}Autochanger-Name\gt{}] + \index[sd]{Name} + die Angabe des Autochanger-Namens. Dieser Name wird in der Director-Storage-Definition benutzt um auf den + Autochanger zu verweisen. + Die Konfiguration des Namens ist zwingend erforderlich. + +\item [Device = \lt{}Device-name1, device-name2, ...\gt{}] + die Angabe eines oder mehrerer Ger\"{a}te-Namen, die den Device-Eintr\"{a}gen der Laufwerke + des Autochangers entsprechen. + Wenn Ihr Autochanger mehrere Laufwerke hat, m\"{u}ssen Sie auch mehrere Ger\"{a}te-Namen angeben, + jeweils einen f\"{u}r jede Ger\"{a}te-Konfiguration, die einem Laufwerk des Autochangers entspricht. + Sie k\"{o}nnen mehrere Ger\"{a}te-Namen durch Kommas getrennt in einer Zeile, + oder mehrere Device-Eintr\"{a}ge angeben. + Die Konfiguration der Ger\"{a}te-Namen ist zwingend erforderlich. 
+ +\item [Changer Device = {\it Bezeichner}] + \index[sd]{Changer Device} + der angegebene {\bf Bezeichner} entspricht dem Ger\"{a}te-Namen des Autochangers (nicht der Laufwerke) + der durch das Betriebssystem vergeben wird. + Wenn der Ger\"{a}te-Name hier konfiguriert wird, braucht er nicht mehr in den Device-Eintr\"{a}gen der Laufwerke + angegeben werden. + Wenn der Ger\"{a}te-Name auch in den Device-Eintr\"{a}gen angegeben wird, + hat der dortige Eintrag Vorrang vor der Angabe in der Autochanger-Konfiguration. + +\item [Changer Command = {\it Bezeichner}] + \index[sd]{Changer Command } + der angegebene {\bf Bezeichner} gibt das zu verwendende externe Programm an, + dass Bacula aufruft, um automatisch Volumes zu wechseln. Meistens wird hier + das mit Bacula zur Verf\"{u}gung gestellte {\bf mtx-changer} angegeben. + Wenn der Kommando-Name hier konfiguriert wird, braucht er nicht mehr in den Device-Eintr\"{a}gen der Laufwerke + angegeben werden. + Wenn der Kommando-Name auch in den Device-Eintr\"{a}gen angegeben wird, + hat der dortige Eintrag Vorrang vor der Angabe in der Autochanger-Konfiguration. +\end{description} + +Das Folgende ist ein Beispiel einer g\"{u}ltigen Autochanger-Konfiguration: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "DDS-4-changer" + Device = DDS-4-1, DDS-4-2, DDS-4-3 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} +Device { + Name = "DDS-4-1" + Drive Index = 0 + Autochanger = yes + ... +} +Device { + Name = "DDS-4-2" + Drive Index = 1 + Autochanger = yes + ... +Device { + Name = "DDS-4-3" + Drive Index = 2 + Autochanger = yes + Autoselect = no + ... +} +\end{verbatim} +\normalsize + +Bitte beachten Sie dass es wichtig ist, dass {\bf Autochanger = yes} in allen Device-Eintr\"{a}gen +angegeben wird die zum Autochanger geh\"{o}ren. +Ein Device-Eintrag darf nie zu mehr als einem Autochanger geh\"{o}ren. +Au{\ss}erdem darf die Storage-Konfiguration des Director-Dienstes nur auf die Autochanger-Konfiguration +zeigen und nicht auf die Device-Eintr\"{a}ge. + +Wenn Sie ein Laufwerk des Autochangers nicht automatisch durch Bacula benutzen lassen wollen, +z.B. um immer ein freies Laufwerk f\"{u}r R\"{u}cksicherungen zu haben, +k\"{o}nnen Sie folgendes dem entsprechenden Device-Eintrag hinzuf\"{u}gen: + +\footnotesize +\begin{verbatim} +Autoselect = no +\end{verbatim} +\normalsize + +In diesem Fall wird Bacula das Laufwerk nicht mehr automatisch ausw\"{a}hlen, wenn es auf den Autochanger zugreift. +Sie k\"{o}nnen das Laufwerk weiterhin benutzen, indem Sie direkt den Device-Namen ansprechen, +anstatt des Autochangers. +Ein Beispiel einer solchen Konfiguration sehen Sie oben bei dem Device-Eintrag DDS-4-3. +Diese Laufwerk wird nicht benutzt werden, wenn der Autochanger-Name DDS-4-changer als Storage-Definition +genutzt wird, es l\"{a}sst sich aber direkt, mit entsprechenden Storage-Konfigurations-Eintrag im Director-Dienst, +als Storage DDS-4-3 ansprechen. diff --git a/docs/manuals/de/concepts/autochangers.tex b/docs/manuals/de/concepts/autochangers.tex new file mode 100644 index 00000000..d9d1b789 --- /dev/null +++ b/docs/manuals/de/concepts/autochangers.tex @@ -0,0 +1,916 @@ +%% +%% + +\chapter{Autochanger Unterst\"{u}tzung} +\label{AutochangersChapter} +\index[general]{Unterst\"{u}tzung!Autochanger } +\index[general]{Autochanger Unterst\"{u}tzung } + +Bacula unterst\"{u}tzt Autochanger zum Lesen und Schreiben von Tapes. 
+Damit Bacula mit einem Autochanger arbeiten kann, m\"{u}ssen einige Voraussetzungen erf\"{u}llt sein, +die Details werden im folgenden gekl\"{a}rt. + +\begin{itemize} +\item Ein Script das den Autochanger, gem\"{a}{\ss} den von Bacula gesendeten Kommandos, steuert. + Bacula stellt solch ein Script in der {\bf depkgs} Distribution zur Verf\"{u}gung. + +\item Jedes Volume (Tape) das benutzt wird, muss sowohl im Katalog definiert sein, + als auch eine Slotnummer zugeteilt sein, nur so kann Bacula wissen, welches Volume + aktuell im Autochanger verf\"{u}gbar ist. + Normalerweise wird das mittels des {\bf label} Kommandos erreicht, + weiter unten wird genauer darauf eingegangen. + Volumes m\"{u}ssen manuell gelabelt werden, bevor sie benutzt werden k\"{o}nnen. + +\item Die Konfigurationsdateien des Storage-Dienstes m\"{u}ssen angepasst werden, + damit Device-Eintr\"{a}ge Autochangern zugeordnet werden k\"{o}nnen, + sowie einige Parameter mehr. + +\item Sie sollten auch die Storage-Definitionen in der Director-Dienst-Konfiguration anpassen, + so dass automatisch nachgefragt wird, welcher Slot genutzt werden soll, wenn ein Volume gelabelt wird. + +\item Sie m\"{u}ssen sicherstellen, dass der Storage-Dienst (wenn er nicht als root ausgef\"{u}hrt wird) + Zugriffsrechte auf die Laufwerks- und auf das Autochanger-Kontroll-Device hat. + +\item Sie m\"{u}ssen {\bf Autochanger = yes} in der Storage-Definitionen des Director-Dienstes setzen, + damit nach dem Slot gefragt wird wenn Sie Volumes labeln. +\end{itemize} + +In Version 1.37 und sp\"{a}ter, gibt es eine neue \ilink{Autochanger-Konfiguration}{AutochangerRes} +die erlaubt, bestimmte Device-Eintr\"{a}ge zu gruppieren um einen Autochanger mit mehreren Laufwerken +zu konfigurieren. Diese Konfiguration m\"{u}ssen Sie benutzen, wenn Sie einen Autochanger verwenden wollen. + +Bacula benutzt sein eigenes {\bf mtx-changer} Script als Interface zu dem Programm, +dass die Steuerung des Autochangers \"{u}bernimmt. {\bf mtx-changer} kann im Prinzip so angepasst werden, +dass es mit jedem Steuerungsprogramm f\"{u}r beliebige Autochanger funktioniert. +Die derzeitige Version von {\bf mtx-changer} arbeitet mit {\bf mtx}. +FreeBSD-Benutzer haben ein Script zur Verf\"{u}gung gestellt (im Verzeichnis {\bf examples/autochangers}), +dass Bacula {\bf chio} benutzen l\"{a}sst. + +Bacula unterst\"{u}tzt Autochanger mir Barcode-Lesern, +dieses beinhaltet zwei Consolen-Kommandos: {\bf label barcodes} und {\bf update slots}. +Im Abschnitt "Barcode Unterst\"{u}tzung" (siehe unten) erfolgt eine detaillierte Beschreibung dieser Kommandos. + +Momentan beinhaltet die Autochanger-Unterst\"{u}tzung keine Stacker und Silos, +und auch keine Laufwerks-Reinigung (Cleaning). Stacker und Silos werden nicht unterst\"{u}tzt, +da sie keinen wahlfreien Zugriff auf ihre Slots erlauben. +Unter Umst\"{a}nden schaffen Sie es vielleicht, einen Stacker (GravityFeed o. \"{a}.) +mit Bacula zum laufen zu bringen, indem Sie Ihre Konfiguration soweit anpassen, dass auf +den Autochanger nur sequentiell zugegriffen wird. +Die Unterst\"{u}tzung f\"{u}r Autochanger mit mehreren Laufwerken erfordert eine +Konfiguration wie in \ilink{Autochanger resource}{AutochangerRes} beschrieben. +Diese Konfiguration ist aber auch f\"{u}r Autochanger mit nur einem Laufwerk zu benutzen. + +Wenn {\bf mtx} korrekt mit Ihrem Autochanger zusammenarbeitet, +dann ist es nur eine Frage der Anpassung des {\bf mtx-changer} Scripts (falls n\"{o}tig) +um den Autochanger mit Bacula zu benutzen. 
+Eine Liste mit von {\bf mtx} unterst\"{u}zten Autochangern, finden Sie unter folgendem Link: +\elink{http://mtx.opensource-sw.net/compatibility.php}{http://mtx.opensource-sw.net/compatibility.php}. +Die Homepage des {\bf mtx} Projekts ist: +\elink{http://mtx.opensource-sw.net/}{http://mtx.opensource-sw.net/}. + +Anmerkung: wir haben R\"{u}ckmeldungen von einigen Benutzern erhalten, +die \"{u}ber gewisse Inkompatibilit\"{a}ten zwischen dem Linux-Kernel und mtx berichten. +Zum Beispiel zwischen Kernel 2.6.18-8.1.8.el5 von CentOS und RedHat und Version 1.3.10 +und 1.3.11 von mtx. Ein Umstieg auf Kernel-Version 2.6.22 hat diese Probleme behoben. + +Zus\"{a}tzlich scheinen einige Versionen von mtx, z.B. 1.3.11, die maximale Anzahl der Slots auf 64 +zu begrenzen, Abhilfe schafft die Benutzung von mtx-Version 1.3.10. + +Wenn Sie Probleme haben, benutzen Sie bitte das {\bf auto} Kommando im {\bf btape} Programm, +um die Funktionalit\"{a}t des Autochangers mit Bacula zu testen. +Bitte bedenken Sie, dass bei vielen Distributionen (z.B. FreeBSD, Debian, ...) der Storage-Dienst +nicht als Benutzer und Gruppe {\bf root} l\"{a}ft, sonder als Benutzer {\bf bacula} und Gruppe {\bf tape}. +In diesem Fall m\"{u}ssen Sie sicherstellen, das der Benutzer oder die Gruppe entsprechende Rechte hat, +um auf den Autochanger und die Laufwerke zuzugreifen. + +Manche Benutzer berichten, dass der Storage-Dienst unter Umst\"{a}nden +beim laden eines Tapes in das Laufwerk blockiert, falls schon ein Tape im Laufwerk ist. +Soweit wir das ermitteln konnten, ist es einfache eine Frage der Wartezeit: +Das Laufwerk hat vorher ein Tape beschrieben und wird f\"{u}r eine ganze Zeit +(bis zu 7 Minuten bei langsamen Laufwerken) im Status BLOCKED verbleiben, +w\"{a}hrend das Tape zur\"{u}ckgespult und entladen wird, erst danach kann ein anderes +Tape in das Laufwerk geladen werden. + +\label{SCSI devices} +\section{Zuordnung der SCSI Ger\"{a}te} +\index[general]{Zuordnung der SCSI Ger\"{a}te} +\index[general]{SCSI Ger\"{a}te} +\index[general]{Ger\"{a}te!SCSI} + +Unter Linux k\"{o}nnen Sie: +\footnotesize +\begin{verbatim} +cat /proc/scsi/scsi +\end{verbatim} +\normalsize + +ausf\"{u}hren, um zu sehen welche SCSI-Ger\"{a}te Sie haben. +Zudem k\"{o}nnen Sie: +\footnotesize +\begin{verbatim} +cat /proc/scsi/sg/device_hdr /proc/scsi/sg/devices +\end{verbatim} +\normalsize + +benutzen, um herauszufinden, welches das Autochanger-Kontroll-Device ist, +({\bf /dev/sg0} f\"{u}r die erste Zeile, {\bf /dev/sg1} f\"{u}r die zweite, ...) +das Sie in der Konfiguration unter {\bf Changer Device = } angeben m\"{u}ssen. + +F\"{u}r weiterf\"{u}hrende Information \"{u}ber SCSI-Ger\"{a}te, schauen Sie bitte in den Abschnitt +\ilink{Linux SCSI Tricks}{SCSITricks} aus dem Tape-Testing-Kapitel des Handbuchs. + +Unter FreeBSD k\"{o}nnen Sie: + +\footnotesize +\begin{verbatim} +camcontrol devlist +\end{verbatim} +\normalsize + +benutzen, um SCSI-Ger\"{a}te und die Kontroll-Devices {\bf /dev/passn} des Autochangers anzuzeigen, +die Sie in der Konfiguration unter {\bf Changer Device = } angeben m\"{u}ssen. + +Bitte stellen Sie sicher, dass der Storage-Dienst auf diese Ger\"{a}te zugreifen darf. + +Der folgende Tipp f\"{u}r FreeBSD-Benutzer kommt von Danny Butroyd: +beim Neustart des Computers hat Bacula keine Berechtigung auf das Autochanger-Kontroll-Device +(z.B. 
/dev/pass0) zuzugreifen, +Um dies zu umgehen, editieren Sie die Datei /etc/devfs.conf und f\"{u}gen unten diese Zeilen hinzu: + +\footnotesize +\begin{verbatim} +own pass0 root:bacula +perm pass0 0666 +own nsa0.0 root:bacula +perm nsa0.0 0666 +\end{verbatim} +\normalsize + +Das gibt der Gruppe bacula, nur um sicher zu gehen, auch die Schreib-Berechtigung f\"{u}r das Ger\"{a}t nsa0.0. +Damit die neue Konfiguration wirksam wird, m\"{u}ssen Sie: + +/etc/rc.d/devfs restart + +ausf\"{u}hren. +Danach brauchen Sie nie wieder die Berechtigungen von Hand zu setzen, wenn der Computer neu gestartet wurde. + +\label{scripts} +\section{Beispiel Scripte} +\index[general]{Scripte!Beispiel } +\index[general]{Beispiel Scripte } + +Lesen Sie bitte den nachfolgenden Abschnitt, damit Sie verstehen wie Bacula mit Autochangern arbeitet. +Auch wenn Bacula ein standard {\bf mtx-changer} Script installiert, ben\"{o}tigen Sie f\"{u}r Ihren Autochanger +eventuell einige Anpassungen. Falls Sie Beispiele sehen wollen, schauen Sie bitte in das Verzeichnis +{\bf\lt{}bacula-src\gt{}/examples/devices}, wo Sie eine {\bf HP-autoloader.conf} Bacula-Ger\"{a}te-Konfiguration, +sowie mehrere {\bf mtx-changer} Scripte finden werden, die schon f\"{u}r unterschiedliche Autochanger angepasst sind. + +\label{Slots} + +\section{Slots} +\index[general]{Slots } + +Um den Autochanger richtig ansteuern zu k\"{o}nnen, muss Bacula wissen +welches Volume in welchem Slot des Autochangers ist. In den Slots werden die Tapes aufbewahrt, +die nicht in einem Laufwerk geladen sind. Bacula nummeriert diese Slots von eins bis zur Anzahl der +vorhandenen Tapes im Autochanger. + +Bacula benutzt niemals ein Volume im Autochanger, dass nicht gelabelt ist, dem keine Slotnummer im Katalog +zugewiesen ist oder wenn das Volume nicht als InChanger im Katalog markiert ist. Bacula muss wissen wo das +Volume/Tape ist, sonst kann es nicht geladen werden. +Jedem Volume im Autochanger muss \"{u}ber das Console-Programm eine Slot-Nummer zugewiesen werden. +Diese Information wird im Katalog, zusammen mit anderen Informationen \"{u}ber das Volume, gespeichert. +Wenn kein Slot angegeben, oder der Slot auf Null gesetzt ist, wird Bacula das Volume nicht benutzen, +auch wenn alle anderen ben\"{o}tigten Konfigurationsparameter richtig gesetzt sind. +Wenn Sie das {\bf mount} Console-Kommando ausf\"{u}hren, m\"{u}ssen Sie angeben welches Tape aus welchem Slot +in das Laufwerk geladen werden soll. Falls schon ein Tape im Laufwerk ist, wird es entladen und danach das +beim {bf\ mount} angegeben Tape geladen. Normalerweise wird kein anderes Tape im Laufwerk sein, da Bacula beim +{\bf unmount} Console-Kommando das Laufwerk leert. + +Sie k\"{o}nnen die Slot-Nummer und die InChanger-Markierung \"{u}berpr\"{u}fen, indem Sie: +\begin{verbatim} +list Volumes +\end{verbatim} +im Consolen-Programm ausf\"{u}hren. + +\label{mult} +\section{mehrere Laufwerke} +\index[general]{Laufwerke!mehrere } +\index[general]{mehrere Laufwerke } + +Einige Autochanger haben mehr als ein Laufwerk. Die in Version 1.37 vorgestellte \ilink{Autochanger-Konfiguration}{AutochangerRes}, erlaubt Ihnen mehrere Ger\"{a}te-Konfigurationen, +die jeweils einem Laufwerk entsprechen, zu einem Autochanger zu gruppieren. Der Director-Dienst k\"{o}nnte trotzdem +die Laufwerke direkt ansprechen, aber dies zu erlauben, w\"{u}rde die einwandfreie Zusammenarbeit der Laufwerke +einschr\"{a}nken. Anstelle dessen sollte dem Director-Dienst, in der Director-Storage-Konfiguration, eine Autochanger-Konfiguration zugewiesen werden. 
Dieses erlaubt dem Storage-Dienst sicherzustellen, dass nur auf +ein Laufwerk zur Zeit vom {\bf mtx-changer} Script zugegriffen wird und nicht beide Laufwerke auf dasselbe Volume verweisen. + +Mehrere Laufwerke erfordern das Setzen des {\bf Drive Index} in den Ger\"{a}te-Eintr\"{a}gen der +Storage-Dienst-Konfiguration. +Laufwerks-Nummern bzw. der {\bf Drive Index} beginnen standardm\"{a}{\ss}ig bei Null. +Um mit dem zweiten Laufwerk im Autochanger arbeiten zu k\"{o}nnen, muss ein weiterer Ger\"{a}te-Eintrag +erstellt werden, wobei der {\bf Drive Index} dann Eins ist. +Normalerweise wird das zweite Laufwerk dasselbe {\bf Changer Device} verwenden, +aber ein anderes {\bf Archive Device}. + +Bacula Jobs werden bevorzugt auf das Volume geschrieben, dass schon in einem Laufwerk geladen ist. +Wenn Sie mehrere Laufwerke haben und Bacula auf mehreren Laufwerke gleichzeitig Jobs, +die denselben Pool verwenden, schreiben soll, muss der Parameter \ilink{Prefer Mounted Volumes} {PreferMountedVolumes} +in der Director-Dienst-Konfiguration in den entsprechenden Job-Eintr\"{a}gen auf "no" gesetzt werden. +Der Storage-Dienst wird daraufhin so viele Volumes wie m\"{o}glich in die Laufwerke laden. + +\label{ConfigRecords} +\section{Ger\"{a}te-Konfigurations-Parameter} +\index[general]{Parameter!Ger\"{a}te-Konfiguration } +\index[general]{Ger\"{a}te-Konfigurations-Parameter } + +Bacula's Autochanger-Konfiguration wird in den Ger\"{a}te-Eintr\"{a}gen des Storage-Dienstes festgelegt. +Vier Parameter: {\bf Autochanger}, {\bf Changer Device},{\bf Changer Command}, und {\bf Maximum Changer Wait} +steuern wie Bacula den Autochanger benutzt. + +Diese vier Parameter der {\bf Device}-Konfiguration, sind unten detailliert beschrieben. +{\bf Changer Device} und {\bf Changer Command} werden in der Gr\"{a}te-Konfiguration nicht ben\"{o}tigt, +wenn sie in der {\bf Autochanger}-Konfiguration stehen. + +\begin{description} + +\item [Autochanger = {\it Yes|No} ] + \index[sd]{Autochanger } + Der {\bf Autochanger}-Parameter gibt an, ob der Ger\"{a}te-Eintrag einen Autochanger beschreibt oder nicht. + Der Standardwert ist Autochanger = No. + +\item [Changer Device = \lt{}device-name\gt{}] + \index[sd]{Changer Device } + Zus\"{a}tzlich zu dem Archive Device Eintrag, muss das {\bf Changer Device} angegeben werden. +Das ist notwendig, weil die meisten Autochanger \"{u}ber ein anderes Ger\"{a}t gesteuert werden, +als f\"{u}r das Schreiben und Lesen der Volumes verwendet wird. +Ein Beispiel: unter Linux wird normalerweise das generische SCSI-Interface zum Steuern des Autochangers verwendet, +w\"{a}rend das standard SCSI-Interface f\"{u}r Lese- und Schreibvorg\"{a}ge genutzt wird. +F\"{u}r das {\bf Archive Device = /dev/nst0} hat man dann typischerweise das {\bf Changer Device = /dev/sg0}. +Gr\"{o}{\ss}ere Autochanger, mit mehreren Laufwerken und vielen Slots, k\"{o}nnen das Kontroll-Device +auch auf z.B. {\bf Changer Device = /dev/sg2} haben. + +Unter FreeBSD liegt das Kontroll-Device zwischen {\bf /dev/pass0} und {\bf /dev/passn}. + +Unter Solaris finden Sie das Kontroll-Device im Verzeichnis {\bf /dev/rdsk}. + +Stellen Sie bitte sicher, dass der Storage-Dienst die notwendigen Rechte besitzt, +um auf die entsprechenden Ger\"{a}te zugreifen zu d\"{u}rfen. + +\item [Changer Command = \lt{}command\gt{}] + \index[sd]{Changer Command } + Dieser Parameter gibt an, welches externe Kommando und mit welchen Argumenten, +aufgerufen wird, um den Autochanger zu steuern. 
+Es wird vorausgesetzt, dass dieses Kommando ein normales Programm oder Shell-Script ist, +das vom Betriebssystem ausgef\"{u}hrt werden kann. +Dieses Kommando wird jedes mal aufgerufen, wenn Bacula das Autochanger-Kontroll-Device ansprechen m\"{o}chte. +Die folgenden Ersetzungen werden durchgef\"{u}hrt, bevor das {\bf command} dem Betriebssystem zur +Ausf\"{u}hrung \"{u}bergeben wird: + +\footnotesize +\begin{verbatim} + %% = % + %a = archive device name + %c = changer device name + %d = changer drive index base 0 + %f = Client's name + %j = Job name + %o = command (loaded, load, or unload) + %s = Slot base 0 + %S = Slot base 1 + %v = Volume name +\end{verbatim} +\normalsize + +Hier ist ein Beispiel f\"[{u}r die Benutzung von {\bf mtx} mit dem {\bf mtx-changer} Script, +dass in der Bacula-Distribution enthalten ist: + +\footnotesize +\begin{verbatim} +Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +\end{verbatim} +\normalsize + +Falls das {\bf mtx-changer} Script nicht in {\bf /etc/bacula} liegt, +m\"{u}ssen Sie den Pfad entsprechend anpassen, +Einzelheiten zu den drei von Bacula benutzten Kommandos (loaded, load, unload), +sowie zu den von Bacula erwarteten Ausgaben des {\bf mtx-changer} Scripts, +werden weiter unten im Abschnitt {\bf Bacula Autochanger Schnittstelle} beschrieben.. + +\item [Maximum Changer Wait = \lt{}time\gt{}] + \index[sd]{Maximum Changer Wait } + Dieser Parameter gibt an, wie lange Bacula maximal warten soll, +bis der Autochanger auf ein Kommando (z.B. load) reagiert. +Der Standardwert betr\"{a}gt 120 Sekunden. Wenn Sie einen langsamen Autochanger haben, +m\"{u}ssen Sie hier eventuell eine l\"{ä}ngere Zeit konfigurieren. + +Wenn der Autochanger nicht innerhalb der {\bf Maximum Changer Wait} Zeit antwortet, +wird das Kommando abgebrochen und Bacula wird das Eingreifen des Bedieners verlangen. + +\item [Drive Index = \lt{}number\gt{}] + \index[sd]{Drive Index } + Dieser Parameter gibt die Nummer des Laufwerks innerhalb des Autochangers an. +Da die Nummerierung bei Null beginnt, wird das zweite Laufwerk mit folgendem Eintrag angegeben: + +\footnotesize +\begin{verbatim} +Device Index = 1 + +\end{verbatim} +\normalsize + +Um das zweite Laufwerk nutzen zu k\"{o}nnen, muss ein zweiter Device-Eintrag in der Konfigurationsdatei des +Storage-Dienstes erstellt werden. Einzelheiten dazu stehen, weiter oben in diesem Kapitel, in dem Abschnitt +{\bf mehrere Laufwerke} +\end{description} + +Damit der Autochanger zuverl\"{a}{\ss}ig funktioniert, muss zus\"{a}tzlich ein Autochanger-Eintrag erstellt werden. +\input{autochangerres} + +\label{example} +\section{eine Beispiel-Konfigurationsdatei} +\index[general]{Beispiel-Konfigurationsdatei} +\index[general]{Datei!Beispiel Konfiguration } + +Die folgenden beiden Konfigurations-Eintr\"{a}ge realisieren einen Autochanger: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "Autochanger" + Device = DDS-4 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} + +Device { + Name = DDS-4 + Media Type = DDS-4 + Archive Device = /dev/nst0 # Normal archive device + Autochanger = yes + LabelMedia = no; + AutomaticMount = yes; + AlwaysOpen = yes; +} +\end{verbatim} +\normalsize + +Wobei Sie {\bf Archive Device}, {\bf Changer Device} und den Pfad zum +{\bf Changer Command} Ihrem System entsprechend anpassen m\"{u}ssen. 
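+
+Der zugeh\"{o}rige Storage-Eintrag in der Director-Dienst-Konfiguration verweist dann
+auf den Namen der Autochanger-Konfiguration, nicht auf die einzelnen Device-Eintr\"{a}ge.
+Das folgende St\"{u}ck Konfiguration ist nur ein Beispiel zur Verdeutlichung;
+Adresse und Passwort sind Platzhalter und m\"{u}ssen Ihrer Umgebung entsprechend angepasst werden:
+
+\footnotesize
+\begin{verbatim}
+Storage {
+  Name = Autochanger
+  Address = storage.example.org      # Platzhalter
+  SDPort = 9103
+  Password = "geheim"                # Platzhalter
+  Device = Autochanger               # Name der Autochanger-Konfiguration im Storage-Dienst
+  Media Type = DDS-4
+  Autochanger = yes
+}
+\end{verbatim}
+\normalsize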
+ +\section{eine Beispiel-Konfigurationsdatei f\"{u}r mehrere Laufwerke} +\index[general]{Beispiel-Konfigurationsdatei f\"{u}r mehrere Laufwerke} + +Die folgenden Konfigurations-Eintr\"{a}ge realisieren einen Autochanger mit mehreren Laufwerken: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "Autochanger" + Device = Drive-1, Drive-2 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} + +Device { + Name = Drive-1 + Drive Index = 0 + Media Type = DDS-4 + Archive Device = /dev/nst0 # Normal archive device + Autochanger = yes + LabelMedia = no; + AutomaticMount = yes; + AlwaysOpen = yes; +} + +Device { + Name = Drive-2 + Drive Index = 1 + Media Type = DDS-4 + Archive Device = /dev/nst1 # Normal archive device + Autochanger = yes + LabelMedia = no; + AutomaticMount = yes; + AlwaysOpen = yes; +} + +\end{verbatim} +\normalsize + +Wobei Sie {\bf Archive Device}, {\bf Changer Device} und den Pfad zum +{\bf Changer Command} Ihrem System entsprechend anpassen m\"{u}ssen. + +\label{SpecifyingSlots} +\section{Festlegen der Slots beim Labeln} +\index[general]{Festlegen der Slots beim Labeln } +\index[general]{Labeln!Festlegen der Slots } + +Wenn Sie einen {\bf Autochanger = yes} Eintrag in Ihrer Storage-Konfiguration des +Director-Dienstes hinzugef\"{u}gt haben, wird die Bacula Console Sie bei diesen beiden Kommandos +{\bf add} und {\bf label} automatisch nach einem Slot f\"{u}r die jeweilige Aktion fragen. +Beim {\bf label} Kommando wird Bacula automatisch das richtige Volume in ein Laufwerk laden. + +Au{\ss}erdem muss, wie oben beschrieben, der Parameter {\bf Autochanger = yes} in der Ger\"{a}te-Konfiguration +des Storage-Dienstes vorhanden sein, damit der Autochanger benutzt werden kann. +N\"{a}here Informationen zu diesen Parametern finden Sie in der \ilink{Storage Konfiguration}{Autochanger1} +des Director-Kapitels und in \ilink{Device Konfiguration}{Autochanger} des Storage-Kapitels. + +Somit k\"{o}nnen alle Aktionen mit dem Autochanger komplett automatisiert werden. +Zudem ist es m\"{o}glich mit dem Men\"{u}punkt {\bf Volume Parameters} des Consolen-Kommandos {\bf update} den Slot +zu setzen und zu \"{a}ndern. + +Selbst wenn alle oben genannten Konfigurationen und Parameter richtig angegeben sind, +wird Bacula nur dann korrekt mit den Volumes im Autochanger arbeiten, wenn +den Volume-Eintr\"{a}ge im Katalog, die den Tapes im Autochanger entsprechenden, +auch eine {\bf slot}-Nummer zugewiesen ist. + +Wenn Ihr Autochanger Barcodes unterst\"{u}tzt, k\"{o}nnen Sie alle Volumes im Autochanger, +eins nach dem anderen, labeln indem Sie das Console-Kommando {\bf label barcodes} verwenden. +Jedes Tape mit Barcode, wird von Bacula in ein Laufwerk geladen und dann mit dem selben Namen gelabelt, +der auch auf dem Barcode steht. Gleichzeitig wird ein Katalog-Eintrag f\"{u}r das Volume angelegt. +Wenn der Barcode mit der Zeichenkette beginnt, die als {\bf CleaningPrefix= } konfiguriert ist, +wird Bacula das Tape f\"{u}r ein Reinigungsband halten und es wird nicht versucht das Tape zu labeln. +Ein Beispiel: + +\footnotesize +\begin{verbatim} +Pool { + Name ... + Cleaning Prefix = "CLN" +} +\end{verbatim} +\normalsize + +Jedes Volume mit einem Barcode wie CLNxxxxx wird als Reinigungsband behandelt und nicht gelabelt. + +Bitte bedenken Sie, dass jedes Volume, dass der Autochanger automatisch benutzen soll, bereits vor-gelabelt sein muss. +Wenn Sie keinen Barcode-Leser haben, muss das von Hand geschehen (oder durch ein Script). 
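+
+Ein solches Script k\"{o}nnte, rein als Skizze gedacht, etwa so aussehen;
+Volume-Namen, Pool, Storage-Name und die Anzahl der Slots sind hier frei gew\"{a}hlte Annahmen
+und m\"{u}ssen an Ihre Umgebung angepasst werden:
+
+\footnotesize
+\begin{verbatim}
+#!/bin/sh
+# labelt die Tapes in den Slots 1 bis 6 nacheinander als Vol0001 bis Vol0006
+for slot in 1 2 3 4 5 6
+do
+   echo "label volume=Vol000${slot} slot=${slot} pool=Default storage=Autochanger" | ./bconsole
+done
+\end{verbatim}
+\normalsize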
+ +\section{Tape-Wechsel} +\index[general]{Tapewechsel } +Wenn Sie Tapes dem Autochanger entnehmen oder hinzuf\"{u}gen wollen, +oder das {\bf mtx} Kommando von Hand aufrufen wollen, +m\"{u}ssen Sie Bacula den Autochanger freigeben lassen, +indem Sie folgendes Console-Kommando ausf\"{u}hren: + +\footnotesize +\begin{verbatim} +unmount +(wechseln der Tapes und/oder mtx ausf\"{u}hren +mount +\end{verbatim} +\normalsize + +Wenn Sie den Autochanger nicht freigeben, wei{\ss} Bacula +nach dem Tapewechsel nicht mehr, welches Volume in welchen Slot des Autochanger ist +und wird nicht mehr korrekt mit dem Autochanger arbeiten k\"{o}nnen. +Bacula geht immer davon aus, dass es exklusiven Zugriff auf den Autochanger hat, +solange ein Laufwerk gemountet ist. + + +\label{Magazines} +\section{Arbeiten mit mehreren Magazinen} +\index[general]{Arbeiten mit mehreren Magazinen } +\index[general]{Magazine!Arbeiten mit mehreren } + +Wenn Sie mehrere Magazine haben, oder wenn Sie Tapes in den Magazinen tauschen, +m\"{u}ssen Sie Bacula dar\"{u}ber informieren. Bacula wird immer die Tapes im Autochanger +bevorzugt vor anderen Tapes benutzen, somit werden Bedienereingriffe minimiert. + +Wenn Ihr Autochanger mit Barcodes (maschinenlesbare Tape Labels) arbeitet, +ist der Schritt, Bacula \"{u}ber die im Autochanger verf\" {u}gbaren Tapes zu informieren, sehr einfach. +Jedes mal wenn Sie ein Magazin wechseln, oder Tapes aus dem Magazine entfernen bzw. hinzuf\"{u}gen, +f\"{u}hren Sie einfach: + +\footnotesize +\begin{verbatim} +unmount +(Magazin/Tapes wechseln) +update slots +mount +\end{verbatim} +\normalsize + +im Console-Programm aus. Daraufhin wird Bacula den Autochanger nach einer aktuellen Liste +der in den Magazinen verf\"{u}gbaren Tapes fragen. Bei diesem Vorgang werden keine Tapes gelesen, +diese Informationen werden vom Autochanger w\"{a}hrend des Inventory ermittelt. +Bacula aktualisiert die Volume-Eintr\"{a}ge im Katalog, so dass bei allen in den Magazinen vorhandenen Tapes +das {\bf InChanger} Flag und auch die Slot-Nummern richtig gesetzt werden. + +Falls Sie keinen Barcode-Leser im Autochanger haben, gibt es mehrere andere M\"{o}glichkeiten. + +\begin{enumerate} +\item Sie k\"{o}nnen den Slot und das {\bf InChanger} Flag manuell setzen, indem Sie das {\bf update volume} +Consolen-Kommando verwenden (sehr umst\"{a}ndlich). + +\item Sie k\"{o}nnen das + +\footnotesize +\begin{verbatim} +update slots scan +\end{verbatim} +\normalsize + + Consolen-Kommando ausf\"{u}hren. Daraufhin wird Bacula jedes Tape nacheinander in ein Laufwerk laden, + das Tape Label lesen und den Katalog (Slot, InChanger-Flag) aktualisieren. + Dieses Vorgehen ist zwar wirkungsvoll, aber auch sehr langsam. + +\item Sie k\"{o}nnen das {\bf mtx-changer} Script anpassen, damit es die Barcodes im Autochanger simuliert (siehe unten). +\end{enumerate} + +\label{simulating} +\section{Simulieren von Barcodes im Autochanger} +\index[general]{Autochanger!Simulieren von Barcodes im } +\index[general]{Simulieren von Barcodes im Autochanger } + +Sie k\"{o}nnen die Barcodes im Autochanger simulieren, indem Sie das {\bf mtx-changer} Script so anpassen, +dass es die selben Informationen zur\"{u}ckgibt, die ein Autochanger mit Barcodes liefert. 
+Dazu wird die folgende Zeile im {\bf mtx-changer} Script: + +\footnotesize +\begin{verbatim} + ${MTX} -f $ctl status | + grep " *Storage Element [0-9]*:.*Full" | + awk "{print \$3 \$4}" | sed "s/Full *\(:VolumeTag=\)*//" +\end{verbatim} +\normalsize +(Der Zeilenumbruch dient hier nur der Darstellung, im {\bf mtx-changer} Script ist es eine Zeile) + +durch ein \# auskommentiert oder einfach gel\"{o}scht (Zeilennummer ist ungef\"{a}hr 99). +An ihrer Stelle wird eine neue Zeile erstellt, die den Inhalt einer Datei ausgibt. +Zum Beispiel: + +\footnotesize +\begin{verbatim} +cat /etc/bacula/changer.volumes +\end{verbatim} +\normalsize + +Stellen Sie sicher, dass Sie den kompletten Pfad zur Datei angeben, Ort und Name der Datei sind egal. +Die Inhalt der Datei muss folgenden Beispiel entsprechen: + +\footnotesize +\begin{verbatim} +1:Volume1 +2:Volume2 +3:Volume3 +... +\end{verbatim} +\normalsize + +Wobei die 1, 2 und 3 die Slot-Nummern und Volume1, Volume2 und Volume3 die Namen (bzw. Barcodes) sind. +Sie k\"{o}]nnen mehrere Datei erstellen, die den Tapes in verschiedenen Magazinen entsprechen und beim Wechsel +der Magazine einfach die f\"{u}r das Magazine g\"{u}ltige Datei in die {\bf /etc/bacula/changer.volumes} kopieren. +Sie brauchen Bacula nicht neu zu starten, wenn Sie Magazine wechseln, nur die Datei muss den richtigen Inhalt haben. +Wenn Sie dann das Console-Kommando {\bf update slots} ausf\"{u}hren, wird Ihr Autochanger f\"{u}r Bacula so erscheinen, +als ob er Barcodes unterst\"{u}tzen w\"{u}rde. + + +\label{updateslots} +\section{Alle Parameter des Update Slots Kommandos} +\index[general]{Alle Parameter des Update Slots Kommandos } +\index[general]{Kommandos!alle Parameter des Update Slots } + +Wenn Sie ncht alle Slots \"{u}berpr\"{u}fen lassen wollen, nur weil Sie ein Tape im Magazin getauscht haben, +k\"{o}nnen Sie das Consolen-Kommando {\bf update slots}, genauso wie das Kommando {\bf update slots scan}, +mit zus\"{a}tzlichen Parametern aufrufen: + +\footnotesize +\begin{verbatim} +update slots=n1,n2,n3-n4, ... +\end{verbatim} +\normalsize + +wobei der Parameter {\bf scan} optional ist. Die Parameter n1, n2, n3-n7... geben die Slots an, +wobei n1, n2 f\"{u}r einzelne Slots und n3-n7 f\"{u}r einen Bereich von Slots steht (n3 bis n7). + +Diese Parameter sind n\"{u}tzlich, wenn Sie {\bf update slots scan} (sehr langsam) ausf\"{u}hren und dabei +die Slots auf die mit gewechselten Tapes begrenzen k\"{o}nnen. + +Als Beispiel, das Console-Kommando : + +\footnotesize +\begin{verbatim} +update slots=1,6 scan +\end{verbatim} +\normalsize + +veranlasst Bacula, das Tape im ersten Slot des Autochangers in ein Laufwerk zu laden, das Label zu lesen und den +Katalog entsprechend zu aktualisieren. +Danach passiert dasselbe mit dem Tape im sechsten Slot. +Das Console-Kommando: + +\footnotesize +\begin{verbatim} +update slots=1-3,6 +\end{verbatim} +\normalsize + +liest die Barcodes der Tapes in den Slots 1, 2, 3 und 6 und aktualisiert den Katalog. +Wenn Ihr Autochanger keinen Barcode-Leser hat und Sie das {\bf mtx changer} Script nicht, wie oben beschrieben, +angepasst haben, wird dieses Console-Kommando keine Tapes finden und folglich nichts tun. 
+ +\label{FreeBSD} +\section{FreeBSD Belange} +\index[general]{Belange!FreeBSD } +\index[general]{FreeBSD Belange } + +Falls unter FreeBSD Probleme auftreten, wenn Bacula versucht auf ein Laufwerk zuzugreifen +und folgende Fehlermeldung erscheint: {\bf Device not configured}, +passiert dass weil FreeBSD den Ger\"{a}te-Eintrag {\bf /dev/nsa1} entfernt, wenn kein Tape im Laufwerk ist. +Das hat zur Folge, dass Bacula das Ger\"{a}t nicht \"{o}ffnen kann. Die L\"{o}sung f\"{u}r dieses Problem ist es, +sicherzustellen, dass immer ein Tape im Laufwerk ist, wenn Bacula gestartet wird. +Diese Problem ist in den Bacula-Versionen 1.32f-5 und sp\"{a}ter behoben. + +Beachten Sie bitte das Kapitel \ilink{Laufwerk-Tests}{FreeBSDTapes} bevor Sie den Autochanger testen, +dort finden Sie weitere {\bf wichtige} Informationen die Laufwerke betreffend. + +\label{AutochangerTesting} +\section{Autochanger-Test und Anpassung des mtx-changer Scripts} +\index[general]{Autochanger-Test } +\index[general]{Anpassung des mtx-changer Scripts} + + +Bevor Sie den Autochanger gleich mit Bacula ausprobieren, ist es vorzuziehen, zuerst von Hand +zu testen ob er richtig funktioniert. +Um das zu tun, empfehlen wir, dass Sie die folgenden Kommandos ausf\"{u}hren (wobei angenommen wird, +dass das {\bf mtx-changer} Script unter {\bf /etc/bacula/mtx-changer} liegt): + +\begin{description} + +\item [Stellen Sie sicher, dass Bacula nicht l\"{a}uft.] + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ list \ 0 \ /dev/nst0 \ 0] +\index[sd]{mtx-changer list} + +Das Kommando sollte diese Ausgabe erzeugen: + +\footnotesize +\begin{verbatim} + 1: + 2: + 3: + ... + +\end{verbatim} +\normalsize + +eine oder mehrere Zeilen f\"{u}r jeden belegten Slot im Autochanger, +wobei hinter jeder Zahl ein Doppelpunkt ({\bf :}) stehen muss. +Wenn Ihr Autochanger Barcodes unterst\"{u}tzt, steht hinter dem Doppelpunkt der Barcode. +Falls ein Fehler auftritt, muss die Ursache gefunden werden +(versuchen Sie z.B. ein anderes Kontroll-Device zu verwenden, falls {\bf /dev/sg0} falsch ist). +Unter FreeBSD z.B. liegt das Kontroll-Device gew\"{o}hnlich auf {\bf /dev/pass2}. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ slots ] +\index[sd]{mtx-changer slots} + +Das Kommando sollte die Anzahl der Slots im Autochanger anzeigen. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ unload \ 1 \ /dev/nst0 \ 0 ] +\index[sd]{mtx-changer unload} + + Falls das Tape aus Slot 1 in einem Laufwerk geladen ist, sollte es jetzt entladen werden. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ load \ 3 \ /dev/nst0 \ 0 ] +\index[sd]{mtx-changer load} + +Angenommen in Slot 3 ist ein Tape, dann wird es jetzt in das erste Laufwerk geladen (\bf Drive Index = 0) + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ loaded \ 0 \ /dev/nst0 \ 0] +\index[sd]{mtx-changer loaded} + +Dieses Kommando sollte jetzt 3 ausgeben (Die Slot-Nummer des in Laufwerk 0 geladenen Tapes.). +Beachten Sie, dass wir im Kommando eine ung\"{u}ltige Slotnummer 0 verwendet haben. +In diesem Fall, wird sie einfach ignoriert, weil sie nicht ben\"{o}tigt wird. +Allerdings musste eine Slot-Nummer angegeben werden, weil der Laufwerksparameter +am Ende des Kommandos erforderlich war, um das richtige Laufwerk zu w\"{a}hlen. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ unload \ 3 /dev/nst0 \ 0] + +wird das Laufwerk mit {\bf Drive Index = 0} in Slot 3 entladen. 
+ +\end{description} + +Nachdem alle oben genannten Kommandos funktionieren und in der +Storage-Dienst-Konfiguration auch das richtige {\bf Changer Command} angegeben ist, +sollte Bacula jetzt mit Ihrem Autochanger arbeiten k\"{o}nnen. +Das letzte verbleibende Problem ist, dass der Autochanger einige Zeit ben\"{o}tigt, +das Tape zu laden, nachdem das entsprechende Kommando abgesetzt wurde. +Wenn sich das {\bf mtx-changer} Script nach dem load-Kommando beendet, +wird Bacula sofort versuchen das Tape zur\"{u}ckzuspulen und zu lesen. +Wenn Bacula Ein-/Ausgabe-Fehler nach dem Laden des Tapes meldet, werden Sie eventuell eine +Verz\"{o}gerungszeit (z.B. {\bf sleep 20}) im {\bf mtx changer} Script nach dem {\bf mtx} Kommando +einf\"{u}gen m\"{u}ssen. Bitte bedenken Sie, dass egal was Sie dem {\bf mtx changer} Script an Kommandos +hinzuf\"{u}gen, sich das Script immer mit {\bf exit 0} beendet. +Bacula \"{u}berpr\"{u}ft den R\"{u}ckgabewert des Script nach jedem Aufruf und er muss immer 0 sein, +wenn alles geklappt hat. + +Ob Sie eine {\bf sleep}-Zeit im Script angeben m\"{u}ssen, k\"{o}nnen Sie mit folgenden +Kommandos \"{u}berpr\"{u}fen, indem Sie sie in ein Script schreiben und ausf\"{u}hren. + +\footnotesize +\begin{verbatim} +#!/bin/sh +/etc/bacula/mtx-changer /dev/sg0 unload 1 /dev/nst0 0 +/etc/bacula/mtx-changer /dev/sg0 load 3 /dev/nst0 0 +mt -f /dev/st0 rewind +mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +Wenn das Script funktioniert, haben Sie wahrscheinlich keine zeitlichen Probleme. +Wenn es nicht funktioniert, tragen Sie, direkt hinter dem mtx-changer load Kommando, +{\bf sleep 30} oder auch {\bf sleep 60} ein. Wenn es damit funktioniert, +\"{u}bernehmen Sie den passenden {\bf sleep}-Eintrag in das {\bf mtx-changer} Script, +so wird diese Verz\"{o}gerungszeit jedes mal angewendet, wenn Bacula das Script aufruft. + +Ein zweites Problem, dass einige Autochanger betrifft, ist dass die Laufwerke diese Autochanger das Tape +auswerfen m\"{u}ssen, bevor es aus dem Laufwerk entfernt werden kann. Falls das zutrifft, wird das Kommando +{\bf load 3} niemals erfolgreich beendet werden, egal wie lange Sie warten. +In diesem Fall, k\"{o}nnen Sie ein Auswurf-Kommando direkt hinter das {\bf unload} setzen, +so dass das Script dann so aussieht: + +\footnotesize +\begin{verbatim} +#!/bin/sh +/etc/bacula/mtx-changer /dev/sg0 unload 1 /dev/nst0 0 +mt -f /dev/st0 offline +/etc/bacula/mtx-changer /dev/sg0 load 3 /dev/nst0 0 +mt -f /dev/st0 rewind +mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +Nat\"{u}rlich m\"{u}ssen Sie das {\bf offline} Kommando in das {\bf mtx changer} Script \"{u}bernehmen, +falls es das Problem behebt. Da Bacula den R\"{u}ckgabewert des {\bf mtx changer} Scripts \"{u}berpr\"{u}ft, +stellen sie wiederum sicher, dass er immer 0 ist, bzw. das der R\"{u}ckgabewert des {\bf mtx} Kommandos an +Bacula \"{u}bergeben wird. + +Wie vorher schon angemerkt, sind im Verzeichnis {\bf \lt{}bacula-source\gt{}/examples/devices} mehrere +Scripte, die die oben genannten Kommandos bereits enthalten. Sie k\"{o}nnen eine Hilfe sein, um Ihr Script +zum laufen zu bringen. + +Wenn Bacula den Fehler {\bf Rewind error on /dev/nst0. ERR=Input/output error.} ausgibt, +werden Sie in den meisten F\"{a}llen eine l\"{a}ngere {\bf sleep}-Zeit in Ihrem {\bf mtx-changer} Script +hinzuf\"{u}gen m\"{u}ssen, bevor es nach dem {\bf load} Kommando beendet wird. 
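+
+Rein schematisch, und ohne Anspruch darauf, dem tats\"{a}chlichen {\bf mtx-changer} Script zu entsprechen,
+k\"{o}nnte der um eine Wartezeit erg\"{a}nzte load-Zweig etwa so aussehen
+(die Reihenfolge der Argumente entspricht dem oben gezeigten {\bf Changer Command}):
+
+\footnotesize
+\begin{verbatim}
+#!/bin/sh
+# stark vereinfachte Skizze, kein vollstaendiges mtx-changer Script
+# Aufruf: mtx-changer <changer-device> <kommando> <slot> <archive-device> <drive-index>
+case $2 in
+   load)
+      mtx -f $1 load $3 $5     # Tape aus Slot $3 in Laufwerk $5 laden
+      rtn=$?
+      sleep 30                 # Wartezeit fuer langsame Laufwerke, bei Bedarf anpassen
+      exit $rtn                # Bacula erwartet den Rueckgabewert 0 bei Erfolg
+      ;;
+esac
+\end{verbatim}
+\normalsize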
+ +\label{using} +\section{Arbeiten mit dem Autochanger} +\index[general]{Arbeiten mit dem Autochanger } +\index[general]{Autochanger!Arbeiten mit dem } + +Angenommen, Sie haben alle notwendigen Storage-Dienst-Device-Eintr\"{a}ge richtig konfiguriert +und Sie haben einen {\bf Autochanger = yes} Eintrag zu der Storage-Konfiguration im Director-Dienst +hinzuge\"{u}gt. + +Jetzt f\"{u}llen Sie Ihren Autochanger mit, zum Beispiel, 6 leeren Tapes. + +Was muss passieren, damit Bacula auf diese Tapes zugreifen kann? + +Eine M\"{o}glichkeit ist, dass jedes Tape vorgelabelt wird. Starten Sie Bacula und +f\"{u}hren Sie das Console-Programm aus, innerhalb des Console-Programms verwenden Sie das Kommando {\bf label}: + +\footnotesize +\begin{verbatim} +./bconsole +Connecting to Director rufus:8101 +1000 OK: rufus-dir Version: 1.26 (4 October 2002) +*label +\end{verbatim} +\normalsize + +wird etwas \"{a}hnliches wie hier ausgeben: + +\footnotesize +\begin{verbatim} +Using default Catalog name=BackupDB DB=bacula +The defined Storage resources are: + 1: Autochanger + 2: File +Select Storage resource (1-2): 1 +\end{verbatim} +\normalsize + +W\"{a}hlen Sie den Autochanger und es erscheint: + +\footnotesize +\begin{verbatim} +Enter new Volume name: TestVolume1 +Enter slot (0 for none): 1 +\end{verbatim} +\normalsize + +geben Sie {\bf Testvolume1} f\"{u}r den Tape-Namen ein und {\bf 1} f\"{u}r den Slot. +Bacula fragt: + +\footnotesize +\begin{verbatim} +Defined Pools: + 1: Default + 2: File +Select the Pool (1-2): 1 +\end{verbatim} +\normalsize + +W\"{a}hlen Sie den Default Pool. Das wird automatisch gemacht, wenn Sie nur einen Pool haben. +Nun wird Bacula damit beginnen, das ben\"{o}tigte Laufwerk zu entladen und +das Tape aus Slot 1 in das Laufwerk zu laden und als Testvolume1 zu labeln. +In diesem Beispiel war kein Tape im Laufwerk, die Ausgabe sieht dann so aus: + +\footnotesize +\begin{verbatim} +Connecting to Storage daemon Autochanger at localhost:9103 ... +Sending label command ... +3903 Issuing autochanger "load slot 1" command. +3000 OK label. Volume=TestVolume1 Device=/dev/nst0 +Media record for Volume=TestVolume1 successfully created. +Requesting mount Autochanger ... +3001 Device /dev/nst0 is mounted with Volume TestVolume1 +You have messages. +* +\end{verbatim} +\normalsize + +Sie k\"{o}nnen dann damit fortfahren, die andern Tapes zu labeln. +Die Ausgaben werden etwas anders aussehen, weil Bacula dann erst das +vorherige, gerade gelabelte Tape, aus dem Laufwerk entladen muss, +bevor das neue Tape geladen werden kann. + +Wenn Sie alle Tapes gelabelt haben, wird Bacula sie automatisch verwenden, wenn sie ben\"{o}tigt werden. + +Um nachzusehen, wie die Tapes gelabelt sind, geben Sie einfach das Console-Kommando {\bf list volumes} ein, +das wird eine Liste, wie die folgende ausgeben: + +\footnotesize +\begin{verbatim} +*{\bf list volumes} +Using default Catalog name=BackupDB DB=bacula +Defined Pools: + 1: Default + 2: File +Select the Pool (1-2): 1 ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +| MedId | VolName | MedTyp | VolStat | Bites | LstWrt | VolReten | Recyc | Slot | ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +| 1 | TestVol1 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 1 | +| 2 | TestVol2 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 2 | +| 3 | TestVol3 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 3 | +| ... 
| ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +\end{verbatim} +\normalsize + +\label{Barcodes} +\section{Barcode Unterst\"{u}tzung} +\index[general]{Unterst\"{u}tzung!Barcode } +\index[general]{Barcode Unterst\"{u}tzung } + +Bacula unterst\"{u}tzt Barcodes mit zwei Console-Kommandos: +{\bf label barcodes} und {\bf update slots}. + +Das Kommando {\bf label barcodes} bewirkt, dass Bacula mittels des {\bf mtx-changer} {\bf list} +Kommandos die Barcodes der Tapes in allen Slots einliest. Danach wird jedes Tape, eins nach dem anderen, +mit dem Namen gelabelt, den der Barcode enth\"{a}lt. + +Das {\bf update slots} Kommando holt, \"{u}ber das {\bf mtx-changer} Script, zuerst eine Liste aller Tapes und deren Barcodes. Dann versucht es im Katalog die entsprechenden Tapes zu finden und aktualisiert +den {\bf Slot} und das {\bf InChanger} Flag. Falls das Tape nicht im Katalog gelistet ist, passiert nichts. +Diese Kommando wird ben\"{o}tigt, um die Volume-Eintr\"{a}ge im Katalog mit den tats\"{a}chlich im Autochanger +zur Verf\"{u}gung stehenden Tapes abzugleichen, nachdem Tapes gewechselt oder in andere Slots verschoben wurden. + +Die Angabe des {\bf Cleaning Prefix} kann in der Pool-Konfiguration benutzt werden, um anzugeben welche +Tapes (Barcodes) im Katalog mit dem {\bf VolStatus} {\bf Cleaning} gekennzeichnet werden sollen. +Das verhindert, dass Bacula versucht auf dem Tape zu schreiben. + +\label{interface} +\section{Bacula Autochanger Schnittstelle} +\index[general]{Schnittstelle!Bacula Autochanger } +\index[general]{Bacula Autochanger Schnittstelle } + +Bacula ruft das Autochanger-Script auf, dass Sie als {\bf Changer Command} angegeben haben. +Normalerweise ist es das von Bacula mitgelieferte {\bf mtx-changer} Script, +aber tats\"{a}chlich kann es auch jedes andere Programm sein. +Die einzige Anforderung ist, dass es die Kommandos die Bacula benutzt, +{\bf loaded}, {\bf load}, {\bf unload}, {\bf list} und {\bf slots}, unterst\"{u}tzt. +Ausserdem muss jedes dieser Kommandos genau diese R\"{u}ckgabewerte liefern: + +\footnotesize +\begin{verbatim} +- Die momentan benutzten Autochanger-Kommandos sind: + loaded -- gibt, ab 1 beginnend, die Nummer des im Laufwerk geladenen Slot zur\"{u}ck, + bzw. 0 wenn das Laufwerk leer ist. + load -- l\"{a}dt das Tape aus dem angegebenen Slot in das Laufwerk (einige Autochanger + ben\"{o}tigen eine 30-sek\"{u}ndige Pause nach diesem Kommando) + unload -- entl\"{a}dt das Tape aus dem Laufwerk zur\"{u}ck in den Slot + list -- gibt eine Zeile pro Tape im Autochanger aus. + Das Format ist: :. Wobei + der {\bf Slot} eine Zahl (nicht null) ist, die der Slot-Nummer entspricht, + und {\bf Barcode} ist, falls vorhanden, der Barcode des Tapes, + ansonsten ist {\bf Barcode} leer. + slots -- gibt die absolute Anzahl der Slots im Autochanger zur\"{u}ck. +\end{verbatim} +\normalsize + +Bacula \"{u}berpr\"{u}ft den R\"{u}ckgabewert des aufgerufenen Programms, +wenn er Null ist, werden die gelieferten Daten akzeptiert. +Wenn der R\"{u}ckgabewert nicht Null ist, wird eine entsprechende Fehlermeldung ausgegeben und +Bacula wird ein manuelles laden des Tapes in das Laufwerk erwarten. 
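+
+Zum schnellen \"{U}berpr\"{u}fen k\"{o}nnen Sie den R\"{u}ckgabewert eines solchen Aufrufs,
+wie schon im Abschnitt \"{u}ber den Autochanger-Test, direkt in der Shell kontrollieren, zum Beispiel:
+
+\footnotesize
+\begin{verbatim}
+/etc/bacula/mtx-changer /dev/sg0 loaded 0 /dev/nst0 0
+echo $?      # muss 0 sein, wenn der Aufruf erfolgreich war
+\end{verbatim}
+\normalsize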
diff --git a/docs/manuals/de/concepts/bootstrap.tex b/docs/manuals/de/concepts/bootstrap.tex new file mode 100644 index 00000000..633c8942 --- /dev/null +++ b/docs/manuals/de/concepts/bootstrap.tex @@ -0,0 +1,415 @@ +%% +%% + +\chapter{Die Bootstrap-Datei} +\label{BootstrapChapter} +\index[general]{Datei!Bootstrap } +\index[general]{Bootstrap-Datei } + +Die Informationen in diesem Kapitel sollen Ihnen helfen, entweder eigene Bootstrap-Dateien +zu erstellen, oder die von Bacula erzeugten zu editieren. Da die Bootstrap-Datei automatisch beim ausf\"{u}hren des +\ilink{restore}{_ConsoleChapter} Console-Kommandos, oder wenn Sie \ilink{ Write Bootstrap}{writebootstrap} +in den Job-Eintr\"{a}gen der Director-Dienst-Konfiguration angeben, erzeugt wird, +brauchen Sie das genaue Format eigentlich nicht wissen. + +Die Bootstrap-Datei enth\"{a}lt Informationen im ASCII-Format, +die pr\"{a}zise angeben, welche Dateien wiederhergestellt werden sollen, auf welchem Volume sie liegen +und wo auf dem Volume. Es ist ein relativ kompaktes Format diese Informationen anzugeben, aber es ist +lesbar f\"{u}r Menschen und kann mit einem Texteditor ge\"{a}ndert werden. + +\section{Bootstrap-Datei Format} +\index[general]{Format!Bootstrap} +\index[general]{Bootstrap-Datei Format } + +Das generelle Format der Bootstrap-Datei ist: + +{\bf \lt{}Schl\"{u}sselwort\gt{} = \lt{}Wert\gt{}} + +Wobei jedes Schl\"{u}sselwort und sein Wert angeben, welche Dateien wiederhergestellt werden. +Genauer gesagt, das Schl\"{u}sselwort und sein Wert dienen dazu, zu limitieren welche +Dateien wiederhergestellt werden, sie verhalten sich wie ein Filter. +Das Fehlen eines Schl\"{u}sselwort bedeutet, dass alle Dateien angenommen werden. + +In der Bootstrap-Datei werden Leerzeilen und Zeilen beginnent mit {\#} ignoriert. + +Es existieren Schl\"{u}sselw\"{o}rter, die die Filterung nach Volume, Client, Job, Fileindex, Session ID, +Session Time usw. erlauben. + +Je mehr Schl\"{u}sselw\"{o}rter Sie angeben, desto genauer ist die Auswahl der Dateien, die wiederhergestellt werden. +Alle Schl\"{u}sselw\"{o}rter werden \"{u}ber {\bf UND} verkn\"{u}pft. + +Ein Beispiel: + +\footnotesize +\begin{verbatim} +Volume = Test-001 +VolSessionId = 1 +VolSessionTime = 108927638 +\end{verbatim} +\normalsize + +veranlasst den Storage-Dienst (oder das {\bf bextract} Programm), nur die Dateien wiederherzustellen, die +auf dem Volume Test-001 vorhanden sind {\bf UND} eine VolumeSessionID mit 1 haben {\bf UND} deren VolumeSessionTime +gleich 108927638 ist. + +Hier ist eine Liste aller erlaubten Schl\"{u}sselw\"{o}rter in der Reihenfolge in der sie auf +die auf dem Volume befindlichen Daten angewendet werden: + +\begin{description} + +\item [Volume] + \index[general]{Volume } + Dieser Wert gibt an, auf welches Volume die folgenden Schl\"{u}sselw\"{o}rter angewendet werden sollen. + Falls in der Bootstrap-Datei ein zweites Volume angegeben wird, beziehen sich die darauf folgenden + Schl\"{u}sselw\"{o}rter auf dieses Volume. + Wenn der Name des Volumes Leerzeichen enth\"{a}lt, muss er in Anf\"{u}hrungszeichen gesetzt werden. + Mindestens ein Volume muss angegeben werden. + +\item [Count] + \index[general]{Count} + Dieser Wert ist die Gesamtanzahl der Dateien, die von dem Volume gelesen werden sollen. + Daran erkennt der Storage-Dienst, wann er das Lesen beenden soll. + Dieser Wert ist optional. + +\item [VolFile] + \index[general]{VolFile} + Dieser Wert gibt eine Dateinummer oder eine Liste bzw. 
einen Bereich von Dateinummern an, + die auf dem aktuellen Volume gefunden werden soll. Die Dateinummer stellt die physikalische + Datei auf dem Volume da, wo die Daten gespeichert sind. Bei einem Tape wird dieser Wert benutzt, + um das Band richtig zu positionieren und wenn das Laufwerk die letzte angegebene Datei gelesen hat, + wird der Lesevorgang gestoppt. + +\item [VolBlock] + \index[general]{VolBlock} + Dieser Wert gibt eine Blocknummer oder eine Liste bzw. einen Bereich von Blocknummern an, + die auf dem aktuellen Volume gefunden werden soll. Die Blocknummer stellt die physikalischen + Bl\"{o}cke auf dem Volume da, wo die Daten gespeichert sind. + +\item [VolSessionTime] + \index[general]{VolSessionTime } + Dieser Wert gibt die Volume-Session-Zeit an, die auf dem aktuellen Volume gefunden werden soll. + +\item [VolSessionId] + \index[general]{VolSessionId } + Dieser Wert gibt eine Volume-Session-ID oder eine Liste bzw. einen Bereich von Volume-Sesion-IDs an, + die auf dem aktuellen Volume gefunden werden soll. Jedes Paar aus Volume-Session-ID und Volume-Session-Zeit, + stimmt mit einem einzelnen Job \"{u}berein, der auf dem Volume gespeichert ist. + +\item [JobId] + \index[general]{JobId } + Dieser Wert gibt eine Job-ID oder eine Liste bzw. einen Bereich von Job-Ids an, + die auf dem aktuellen Volume gefunden werden soll. Beachten Sie bitte, dass die Job-ID + eventuell nicht eindeutig ist, falls Sie mehrere Director-Dienste haben, oder falls Sie + Ihre Datenbank neu initialisiert haben sollten. Der Job-ID-Filter funktioniert nicht, wenn + Sie mehrere Jobs gleichzeitig haben laufen lassen. + Dieser Wert ist optional und wird von Bacula nicht zum zur\"{u}cksichern ben\"{o}tigt. + +\item [Job] + \index[general]{Job } + Dieser Wert gibt einen Job-Namen oder eine Liste von Job-Namen an, die auf dem aktuellen + Volume gefunden werden sollen. Der Job-Name stimmt mit einem einzigartigen Paar aus Volume-Session-Zeit + und VolumeSessionID \"{u}berein, allerdings ist er f\"{u}r Menschen ein bischen leichter zu lesen. + Gew\"{o}hnliche regul\"{a}re Ausdr\"{u}cke k\"{o}nnen benutzt werden, um einen entsprechenden Job-Namen zu finden. + Der Job-Name-Filter funktioniert nicht, wenn Sie mehrere Jobs gleichzeitig haben laufen lassen. + Dieser Wert ist optional und wird von Bacula nicht zum zur\"{u}cksichern ben\"{o}tigt. + +\item [Client] + \index[general]{Client } + Dieser Wert gibt einen Client-Namen oder eine Liste von Client-Namen an, dia auf dem aktuellen + Volume gefunden werden soll. Gew\"{o}hnliche regul\"{a}re Ausdr\"{u}cke k\"{o}nnen benutzt werden, + um einen entsprechenden Job-Namen zu finden. Der Client-Filter funktioniert nicht, + wenn Sie mehrere Jobs gleichzeitig haben laufen lassen. + Dieser Wert ist optional und wird von Bacula nicht zum zur\"{u}cksichern ben\"{o}tigt. + +\item [FileIndex] + \index[general]{FileIndex } + Dieser Wert gibt einen File-Index oder eine Liste bzw. einen Bereich von File-Indexen an, + die auf dem aktuellen Volume gefunden werden soll. Jedes File (Datei) das auf einem Volume gespeichert ist, + hat f\"{u}r seine Session einen einzigartigen File-Index. Bei jeder Session wird f\"{u}r das erste + gespeicherte File der File-Index auf eins gesetzt und dann mit jedem weiteren File um eins erh\"{o}ht. + + F\"{u}r ein beliebiges Volume bedeutet das, dass die drei Werte von Volume-Session-ID, Volume-Session-Time + und File-Index zusammen eine einzelne einzigartige Datei auf einem Volume angeben. 
Diese Datei ist eventuell + mehrfach auf dem Volume vorhanden, aber f\"{u}r jedes Vorkommen gibt es eine einzigartige Kombination + dieser drei Werte. Diese drei Werte sind f\"{u}r jede Datei in der Katalog-Datenbank gespeichert. + + Um eine Datei wiederherzustellen, ist die Angabe eines Wertes (oder einer Liste von File-Indexen) + erforderlich. + +\item [Slot] + \index[general]{Slot } + Dieser Wert gibt den Autochanger-Slot an. F\"{u}r jedes Volume darf nur ein Slot angegeben werden. + +\item [Stream] + \index[general]{Stream } + Dieser Wert gibt einen Stream (Strom) oder eine Liste bzw. einen Bereich von Streams an. + Solange Sie nicht wirklich wissen, was Sie tun, (wenn Sie das interne Arbeiten von Bacula kennen), + sollten Sie auf diese Angabe verzichten. + Dieser Wert ist optional und wird von Bacula nicht zum zur\"{u}cksichern ben\"{o}tigt. + +\item [*JobType] + \index[general]{*JobType } + Noch nicht implementiert. + +\item [*JobLevel] + \index[general]{*JobLevel } + Noch nicht implementiert. +\end{description} + +Bei der Angabe des Volume ist zu bedenken, dass dies der erste Parameter sein muss. +Alle anderen Parameter k\"{o}nnen in beliebiger Reihenfolge und Anzahl hinter einem +Volume-Eintrag angegeben werden. + +Mehrere Volume-Eintr\"{a}ge k\"{o}nnen in der selben Bootstrap-Datei vorkommen, +aber mit jedem Vorkommen beginnt ein neuer Satz an Filter, g\"{u}ltig f\"{u}r +das abgegebene Volume. + +Beim verarbeiten der Bootstrap-Datei werden alle Schl\"{u}sselw\"{o}rter +unterhalb eines Volume-Eintrags mit {\bf UND} verkn\"{u}pft. +Also wird: + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine" +FileIndex = 1 +\end{verbatim} +\normalsize + +auf alle Dateien auf dem Volume Test-01 {\bf UND} von Client My machine +{\bf UND} mit dem Fileindex 1 passen. + +Mehrfach angegebene Schl\"{u}sselw\"{o}rter werden mit {\bf ODER} verkn\"{u}pft. +Also wird: + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine" +Client = "Backup machine" +FileIndex = 1 +\end{verbatim} +\normalsize + +auf alle Dateien auf dem Volume Test-01 {\bf UND} von Client My machine +{\bf ODER} vom Client Backup machine {\bf UND} mit dem Fileindex 1 passen. + +F\"{u}r Zahlenwerte k\"{o}nnen Sie einen Bereich oder eine Liste angeben, +f\"{u}r alle anderen Parameter, bis auf Volumes, nur eine Liste. +Eine Liste ist gleichbedeutend mit mehrfachen Angaben eines Parameters. +Ein Beispiel + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine", "Backup machine" +FileIndex = 1-20, 35 +\end{verbatim} +\normalsize + +passt auf alle Dateien auf dem Volume Test-01 {\bf UND} von Client My machine +{\bf ODER} vom Client Backup machine {\bf UND} mit dem Fileindex 1 {\bf ODER} +2 {\bf ODER} 3 ... {\bf ODER} 20 {\bf ODER} 35. + +Wie oben erw\"{a}hnt, k\"{o}nnen mehrere Volume-Eintr\"{a}ge in der selben +Bootstrap-Datei stehen. Jedes Vorkommen eines Volume-Eintrags beginnt einen neuen +Satz an Filterregeln der auf dem angegebenen Volume angewendet wird und mit weiteren +Volume-Eintr\"{a}gen \"{u}ber {\bf ODER} verkn\"{u}pft wird. 
+ +Als ein Beispiel nehmen wir an, dass wir, mit dem Console-Kommando {\bf query} , +nach dem Satz Volumes fragen, die ben\"{o}tigt werden, um alle Dateien des Clients Rufus +wiederherstellen zu k\"{o}nnen: + +\footnotesize +\begin{verbatim} +Using default Catalog name=MySQL DB=bacula +*query +Available queries: + 1: List Job totals: + 2: List where a file is saved: + 3: List where the most recent copies of a file are saved: + 4: List total files/bytes by Job: + 5: List total files/bytes by Volume: + 6: List last 10 Full Backups for a Client: + 7: List Volumes used by selected JobId: + 8: List Volumes to Restore All Files: +Choose a query (1-8): 8 +Enter Client Name: Rufus ++-------+------------------+------------+-----------+----------+------------+ +| JobId | StartTime | VolumeName | StartFile | VolSesId | VolSesTime | ++-------+------------------+------------+-----------+----------+------------+ +| 154 | 2002-05-30 12:08 | test-02 | 0 | 1 | 1022753312 | +| 202 | 2002-06-15 10:16 | test-02 | 0 | 2 | 1024128917 | +| 203 | 2002-06-15 11:12 | test-02 | 3 | 1 | 1024132350 | +| 204 | 2002-06-18 08:11 | test-02 | 4 | 1 | 1024380678 | ++-------+------------------+------------+-----------+----------+------------+ +\end{verbatim} +\normalsize + +Die Ausgabe zeigt uns, dass wir vier Jobs wiederherstellen m\"{u}ssen. +Der erste ist eine vollst\"{a}ndige Sicherung, und die drei folgenden sind inkrementelle Sicherungen. + +Die folgende Bootstrap-Datei wird ben\"{o}tigt um alle Dateien wiederherzustellen: + +\footnotesize +\begin{verbatim} +Volume=test-02 +VolSessionId=1 +VolSessionTime=1022753312 +Volume=test-02 +VolSessionId=2 +VolSessionTime=1024128917 +Volume=test-02 +VolSessionId=1 +VolSessionTime=1024132350 +Volume=test-02 +VolSessionId=1 +VolSessionTime=1024380678 +\end{verbatim} +\normalsize + +Als letztes Beispiel nehmen wir an, dass die erste vollst\"{a}ndige Sicherung sich +\"{u}ber zwei verschiedene Volumes erstreckt. Die Ausgabe des Console-Kommandos +{\bf query} sieht eventuell so aus: + +\footnotesize +\begin{verbatim} ++-------+------------------+------------+-----------+----------+------------+ +| JobId | StartTime | VolumeName | StartFile | VolSesId | VolSesTime | ++-------+------------------+------------+-----------+----------+------------+ +| 242 | 2002-06-25 16:50 | File0003 | 0 | 1 | 1025016612 | +| 242 | 2002-06-25 16:50 | File0004 | 0 | 1 | 1025016612 | +| 243 | 2002-06-25 16:52 | File0005 | 0 | 2 | 1025016612 | +| 246 | 2002-06-25 19:19 | File0006 | 0 | 2 | 1025025494 | ++-------+------------------+------------+-----------+----------+------------+ +\end{verbatim} +\normalsize + +und die folgende Bootstrap-Datei wird ben\"{o}tigt, um diese Dateien wiederherzustellen: + +\footnotesize +\begin{verbatim} +Volume=File0003 +VolSessionId=1 +VolSessionTime=1025016612 +Volume=File0004 +VolSessionId=1 +VolSessionTime=1025016612 +Volume=File0005 +VolSessionId=2 +VolSessionTime=1025016612 +Volume=File0006 +VolSessionId=2 +VolSessionTime=1025025494 +\end{verbatim} +\normalsize + +\section{automatische Erzeugung der Bootstrap-Datei} +\index[general]{Datei!automatische Erzeugung der Bootstrap-} +\index[general]{automatische Erzeugung der Bootstrap-Datei } + + +Eine Sache ist vermutlich wissenswert: die Bootstrap-Dateien die automatisch +am Ende eines jeden Jobs erzeugt werden, sind nicht so optimiert wie die, die +durch das Console-Kommando {\bf restore} erzeugt werden. 
+Das ist so, weil die Bootstrap-Dateien, die am Ende des Jobs erstellt werden, +alle Dateien enthalten, die f\"{u}r diesen Job auf das Volume geschrieben wurden. +Die Konsequenz ist, dass alle Dateien die w\"{a}rend eines inkrementellen oder differenziellen +Jobs geschrieben wurden, beim Wiederherstellen zun\"{a}chst von der vollst\"{a}ndigen Sicherung +wiederhergestellt werden und dann von der inkrementellen oder differenziellen Sicherung. + +Wenn die Bootstrap-Datei f\"{u}r die Wiederherstellung erstellt wird, +wird immer nur eine Version der Datei (die aktuellste) zur Wiederherstellung aufgelistet. + +Falls Ihr Rechner noch ein bischen Zeit \"{u}brig hat, k\"{o}nnen Sie Ihre +Bootstrap-Dateien optimieren, indem Sie das folgende tun: + +\footnotesize +\begin{verbatim} + ./bconsole + restore client=xxx select all + done + no + quit + Backup bootstrap file. +\end{verbatim} +\normalsize + +Das wird allerdings nicht funktionieren, wenn Ihr Client mehrere Filesets hat, +denn dann wird noch eine weitere Eingabe erforderlich. +Das Console-Kommando {\bf restore client=xxx select all} erstellt den Restore-Dateibaum +und w\"{a}hlt alle Dateien aus, {\bf done} beendet den Auswahlmodus, dann wird die Bootstrap-Datei f\"{u}r diesen +Wiederherstellungs-Job geschrieben. +Das {\bf no} beantwortet die Frage {\bf Do you want to run this (yes/mod/no)}. +{\bf quit} beendet das Console-Programm, danach kann die neu erstellte Bootstrap-Datei gesichert werden. + +\label{bscanBootstrap} +\section{Bootstrap-Datei f\"{u}r bscan} +\index[general]{bscan} +\index[general]{bscan!Bootstrap-Datei} +\index[general]{bscan Bootstrap-Datei} +Wenn Sie mit dem bscan-Programm sehr viele Volumes abfragen m\"{u}ssen, +wird Ihr Kommando eventuell das Limit der Kommandozeilel\"{a}nge \"{u}berschreiten (511 Zeichen). +In dem Fall, k\"{o}nnen Sie eine einfache Bootstrap-Datei erzeugen, die nur Volume-Namen enth\"{a}lt. +Ein Beispiel: + +\footnotesize +\begin{verbatim} +Volume="Vol001" +Volume="Vol002" +Volume="Vol003" +Volume="Vol004" +Volume="Vol005" +\end{verbatim} +\normalsize + + +\section{ein weiteres Beispiel der Bootstrap-Datei} +\index[general]{Beispiel ein weiteres!Bootstrap-Datei } +\index[general]{ein weiteres Beispiel der Bootstrap-Datei } + +Wenn Sie nur einen einzigen Job vom Volume lesen wollen, k\"{o}nnen Sie das +durch ausw\"{a}hlen der Job-Id tun (Funktion nicht getestet), oder besser noch, +Sie geben die VolumeSessionTime und VolumeSessionID an, falls Sie sie wissen. +(Die beiden Werte werden auf dem Job-Report ausgegeben und sind in der Katalog-Datenbank +zu finden.) +Die VolumeSessionTime und VolumeSessionID anzugeben ist auch die Art, +wie Bacula Wiederherstellungen durchf\"{u}hrt. 
+Eine Bootstrap-Datei kann dann wie folgt aussehen: + +\footnotesize +\begin{verbatim} +Volume="Vol001" +VolSessionId=10 +VolSessionTime=1080847820 +\end{verbatim} +\normalsize + +Wenn Sie wissen, wie viele Dateien gesichert wurden (siehe den Job-Report), +k\"{o}nnen Sie die Auswahl enorm beschleunigen, indem Sie der Bootstrap-Datei +folgendes hinzuf\"{u}gen (angenommen es waren 157 Dateien): + +\footnotesize +\begin{verbatim} +FileIndex=1-157 +Count=157 +\end{verbatim} +\normalsize + +Letztendlich, wenn Sie auch die File-Nummer wissen, wo auf dem Volume die +ausgew\"{a}hlten Dateien liegen, k\"{o}nnen Sie das bcopy-Programm veranlassen, +zum richtigen File auf dem Volumen zu springen, ohne jeden Eintrag lesen zu m\"{u}ssen: + +\footnotesize +\begin{verbatim} +VolFile=20 +\end{verbatim} +\normalsize + +Bootstrap-Dateien sind weder magisch noch kompliziert. Sie zu lesen und Bacula sinnvoll mit ihnen +arbeiten zu lassen *ist* magisch, aber darum brauchen Sie sich nicht k\"{u}mmern. + +Wenn Sie eine *echte* Bootstrap-Datei sehen wollen, starten sie das Console-Programm und geben Sie +{\bf restore} ein, w\"{a}hlen ein paar Dateien aus und antworten mit {\bf no}, +wenn Sie gefragt werden, ob Sie die Wiederherstellung starten wollen. Dann finden Sie die Bootstrap-Datei +im Arbeitsverzeichnis des Director-Dienstes (z.B. unter /var/lib/bacula/backup-dir.restore.2.bsr). diff --git a/docs/manuals/de/concepts/bugs.tex b/docs/manuals/de/concepts/bugs.tex new file mode 100644 index 00000000..6f4b9b6a --- /dev/null +++ b/docs/manuals/de/concepts/bugs.tex @@ -0,0 +1,19 @@ +%% +%% + +\section{Bacula Bugs} +\label{BugsChapter} +\index[general]{Bacula Bugs } +\index[general]{Bugs!Bacula } + +Zum Gl\"{u}ck gibt es in Bacula nicht sehr viele Programmfehler (Bugs), +aber dank Dan Langille haben wir eine \elink{Bug-Datenbank}{http://bugs.bacula.org}, +wo Fehler gemeldet werden k\"{o}nnen. Wenn ein Fehler behoben ist, wird normalerweise ein +Programmst\"{u}ck das den Fehler korrigiert (Patch), auf der Seite des Fehlerberichts +ver\"{o}ffentlicht. + +Das Verzeichnis {\bf patches} im aktuellen SVN enth\"{a}lt eine Liste aller Programmkorrekturen +die f\"{u}r \"{a}ltere Bacula-Versionen ver\"{o}ffentlicht wurden. + +Eine "grobe" \"{U}bersicht der momentanen Arbeit und bekannter Probleme befindet sich +auch in der Datei {\bf kernstodo} im Hauptverzeichnis der Bacula-Programmquellen. diff --git a/docs/manuals/de/concepts/check_tex.pl b/docs/manuals/de/concepts/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/de/concepts/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. 
Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. 
+ if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/de/concepts/concepts.tex b/docs/manuals/de/concepts/concepts.tex new file mode 100644 index 00000000..c4f4b08c --- /dev/null +++ b/docs/manuals/de/concepts/concepts.tex @@ -0,0 +1,115 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{dir}{ddx}{dnd}{Director Index} +\newindex{fd}{fdx}{fnd}{File Daemon Index} +\newindex{sd}{sdx}{snd}{Storage Daemon Index} +\newindex{console}{cdx}{cnd}{Console Index} +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Concepts and Overview Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". 
+} + +\maketitle + +\clearpage +\pagenumbering{roman} +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\markboth{Bacula Manual}{} +\pagenumbering{arabic} +\include{general} +\include{state} +\include{requirements} +\include{supportedoses} +\include{supporteddrives} +\include{tutorial} +\include{restore} +\include{recycling} +\include{disk} +\include{dvd} +\include{pools} +\include{migration} +\include{strategies} +\include{autochangers} +\include{supportedchangers} +\include{spooling} +\include{python} +\include{ansi-labels} +\include{win32} +\include{rescue} +\include{tls} +\include{dataencryption} +\include{verify} +\include{bootstrap} +\include{license} +\include{fdl} +\include{gpl} +\include{lesser} +\include{projects} +\include{thanks} +\include{bugs} +\include{vars} +\include{stunnel} + +% pull in the index +\clearpage +\printindex[general] +\printindex[dir] +\printindex[fd] +\printindex[sd] +\printindex[console] + +\end{document} diff --git a/docs/manuals/de/concepts/dataencryption.tex b/docs/manuals/de/concepts/dataencryption.tex new file mode 100644 index 00000000..34b050fe --- /dev/null +++ b/docs/manuals/de/concepts/dataencryption.tex @@ -0,0 +1,195 @@ + +\chapter{Data Encryption} +\label{DataEncryption} +\index[general]{Data Encryption} +\index[general]{Encryption!Data} +\index[general]{Data Encryption} + +Bacula permits file data encryption and signing within the File Daemon (or +Client) prior to sending data to the Storage Daemon. Upon restoration, +file signatures are validated and any mismatches are reported. At no time +does the Director or the Storage Daemon have access to unencrypted file +contents. + + +It is very important to specify what this implementation does NOT +do: +\begin{itemize} +\item There is one important restore problem to be aware of, namely, it's + possible for the director to restore new keys or a Bacula configuration + file to the client, and thus force later backups to be made with a + compromised key and/or with no encryption at all. You can avoid this by + not changing the location of the keys in your Bacula File daemon + configuration file, and not changing your File daemon keys. If you do + change either one, you must ensure that no restore is done that restores + the old configuration or the old keys. In general, the worst effect of + this will be that you can no longer connect to the File daemon. + +\item The implementation does not encrypt file metadata such as file path + names, permissions, and ownership. Extended attributes are also currently + not encrypted. However, Mac OS X resource forks are encrypted. +\end{itemize} + +Encryption and signing are implemented using RSA private keys coupled with +self-signed x509 public certificates. This is also sometimes known as PKI +or Public Key Infrastructure. + +Each File Daemon should be given its own unique private/public key pair. +In addition to this key pair, any number of "Master Keys" may be specified +-- these are key pairs that may be used to decrypt any backups should the +File Daemon key be lost. Only the Master Key's public certificate should +be made available to the File Daemon. Under no circumstances should the +Master Private Key be shared or stored on the Client machine. + +The Master Keys should be backed up to a secure location, such as a CD +placed in a fire-proof safe or bank safety deposit box.
The Master +Keys should never be kept on the same machine as the Storage Daemon or +Director if you are worried about an unauthorized party compromising either +machine and accessing your encrypted backups. + +While less critical than the Master Keys, File Daemon Keys are also a prime +candidate for off-site backups; burn the key pair to a CD and send the CD +home with the owner of the machine. + +NOTE!!! If you lose your encryption keys, backups will be unrecoverable. +{\bf ALWAYS} store a copy of your master keys in a secure, off-site location. + +The basic algorithm used for each backup session (Job) is: +\begin{enumerate} +\item The File daemon generates a session key. +\item The FD encrypts that session key via PKE for all recipients (the file +daemon, any master keys). +\item The FD uses that session key to perform symmetric encryption on the data. +\end{enumerate} + + +\section{Building Bacula with Encryption Support} +\index[general]{Building Bacula with Encryption Support} + +The configuration option for enabling OpenSSL encryption support has not changed +since Bacula 1.38. To build Bacula with encryption support, you will need +the OpenSSL libraries and headers installed. When configuring Bacula, use: + +\begin{verbatim} + ./configure --with-openssl ... +\end{verbatim} + +\section{Encryption Technical Details} +\index[general]{Encryption Technical Details} + +The implementation uses 128bit AES-CBC, with RSA encrypted symmetric +session keys. The RSA key is user supplied. +If you are running OpenSSL 0.9.8 or later, the signed file hash uses +SHA-256 -- otherwise, SHA-1 is used. + +End-user configuration settings for the algorithms are not currently +exposed -- only the algorithms listed above are used. However, the +data written to Volume supports arbitrary symmetric, asymmetric, and +digest algorithms for future extensibility, and the back-end +implementation currently supports: + +\begin{verbatim} +Symmetric Encryption: + - 128, 192, and 256-bit AES-CBC + - Blowfish-CBC + +Asymmetric Encryption (used to encrypt symmetric session keys): + - RSA + +Digest Algorithms: + - MD5 + - SHA1 + - SHA256 + - SHA512 +\end{verbatim} + +The various algorithms are exposed via an entirely re-usable, +OpenSSL-agnostic API (ie, it is possible to drop in a new encryption +backend). The Volume format is DER-encoded ASN.1, modeled after the +Cryptographic Message Syntax from RFC 3852. Unfortunately, using CMS +directly was not possible, as at the time of coding a free software +streaming DER decoder/encoder was not available. + + +\section{Decrypting with a Master Key} +\index[general]{Decrypting with a Master Key} + +It is preferable to retain a secure, non-encrypted copy of the +client's own encryption keypair. However, should you lose the +client's keypair, recovery with the master keypair is possible. + +You must: +\begin{itemize} +\item Concatenate the master private and public key into a single + keypair file, ie: + cat master.key master.cert >master.keypair + +\item 2) Set the PKI Keypair statement in your bacula configuration file: + +\begin{verbatim} + PKI Keypair = master.keypair +\end{verbatim} + +\item Start the restore. The master keypair will be used to decrypt + the file data. 
+ +\end{itemize} + + +\section{Generating Private/Public Encryption Keys} +\index[general]{Generating Private/Public Encryption Keypairs} + +Generate a Master Key Pair with: + +\footnotesize +\begin{verbatim} + openssl genrsa -out master.key 2048 + openssl req -new -key master.key -x509 -out master.cert +\end{verbatim} +\normalsize + +Generate a File Daemon Key Pair for each FD: + +\footnotesize +\begin{verbatim} + openssl genrsa -out fd-example.key 2048 + openssl req -new -key fd-example.key -x509 -out fd-example.cert + cat fd-example.key fd-example.cert >fd-example.pem +\end{verbatim} +\normalsize + +Note, there seems to be a lot of confusion around the file extensions given +to these keys. For example, a .pem file can contain all the following: +private keys (RSA and DSA), public keys (RSA and DSA) and (x509) certificates. +It is the default format for OpenSSL. It stores data Base64 encoded DER format, +surrounded by ASCII headers, so is suitable for text mode transfers between +systems. A .pem file may contain any number of keys either public or +private. We use it in cases where there is both a public and a private +key. + +Typically, above we have used the .cert extension to refer to X509 +certificate encoding that contains only a single public key. + + +\section{Example Data Encryption Configuration} +\index[general]{Example!File Daemon Configuration File} +\index[general]{Example!Data Encryption Configuration File} +\index[general]{Example Data Encryption Configuration} + +{\bf bacula-fd.conf} +\footnotesize +\begin{verbatim} +FileDaemon { + Name = example-fd + FDport = 9102 # where we listen for the director + WorkingDirectory = /var/bacula/working + Pid Directory = /var/run + Maximum Concurrent Jobs = 20 + + PKI Signatures = Yes # Enable Data Signing + PKI Encryption = Yes # Enable Data Encryption + PKI Keypair = "/etc/bacula/fd-example.pem" # Public and Private Keys + PKI Master Key = "/etc/bacula/master.cert" # ONLY the Public Key +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/concepts/disk.tex b/docs/manuals/de/concepts/disk.tex new file mode 100644 index 00000000..3f38be9f --- /dev/null +++ b/docs/manuals/de/concepts/disk.tex @@ -0,0 +1,789 @@ +%% +%% + +\chapter{Basic Volume Management} +\label{DiskChapter} +\index[general]{Basic Volume Management} +\index[general]{Management!Basic Volume} +\index[general]{Disk Volumes} + +This chapter presents most all the features needed to do Volume management. +Most of the concepts apply equally well to both tape and disk Volumes. +However, the chapter was originally written to explain backing up to disk, so +you will see it is slanted in that direction, but all the directives +presented here apply equally well whether your volume is disk or tape. + +If you have a lot of hard disk storage or you absolutely must have your +backups run within a small time window, you may want to direct Bacula to +backup to disk Volumes rather than tape Volumes. This chapter is intended to +give you some of the options that are available to you so that you can manage +either disk or tape volumes. + +\label{Concepts} +\section{Key Concepts and Resource Records} +\index[general]{Key Concepts and Resource Records } +\index[general]{Records!Key Concepts and Resource } + +Getting Bacula to write to disk rather than tape in the simplest case is +rather easy. In the Storage daemon's configuration file, you simply define an +{\bf Archive Device} to be a directory. 
For example, if you want your disk +backups to go into the directory {\bf /home/bacula/backups}, you could use the +following: + +\footnotesize +\begin{verbatim} +Device { + Name = FileBackup + Media Type = File + Archive Device = /home/bacula/backups + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +\end{verbatim} +\normalsize + +Assuming you have the appropriate {\bf Storage} resource in your Director's +configuration file that references the above Device resource, + +\footnotesize +\begin{verbatim} +Storage { + Name = FileStorage + Address = ... + Password = ... + Device = FileBackup + Media Type = File +} +\end{verbatim} +\normalsize + +Bacula will then write the archive to the file {\bf +/home/bacula/backups/\lt{}volume-name\gt{}} where \lt{}volume-name\gt{} is the +volume name of a Volume defined in the Pool. For example, if you have labeled +a Volume named {\bf Vol001}, Bacula will write to the file {\bf +/home/bacula/backups/Vol001}. Although you can later move the archive file to +another directory, you should not rename it or it will become unreadable by +Bacula. This is because each archive has the filename as part of the internal +label, and the internal label must agree with the system filename before +Bacula will use it. + +Although this is quite simple, there are a number of problems. The first is +that unless you specify otherwise, Bacula will always write to the same volume +until you run out of disk space. This problem is addressed below. + +In addition, if you want to use concurrent jobs that write to several +different volumes at the same time, you will need to understand a number +of other details. An example of such a configuration is given +at the end of this chapter under \ilink{Concurrent Disk +Jobs}{ConcurrentDiskJobs}. + +\subsection{Pool Options to Limit the Volume Usage} +\index[general]{Usage!Pool Options to Limit the Volume } +\index[general]{Pool Options to Limit the Volume Usage } + +Some of the options you have, all of which are specified in the Pool record, +are: + +\begin{itemize} +\item To write each Volume only once (i.e. one Job per Volume or file in this + case), use: + +{\bf UseVolumeOnce = yes}. + +\item To write nnn Jobs to each Volume, use: + + {\bf Maximum Volume Jobs = nnn}. + +\item To limit the maximum size of each Volume, use: + + {\bf Maximum Volume Bytes = mmmm}. + + Note, if you use disk volumes, with all versions up to and including + 1.39.28, you should probably limit the Volume size to some reasonable + value such as say 5GB. This is because during a restore, Bacula is + currently unable to seek to the proper place in a disk volume to restore + a file, which means that it must read all records up to where the + restore begins. If your Volumes are 50GB, reading half or more of the + volume could take quite a bit of time. Also, if you ever have a partial + hard disk failure, you are more likely to be able to recover more data + if they are in smaller Volumes. + +\item To limit the use time (i.e. write the Volume for a maximum of five days), + use: + +{\bf Volume Use Duration = ttt}. +\end{itemize} + +Note that although you probably would not want to limit the number of bytes on +a tape as you would on a disk Volume, the other options can be very useful in +limiting the time Bacula will use a particular Volume (be it tape or disk). +For example, the above directives can allow you to ensure that you rotate +through a set of daily Volumes if you wish. 
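+As a rough, purely illustrative sketch (the Pool name and the limit values below are invented, not
+recommendations), a Pool that combines several of the directives above might look like this:
+
+\footnotesize
+\begin{verbatim}
+Pool {
+  Name = FilePool                 # example name
+  Pool Type = Backup
+  Maximum Volume Jobs = 1         # one Job per Volume (file)
+  Maximum Volume Bytes = 5G       # keep disk Volumes reasonably small
+  Volume Use Duration = 5 days    # stop writing to a Volume after five days
+}
+\end{verbatim}
+\normalsize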
+ +As mentioned above, each of those directives is specified in the Pool or +Pools that you use for your Volumes. In the case of {\bf Maximum Volume Job}, +{\bf Maximum Volume Bytes}, and {\bf Volume Use Duration}, you can actually +specify the desired value on a Volume by Volume basis. The value specified in +the Pool record becomes the default when labeling new Volumes. Once a Volume +has been created, it gets its own copy of the Pool defaults, and subsequently +changing the Pool will have no effect on existing Volumes. You can either +manually change the Volume values, or refresh them from the Pool defaults using +the {\bf update volume} command in the Console. As an example +of the use of one of the above, suppose your Pool resource contains: + +\footnotesize +\begin{verbatim} +Pool { + Name = File + Pool Type = Backup + Volume Use Duration = 23h +} +\end{verbatim} +\normalsize + +then if you run a backup once a day (every 24 hours), Bacula will use a new +Volume for each backup, because each Volume it writes can only be used for 23 hours +after the first write. Note, setting the use duration to 23 hours is not a very +good solution for tapes unless you have someone on-site during the weekends, +because Bacula will want a new Volume and no one will be present to mount it, +so no weekend backups will be done until Monday morning. + +\label{AutomaticLabeling} +\subsection{Automatic Volume Labeling} +\index[general]{Automatic Volume Labeling } +\index[general]{Labeling!Automatic Volume } + +Use of the above records brings up another problem -- that of labeling your +Volumes. For automated disk backup, you can either manually label each of your +Volumes, or you can have Bacula automatically label new Volumes when they are +needed. While, the automatic Volume labeling in version 1.30 and prior is a +bit simplistic, but it does allow for automation, the features added in +version 1.31 permit automatic creation of a wide variety of labels including +information from environment variables and special Bacula Counter variables. +In version 1.37 and later, it is probably much better to use Python scripting +and the NewVolume event since generating Volume labels in a Python script is +much easier than trying to figure out Counter variables. See the +\ilink{Python Scripting}{PythonChapter} chapter of this manual for more +details. + +Please note that automatic Volume labeling can also be used with tapes, but +it is not nearly so practical since the tapes must be pre-mounted. This +requires some user interaction. Automatic labeling from templates does NOT +work with autochangers since Bacula will not access unknown slots. There +are several methods of labeling all volumes in an autochanger magazine. +For more information on this, please see the \ilink{ +Autochanger}{AutochangersChapter} chapter of this manual. + +Automatic Volume labeling is enabled by making a change to both the Pool +resource (Director) and to the Device resource (Storage daemon) shown above. +In the case of the Pool resource, you must provide Bacula with a label format +that it will use to create new names. In the simplest form, the label format +is simply the Volume name, to which Bacula will append a four digit number. +This number starts at 0001 and is incremented for each Volume the catalog +contains. 
Thus if you modify your Pool resource to be: + +\footnotesize +\begin{verbatim} +Pool { + Name = File + Pool Type = Backup + Volume Use Duration = 23h + LabelFormat = "Vol" +} +\end{verbatim} +\normalsize + +Bacula will create Volume names Vol0001, Vol0002, and so on when new Volumes +are needed. Much more complex and elaborate labels can be created using +variable expansion defined in the +\ilink{Variable Expansion}{VarsChapter} chapter of this manual. + +The second change that is necessary to make automatic labeling work is to give +the Storage daemon permission to automatically label Volumes. Do so by adding +{\bf LabelMedia = yes} to the Device resource as follows: + +\footnotesize +\begin{verbatim} +Device { + Name = File + Media Type = File + Archive Device = /home/bacula/backups + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; + LabelMedia = yes +} +\end{verbatim} +\normalsize + +You can find more details of the {\bf Label Format} Pool record in +\ilink{Label Format}{Label} description of the Pool resource +records. + +\label{Recycling1} +\subsection{Restricting the Number of Volumes and Recycling} +\index[general]{Recycling!Restricting the Number of Volumes and Recycling} +\index[general]{Restricting the Number of Volumes and Recycling} + +Automatic labeling discussed above brings up the problem of Volume management. +With the above scheme, a new Volume will be created every day. If you have not +specified Retention periods, your Catalog will continue to fill keeping track +of all the files Bacula has backed up, and this procedure will create one new +archive file (Volume) every day. + +The tools Bacula gives you to help automatically manage these problems are the +following: + +\begin{enumerate} +\item Catalog file record retention periods, the + \ilink{File Retention = ttt}{FileRetention} record in the Client + resource. +\item Catalog job record retention periods, the + \ilink{Job Retention = ttt}{JobRetention} record in the Client + resource. +\item The + \ilink{ AutoPrune = yes}{AutoPrune} record in the Client resource + to permit application of the above two retention periods. +\item The + \ilink{ Volume Retention = ttt}{VolRetention} record in the Pool + resource. +\item The + \ilink{ AutoPrune = yes}{PoolAutoPrune} record in the Pool + resource to permit application of the Volume retention period. +\item The + \ilink{ Recycle = yes}{PoolRecycle} record in the Pool resource + to permit automatic recycling of Volumes whose Volume retention period has + expired. +\item The + \ilink{ Recycle Oldest Volume = yes}{RecycleOldest} record in the + Pool resource tells Bacula to Prune the oldest volume in the Pool, and if all + files were pruned to recycle this volume and use it. +\item The + \ilink{ Recycle Current Volume = yes}{RecycleCurrent} record in + the Pool resource tells Bacula to Prune the currently mounted volume in the + Pool, and if all files were pruned to recycle this volume and use it. +\item The + \ilink{ Purge Oldest Volume = yes}{PurgeOldest} record in the + Pool resource permits a forced recycling of the oldest Volume when a new one + is needed. {\bf N.B. This record ignores retention periods! We highly + recommend not to use this record, but instead use Recycle Oldest Volume} +\item The + \ilink{ Maximum Volumes = nnn}{MaxVolumes} record in the Pool + resource to limit the number of Volumes that can be created. 
+\end{enumerate} + +The first three records (File Retention, Job Retention, and AutoPrune) +determine the amount of time that Job and File records will remain in your +Catalog, and they are discussed in detail in the +\ilink{Automatic Volume Recycling}{RecyclingChapter} chapter of +this manual. + +Volume Retention, AutoPrune, and Recycle determine how long Bacula will keep +your Volumes before reusing them, and they are also discussed in detail in the +\ilink{Automatic Volume Recycling}{RecyclingChapter} chapter of +this manual. + +The Maximum Volumes record can also be used in conjunction with the Volume +Retention period to limit the total number of archive Volumes (files) that +Bacula will create. By setting an appropriate Volume Retention period, a +Volume will be purged just before it is needed and thus Bacula can cycle +through a fixed set of Volumes. Cycling through a fixed set of Volumes can +also be done by setting {\bf Recycle Oldest Volume = yes} or {\bf Recycle +Current Volume = yes}. In this case, when Bacula needs a new Volume, it will +prune the specified volume. + +\label{ConcurrentDiskJobs} +\section{Concurrent Disk Jobs} +\index[general]{Concurrent Disk Jobs} +Above, we discussed how you could have a single device named {\bf +FileBackup} that writes to volumes in {\bf /home/bacula/backups}. +You can, in fact, run multiple concurrent jobs using the +Storage definition given with this example, and all the jobs will +simultaneously write into the Volume that is being written. + +Now suppose you want to use multiple Pools, which means multiple +Volumes, or suppose you want each client to have its own Volume +and perhaps its own directory such as {\bf /home/bacula/client1} +and {\bf /home/bacula/client2} ... With the single Storage and Device +definition above, neither of these two is possible. Why? Because +Bacula disk storage follows the same rules as tape devices. Only +one Volume can be mounted on any Device at any time. If you want +to simultaneously write multiple Volumes, you will need multiple +Device resources in your bacula-sd.conf file, and thus multiple +Storage resources in your bacula-dir.conf. + +OK, so now you should understand that you need multiple Device definitions +in the case of different directories or different Pools, but you also +need to know that the catalog data that Bacula keeps contains only +the Media Type and not the specific storage device. This permits a tape, +for example, to be re-read on any compatible tape drive, the compatibility +being determined by the Media Type. The same applies to disk storage. +Since a volume that is written by a Device in, say, directory {\bf +/home/bacula/backups} cannot be read by a Device with an Archive Device +definition of {\bf /home/bacula/client1}, you will not be able to +restore all your files if you give both those devices +{\bf Media Type = File}. During the restore, Bacula will simply choose +the first available device, which may not be the correct one. If this +is confusing, just remember that the Director has only the Media Type +and the Volume name. It does not know the {\bf Archive Device} (or the +full path) that is specified in the Storage daemon. Thus you must +explicitly tie your Volumes to the correct Device by using the Media Type. + +The example below shows a case where there are two clients, each +using its own Pool and storing their Volumes in different directories.
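+Before the full example, here is a minimal, illustrative sketch of how the retention and recycling
+records discussed above might be combined in a Client and a Pool (the names, passwords and periods
+are invented for illustration only):
+
+\footnotesize
+\begin{verbatim}
+Client {
+  Name = example-client
+  Address = example-client
+  Catalog = BackupDB
+  Password = client_password
+  File Retention = 30 days      # prune File records after 30 days
+  Job Retention = 6 months      # prune Job records after 6 months
+  AutoPrune = yes               # apply the two retention periods above
+}
+
+Pool {
+  Name = FilePool
+  Pool Type = Backup
+  Volume Retention = 1 year     # how long Volume data is kept in the catalog
+  AutoPrune = yes
+  Recycle = yes                 # reuse Volumes once they have been purged
+  Maximum Volumes = 12          # cap the number of Volumes that are created
+}
+\end{verbatim}
+\normalsize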
+ + +\label{Example2} +\section{An Example} +\index[general]{Example } + +The following example is not very practical, but can be used to demonstrate +the proof of concept in a relatively short period of time. The example +consists of a two clients that are backed up to a set of 12 archive files +(Volumes) for each client into different directories on the Storage +machine. Each Volume is used (written) only once, and there are four Full +saves done every hour (so the whole thing cycles around after three hours). + +What is key here is that each physical device on the Storage daemon +has a different Media Type. This allows the Director to choose the +correct device for restores ... + +The Director's configuration file is as follows: + +\footnotesize +\begin{verbatim} +Director { + Name = my-dir + QueryFile = "~/bacula/bin/query.sql" + PidDirectory = "~/bacula/working" + WorkingDirectory = "~/bacula/working" + Password = dir_password +} +Schedule { + Name = "FourPerHour" + Run = Level=Full hourly at 0:05 + Run = Level=Full hourly at 0:20 + Run = Level=Full hourly at 0:35 + Run = Level=Full hourly at 0:50 +} +Job { + Name = "RecycleExample" + Type = Backup + Level = Full + Client = Rufus + FileSet= "Example FileSet" + Messages = Standard + Storage = FileStorage + Pool = Recycle + Schedule = FourPerHour +} + +Job { + Name = "RecycleExample2" + Type = Backup + Level = Full + Client = Roxie + FileSet= "Example FileSet" + Messages = Standard + Storage = FileStorage1 + Pool = Recycle1 + Schedule = FourPerHour +} + +FileSet { + Name = "Example FileSet" + Include = compression=GZIP signature=SHA1 { + /home/kern/bacula/bin + } +} +Client { + Name = Rufus + Address = rufus + Catalog = BackupDB + Password = client_password +} + +Client { + Name = Roxie + Address = roxie + Catalog = BackupDB + Password = client1_password +} + +Storage { + Name = FileStorage + Address = rufus + Password = local_storage_password + Device = RecycleDir + Media Type = File +} + +Storage { + Name = FileStorage1 + Address = rufus + Password = local_storage_password + Device = RecycleDir1 + Media Type = File1 +} + +Catalog { + Name = BackupDB + dbname = bacula; user = bacula; password = "" +} +Messages { + Name = Standard + ... 
+} +Pool { + Name = Recycle + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "Recycle-" + AutoPrune = yes + VolumeRetention = 2h + Maximum Volumes = 12 + Recycle = yes +} + +Pool { + Name = Recycle1 + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "Recycle1-" + AutoPrune = yes + VolumeRetention = 2h + Maximum Volumes = 12 + Recycle = yes +} + +\end{verbatim} +\normalsize + +and the Storage daemon's configuration file is: + +\footnotesize +\begin{verbatim} +Storage { + Name = my-sd + WorkingDirectory = "~/bacula/working" + Pid Directory = "~/bacula/working" + MaximumConcurrentJobs = 10 +} +Director { + Name = my-dir + Password = local_storage_password +} +Device { + Name = RecycleDir + Media Type = File + Archive Device = /home/bacula/backups + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} + +Device { + Name = RecycleDir1 + Media Type = File1 + Archive Device = /home/bacula/backups1 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} + +Messages { + Name = Standard + director = my-dir = all +} +\end{verbatim} +\normalsize + +With a little bit of work, you can change the above example into a weekly or +monthly cycle (take care about the amount of archive disk space used). + +\label{MultipleDisks} +\section{Backing up to Multiple Disks} +\index[general]{Disks!Backing up to Multiple } +\index[general]{Backing up to Multiple Disks } + +Bacula can, of course, use multiple disks, but in general, each disk must be a +separate Device specification in the Storage daemon's conf file, and you must +then select what clients to backup to each disk. You will also want to +give each Device specification a different Media Type so that during +a restore, Bacula will be able to find the appropriate drive. + +The situation is a bit more complicated if you want to treat two different +physical disk drives (or partitions) logically as a single drive, which +Bacula does not directly support. However, it is possible to back up your +data to multiple disks as if they were a single drive by linking the +Volumes from the first disk to the second disk. + +For example, assume that you have two disks named {\bf /disk1} and {\bf +/disk2}. If you then create a standard Storage daemon Device resource for +backing up to the first disk, it will look like the following: + +\footnotesize +\begin{verbatim} +Device { + Name = client1 + Media Type = File + Archive Device = /disk1 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +\end{verbatim} +\normalsize + +Since there is no way to get the above Device resource to reference both {\bf +/disk1} and {\bf /disk2} we do it by pre-creating Volumes on /disk2 with the +following: + +\footnotesize +\begin{verbatim} +ln -s /disk2/Disk2-vol001 /disk1/Disk2-vol001 +ln -s /disk2/Disk2-vol002 /disk1/Disk2-vol002 +ln -s /disk2/Disk2-vol003 /disk1/Disk2-vol003 +... +\end{verbatim} +\normalsize + +At this point, you can label the Volumes as Volume {\bf Disk2-vol001}, {\bf +Disk2-vol002}, ... and Bacula will use them as if they were on /disk1 but +actually write the data to /disk2. The only minor inconvenience with this +method is that you must explicitly name the disks and cannot use automatic +labeling unless you arrange to have the labels exactly match the links you +have created. + +An important thing to know is that Bacula treats disks like tape drives +as much as it can. 
This means that you can only have a single Volume +mounted at one time on a disk as defined in your Device resource in +the Storage daemon's conf file. You can have multiple concurrent +jobs running that all write to the one Volume that is being used, but +if you want to have multiple concurrent jobs that are writing to +separate disk drives (or partitions), you will need to define +separate Device resources for each one, exactly as you would do for +two different tape drives. There is one fundamental difference, however. +The Volumes that you create on the two drives cannot be easily exchanged +as they can for a tape drive, because they are physically resident (already +mounted in a sense) on the particular drive. As a consequence, you will +probably want to give them different Media Types so that Bacula can +distinguish what Device resource to use during a restore. +An example would be the following: + +\footnotesize +\begin{verbatim} +Device { + Name = Disk1 + Media Type = File1 + Archive Device = /disk1 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} + +Device { + Name = Disk2 + Media Type = File2 + Archive Device = /disk2 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +\end{verbatim} +\normalsize + +With the above device definitions, you can run two concurrent +jobs, each writing at the same time, one to {\bf /disk1} and the +other to {\bf /disk2}. The fact that you have given them different +Media Types will allow Bacula to quickly choose the correct +Storage resource in the Director when doing a restore. + +\label{MultipleClients} +\section{Considerations for Multiple Clients} +\index[general]{Clients!Considerations for Multiple } +\index[general]{Multiple Clients} + +If we take the above example and add a second Client, here are a few +considerations: + +\begin{itemize} +\item Although the second client can write to the same set of Volumes, you + will probably want to write to a different set. +\item You can write to a different set of Volumes by defining a second Pool, + which has a different name and a different {\bf LabelFormat}. +\item If you wish the Volumes for the second client to go into a different + directory (perhaps even on a different filesystem to spread the load), you + would do so by defining a second Device resource in the Storage daemon. The +{\bf Name} must be different, and the {\bf Archive Device} could be +different. To ensure that Volumes are never mixed from one pool to another, +you might also define a different Media Type (e.g. {\bf File1}). +\end{itemize} + +In this example, we have two clients, each with a different Pool and a +different number of archive files retained. They also write to different +directories with different Volume labeling.
+ +The Director's configuration file is as follows: + +\footnotesize +\begin{verbatim} +Director { + Name = my-dir + QueryFile = "~/bacula/bin/query.sql" + PidDirectory = "~/bacula/working" + WorkingDirectory = "~/bacula/working" + Password = dir_password +} +# Basic weekly schedule +Schedule { + Name = "WeeklySchedule" + Run = Level=Full fri at 1:30 + Run = Level=Incremental sat-thu at 1:30 +} +FileSet { + Name = "Example FileSet" + Include = compression=GZIP signature=SHA1 { + /home/kern/bacula/bin + } +} +Job { + Name = "Backup-client1" + Type = Backup + Level = Full + Client = client1 + FileSet= "Example FileSet" + Messages = Standard + Storage = File1 + Pool = client1 + Schedule = "WeeklySchedule" +} +Job { + Name = "Backup-client2" + Type = Backup + Level = Full + Client = client2 + FileSet= "Example FileSet" + Messages = Standard + Storage = File2 + Pool = client2 + Schedule = "WeeklySchedule" +} +Client { + Name = client1 + Address = client1 + Catalog = BackupDB + Password = client1_password + File Retention = 7d +} +Client { + Name = client2 + Address = client2 + Catalog = BackupDB + Password = client2_password +} +# Two Storage definitions with different Media Types +# permits different directories +Storage { + Name = File1 + Address = rufus + Password = local_storage_password + Device = client1 + Media Type = File1 +} +Storage { + Name = File2 + Address = rufus + Password = local_storage_password + Device = client2 + Media Type = File2 +} +Catalog { + Name = BackupDB + dbname = bacula; user = bacula; password = "" +} +Messages { + Name = Standard + ... +} +# Two pools permits different cycling periods and Volume names +# Cycle through 15 Volumes (two weeks) +Pool { + Name = client1 + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "Client1-" + AutoPrune = yes + VolumeRetention = 13d + Maximum Volumes = 15 + Recycle = yes +} +# Cycle through 8 Volumes (1 week) +Pool { + Name = client2 + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "Client2-" + AutoPrune = yes + VolumeRetention = 6d + Maximum Volumes = 8 + Recycle = yes +} +\end{verbatim} +\normalsize + +and the Storage daemon's configuration file is: + +\footnotesize +\begin{verbatim} +Storage { + Name = my-sd + WorkingDirectory = "~/bacula/working" + Pid Directory = "~/bacula/working" + MaximumConcurrentJobs = 10 +} +Director { + Name = my-dir + Password = local_storage_password +} +# Archive directory for Client1 +Device { + Name = client1 + Media Type = File1 + Archive Device = /home/bacula/client1 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +# Archive directory for Client2 +Device { + Name = client2 + Media Type = File2 + Archive Device = /home/bacula/client2 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +Messages { + Name = Standard + director = my-dir = all +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/concepts/do_echo b/docs/manuals/de/concepts/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/de/concepts/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/de/concepts/dvd.tex b/docs/manuals/de/concepts/dvd.tex new file mode 100644 index 00000000..f11e70d6 --- /dev/null +++ b/docs/manuals/de/concepts/dvd.tex @@ -0,0 +1,329 @@ +%% +%% + 
+\chapter{DVD Volumes}
+\label{_DVDChapterStart}
+\index[general]{DVD Volumes}
+\index[general]{Writing DVDs}
+\index[general]{DVD Writing}
+\index[general]{Volumes!DVD}
+
+Bacula allows you to specify that you want to write to DVD. However,
+this feature is implemented only in version 1.37 or later.
+You may in fact write to DVD+RW, DVD+R, DVD-R, or DVD-RW
+media. The actual process used by Bacula is to first write
+the image to a spool directory, then when the Volume reaches
+a certain size or, at your option, at the end of a Job, Bacula
+will transfer the image from the spool directory to the
+DVD. The actual work of transferring the image is done
+by a script {\bf dvd-handler}, and the heart of that
+script is a program called {\bf growisofs} which allows
+creating or adding to a DVD ISO filesystem.
+
+You must have {\bf dvd+rw-tools} loaded on your system for DVD writing to
+work. Please note that the original {\bf dvd+rw-tools} package does {\bf
+NOT} work with Bacula. You must apply a patch which can be found in the
+{\bf patches} directory of the Bacula sources with the name
+{\bf dvd+rw-tools-5.21.4.10.8.bacula.patch} for version 5.21 of the tools,
+or the patch {\bf dvd+rw-tools-6.1.bacula.patch} if you have version 6.1
+on your system. Unfortunately, this requires you to build the dvd+rw-tools
+from source.
+
+Note that some Linux distros already ship with the patch applied; for
+example, the Debian dvd+rw-tools-7.0-4 package includes it, so please check.
+
+The fact that Bacula cannot use the OS to write directly
+to the DVD makes the whole process a bit more error-prone than
+writing to a disk or a tape, but nevertheless, it does work if you
+use some care to set it up properly. However, at the current time
+(version 1.39.30 -- 12 December 2006) we still consider this code to be
+BETA quality. As a consequence, please do careful testing before relying
+on DVD backups in production.
+
+The remainder of this chapter explains the various directives that you can
+use to control the DVD writing.
+
+\label{DVDdirectives}
+\section{DVD Specific SD Directives}
+\index[general]{Directives!DVD}
+\index[general]{DVD Specific SD Directives }
+
+The following directives are added to the Storage daemon's
+Device resource.
+
+\begin{description}
+
+\item [Requires Mount = {\it Yes|No}]
+  \index[sd]{Requires Mount }
+  You must set this directive to {\bf yes} for DVD-writers, and to {\bf no} for
+  all other devices (tapes/files). This directive indicates whether the device
+  must be mounted using the {\bf Mount Command}.
+  To be able to write a DVD, the following directives must also be
+  defined: {\bf Mount Point}, {\bf Mount Command}, {\bf Unmount Command} and
+  {\bf Write Part Command}.
+
+\item [Mount Point = {\it directory}]
+  \index[sd]{Mount Point}
+  Directory where the device can be mounted.
+
+\item [Mount Command = {\it name-string}]
+  \index[sd]{Mount Command}
+  Command that must be executed to mount the device. Although the
+  device is written directly, the mount command is necessary in
+  order to determine the free space left on the DVD. Before the command is
+  executed, \%a is replaced with the Archive Device, and \%m with the Mount
+  Point.
+
+  Most frequently, you will define it as follows:
+
+\footnotesize
+\begin{verbatim}
+  Mount Command = "/bin/mount -t iso9660 -o ro %a %m"
+\end{verbatim}
+\normalsize
+
+However, if you have defined a mount point in /etc/fstab, you might be
+able to use a mount command such as:
+
+\footnotesize
+\begin{verbatim}
+  Mount Command = "/bin/mount /media/dvd"
+\end{verbatim}
+\normalsize
+
+
+\item [Unmount Command = {\it name-string}]
+  \index[sd]{Unmount Command}
+  Command that must be executed to unmount the device. Before the command is
+  executed, \%a is replaced with the Archive Device, and \%m with the Mount
+  Point.
+
+  Most frequently, you will define it as follows:
+
+\footnotesize
+\begin{verbatim}
+  Unmount Command = "/bin/umount %m"
+\end{verbatim}
+\normalsize
+
+\item [Write Part Command = {\it name-string}]
+  \index[sd]{Write Part Command }
+  Command that must be executed to write a part to the device. Before the
+  command is executed, \%a is replaced with the Archive Device, \%m with the
+  Mount Point, \%e is replaced with 1 if we are writing the first part,
+  and with 0 otherwise, and \%v with the current part filename.
+
+  For a DVD, you will most frequently specify the Bacula supplied {\bf
+  dvd-handler} script as follows:
+
+\footnotesize
+\begin{verbatim}
+  Write Part Command = "/path/dvd-handler %a write %e %v"
+\end{verbatim}
+\normalsize
+
+  Where {\bf /path} is the path to your scripts install directory, and
+  dvd-handler is the Bacula supplied script file.
+  This command will already be present, but commented out,
+  in the default bacula-sd.conf file. To use it, simply remove
+  the comment (\#) symbol.
+
+
+\item [Free Space Command = {\it name-string}]
+  \index[sd]{Free Space Command }
+  Command that must be executed to check how much free space is left on the
+  device. Before the command is executed, \%a is replaced with the Archive
+  Device.
+
+  For a DVD, you will most frequently specify the Bacula supplied {\bf
+  dvd-handler} script as follows:
+
+\footnotesize
+\begin{verbatim}
+  Free Space Command = "/path/dvd-handler %a free"
+\end{verbatim}
+\normalsize
+
+  Where {\bf /path} is the path to your scripts install directory, and
+  dvd-handler is the Bacula supplied script file.
+  If you want to specify your own command, please look at the code in
+  dvd-handler to see what output Bacula expects from this command.
+  This command will already be present, but commented out,
+  in the default bacula-sd.conf file. To use it, simply remove
+  the comment (\#) symbol.
+
+  If you do not set it, Bacula will assume there is always free space on the
+  device.
+
+\end{description}
+
+In addition to the directives specified above, you must also
+specify the other standard Device resource directives. Please see the
+sample DVD Device resource in the default bacula-sd.conf file. Be sure
+to specify the raw device name for {\bf Archive Device}. It should
+be a name such as {\bf /dev/cdrom} or {\bf /media/cdrecorder} or
+{\bf /dev/dvd} depending on your system. It will not be a name such
+as {\bf /mnt/cdrom}.
+
+Finally, for {\bf growisofs} to work, it must be able to lock
+a certain amount of memory in RAM. If you have restrictions on
+this function, you may have failures.
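+
+As a quick check, you can display the locked-memory limit currently in
+effect for your shell with:
+
+\footnotesize
+\begin{verbatim}
+ulimit -l
+\end{verbatim}
+\normalsize
+
+If this prints a small value (the limit is reported in kbytes) rather than
+{\bf unlimited}, growisofs may not be able to lock the memory it needs.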
Under {\bf bash}, you can
+set this with the following command:
+
+\footnotesize
+\begin{verbatim}
+ulimit -l unlimited
+\end{verbatim}
+\normalsize
+
+\section{Edit Codes for DVD Directives}
+\index[general]{Directives!DVD Edit Codes}
+\index[general]{Edit Codes for DVD Directives }
+
+Before submitting the {\bf Mount Command}, {\bf Unmount Command},
+{\bf Write Part Command}, or {\bf Free Space Command} directives
+to the operating system, Bacula performs character substitution of the
+following characters:
+
+\footnotesize
+\begin{verbatim}
+ %% = %
+ %a = Archive device name
+ %e = erase (set if cannot mount and first part)
+ %n = part number
+ %m = mount point
+ %v = last part name (i.e. filename)
+\end{verbatim}
+\normalsize
+
+\section{DVD Specific Director Directives}
+\index[general]{Directives!DVD}
+\index[general]{DVD Specific Director Directives }
+
+The following directives are added to the Director's Job resource.
+
+\label{WritePartAfterJob}
+\begin{description}
+\item [Write Part After Job = \lt{}yes|no\gt{}]
+  \index[dir]{Write Part After Job }
+  If this directive is set to {\bf yes} (default {\bf no}), the
+  Volume data written to a temporary spool file for the current Job
+  will be written to the DVD as a new part file
+  after the job is finished.
+
+  It should be set to {\bf yes} when writing to devices that require a mount
+  (for example DVD), so you are sure that the current part, containing
+  this job's data, is written to the device, and that no data is left in
+  the temporary file on the hard disk. However, on some media, like DVD+R
+  and DVD-R, a lot of space (about 10 MB) is lost every time a part is
+  written. So, if you run several jobs one after another, you could set
+  this directive to {\bf no} for all jobs, except the last one, to avoid
+  wasting too much space, but to ensure that the data is written to the
+  medium when all jobs are finished.
+
+  This directive is ignored for devices other than DVDs.
+\end{description}
+
+\label{DVDpoints}
+\section{Other Points}
+\index[general]{Points!Other }
+\index[general]{Other Points }
+
+\begin{itemize}
+\item Please be sure that you have any automatic DVD mounting
+  disabled before running Bacula -- this includes auto mounting
+  in /etc/fstab, hotplug, ... If the DVD is automatically
+  mounted by the OS, it will cause problems when Bacula tries
+  to mount/unmount the DVD.
+\item Please be sure that you have the directive {\bf Write Part After Job}
+  set to {\bf yes}, otherwise the last part of the data to be
+  written will be left in the DVD spool file and not written to
+  the DVD. The DVD will then be unreadable until this last part
+  is written. If you have a series of jobs that are run one at
+  a time, you can turn this off until the last job is run.
+\item The current code is not designed to have multiple simultaneous
+  jobs writing to the DVD. As a consequence, please ensure that
+  only one DVD backup job runs at any time.
+\item Writing and reading of DVD+RW seems to work quite reliably
+  provided you are using the patched {\bf dvd+rw-tools} programs.
+  On the other hand, we do not have enough information to ensure
+  that DVD-RW or other forms of DVDs work correctly.
+\item DVD+RW supports only about 1000 overwrites. Every time you
+  mount the filesystem read/write, it counts as one write. This can
+  add up quickly, so it is best to mount your DVD+RW filesystem read-only.
+  Bacula does not need the DVD to be mounted read-write, since it uses
+  the raw device for writing.
+\item Reformatting DVD+RW 10-20 times can apparently make the medium
+  unusable. Normally you should not have to format or reformat
+  DVD+RW media. If it is necessary, current versions of growisofs will
+  do so automatically.
+\item We have had several problems writing to DVD-RWs (this does NOT
+  concern DVD+RW), because these media have two writing-modes: {\bf
+  Incremental Sequential} and {\bf Restricted Overwrite}. Depending on
+  your device and the media you use, one of these modes may not work
+  correctly (e.g. {\bf Incremental Sequential} does not work with my NEC
+  DVD-writer and Verbatim DVD-RW).
+
+  To retrieve the current mode of a DVD-RW, run:
+\begin{verbatim}
+  dvd+rw-mediainfo /dev/xxx
+\end{verbatim}
+  where you replace xxx with your DVD device name.
+
+  The {\bf Mounted Media} line should give you the information.
+
+  To set the device to {\bf Restricted Overwrite} mode, run:
+\begin{verbatim}
+  dvd+rw-format /dev/xxx
+\end{verbatim}
+  If you want to set it back to the default {\bf Incremental Sequential} mode, run:
+\begin{verbatim}
+  dvd+rw-format -blank /dev/xxx
+\end{verbatim}
+
+\item Bacula will only write to blank DVDs. To quickly blank a DVD+/-RW, run
+  this command:
+\begin{verbatim}
+  dd if=/dev/zero bs=1024 count=512 | growisofs -Z /dev/xxx=/dev/fd/0
+\end{verbatim}
+  Then try to mount the device; if it cannot be mounted, it will be considered
+  blank by Bacula; if it can be mounted, try a full blank (see below).
+
+\item If you wish to completely blank a DVD+/-RW, use the following:
+\begin{verbatim}
+  growisofs -Z /dev/xxx=/dev/zero
+\end{verbatim}
+  where you replace xxx with your DVD device name. However, note that this
+  blanks the whole DVD, which takes quite a long time (16 minutes on mine).
+\item DVD+RW and DVD-RW support only about 1000 overwrites (i.e. don't use the
+same medium for years if you don't want to have problems...).
+
+To write to the DVD the first time, use:
+\begin{verbatim}
+  growisofs -Z /dev/xxx filename
+\end{verbatim}
+
+To add additional files (more parts), use:
+
+\begin{verbatim}
+  growisofs -M /dev/xxx filename
+\end{verbatim}
+
+The option {\bf -use-the-force-luke=4gms} was added in growisofs 5.20 to
+override growisofs' behavior of always checking for the 4GB limit.
+Normally, this option is recommended for all Linux kernels 2.6.8 or greater,
+since these newer kernels can handle writing more than 4GB.
+See below for more details on this subject.
+
+\item For more information about DVD writing, please look at the
+\elink{dvd+rw-tools homepage}{http://fy.chalmers.se/~appro/linux/DVD+RW/}.
+
+\item According to bug \#912, bscan cannot read multi-volume DVDs. This is
+on our TODO list, but unless someone submits a patch it is not likely to be
+done any time in the near future. (9 Sept 2007).
+
+\end{itemize}
diff --git a/docs/manuals/de/concepts/fdl.tex b/docs/manuals/de/concepts/fdl.tex
new file mode 100644
index 00000000..b46cd990
--- /dev/null
+++ b/docs/manuals/de/concepts/fdl.tex
@@ -0,0 +1,485 @@
+% TODO: maybe get rid of centering
+
+\chapter{GNU Free Documentation License}
+\index[general]{GNU Free Documentation License}
+\index[general]{License!GNU Free Documentation}
+
+\label{label_fdl}
+
+ \begin{center}
+
+ Version 1.2, November 2002
+
+
+ Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc.
+
+ \bigskip
+
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+ \bigskip
+
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. 
+ +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. 
+ +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] 
+ Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. 
If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. 
+Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." 
line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/de/concepts/fix_tex.pl b/docs/manuals/de/concepts/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/de/concepts/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . 
'}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/de/concepts/general.tex b/docs/manuals/de/concepts/general.tex new file mode 100644 index 00000000..517a9159 --- /dev/null +++ b/docs/manuals/de/concepts/general.tex @@ -0,0 +1,535 @@ +%% +%% + +\chapter{Was ist Bacula?} +\label{GeneralChapter} +\index[general]{Bacula!Was ist } +\index[general]{Was ist Bacula? } + + +{\bf Bacula} ist ein System von Computerprogrammen, mit denen Sie +(oder der System-Administrator) in der Lage sind, Computerdaten innerhalb eines +heterogenen Netzwerkes zu sichern, die Sicherungen wiederherzustellen und diese zu \"{u}berpr\"{u}fen. Bacula kann auch auf einem einzigen Computer laufen und auf verschiedene Arten von Medien wie B\"{a}nder oder CDs sichern. +Technisch gesprochen ist es ein netzwerkf\"{a}higes +Sicherungsprogramm mit Client/Server-Architektur. +Bacula ist leistungsf\"{a}hig und vergleichsweise einfach zu benutzen. +Dabei hat es viele anspruchsvolle Funktionen zur Verwaltung der Sicherung, +die das Auffinden und die Wiederherstellung besch\"{a}digter oder verlorener +Dateien erleichtern. 
Durch seinen modularen Aufbau l\"{a}sst es sich jedem System +anpassen: Vom Einzelplatzrechner bis zu einem gro{\ss}en System mit hunderten von +Computern, die \"{u}ber ein weitr\"{a}umiges Netzwerk verteilt sind. +\section{Wer ben\"{o}tigt Bacula?} +\index[general]{Wer ben\"{o}tigt Bacula? } +\index[general]{Bacula!Wer ben\"{o}tigt } + + +Wenn Sie momentan Programme wie {\bf tar}, {\bf dump}, oder {\bf +bru} zur Datensicherung verwenden und eine Netzwerkl\"{o}sung, gr\"{o}{\ss}ere Flexibilit\"{a}t +oder einen Verzeichnis-Dienst suchen, wird Bacula wahrscheinlich die +zus\"{a}tzlichen Funktionen zur Verf\"{u}gung stellen, die Sie suchen. Wenn Sie dagegen +ein UNIX-Neuling sind oder keine weitergehenden Erfahrung mit +anspruchsvollen Sicherungsprogrammen haben, raten wir von Bacula ab, da es in +der Einrichtung und der Benutzung sehr viel komplizierter ist als z.B. +{\bf tar} oder {\bf dump}. + +Wenn Bacula wie die oben beschriebenen einfachen Programme funktionieren und +nur ein Band in Ihrem Laufwerk beschreiben soll, wird Ihnen der Umgang +mit Bacula kompliziert vorkommen. Bacula ist so entworfen, dass es Ihre +Daten nach von Ihnen festgelegten Regeln sichert, was bedeutet, dass die +Wiederverwendung eines Bandes nur die letzte Wahl sein kann. Nat\"{u}rlich ist es m\"{o}glich, Bacula dazu zu bringen, jedes beliebige Band im Laufwerk zu beschreiben, jedoch ist es einfacher und wirkungsvoller hierf\"{u}r ein anderes Programm zu +verwenden. +Wenn Sie {\bf Amanda} verwenden und ein Sicherungsprogramm suchen, das auf +mehrere Volumes schreiben kann (also nicht durch die Speicherkapazit\"{a}t Ihres +Bandlaufwerkes beschr\"{a}nkt ist) wird Bacula wahrscheinlich Ihren Bed\"{u}rfnissen +entsprechen. Viele unserer Benutzer finden au{\ss}erdem, dass Bacula +einfacher zu konfigurieren und zu benutzen ist als entsprechende andere Programme. + +Wenn Sie gegenw\"{a}rtig ein anspruchsvolles kommerzielles Programm wie ``Legato'', +``Networker'', ``ARCserveIT'', ``Arkeia'', oder ``PerfectBackup+'' verwenden, +k\"{o}nnte Sie Bacula interessieren, da es viele Eigenschaften und Funktionen dieser Programme hat, dabei aber als freie Software unter der GNU Software Lizenz Version 2 verf\"{u}gbar +ist. + + +\section{Bacula Komponenten oder Dienste} +\index[general]{Bacula Komponenten oder Dienste } +\index[general]{Dienste!Bacula Komponenten oder } + + +Bacula besteht aus den folgenden f\"{u}nf Hauptkomponenten bzw. Diensten: + +\addcontentsline{lof}{figure}{Bacula Applications} +\includegraphics{./bacula-applications.eps} +(Dank an Aristedes Maniatis f\"{u}r diese und die folgende Grafik) + +\subsection*{Bacula Director} + \label{DirDef} + Der {\bf Bacula Director}-Dienst ist das Programm, das alle Sicherungs-, Wiederherstellungs-, Verifizierungs- und + Archivierungsvorg\"{a}nge \"{u}berwacht. Der Systemadministrator verwendet den Bacula Director, um die Zeitpunkte der + Sicherungen festzulegen und Dateien wiederherzustellen. N\"{a}heres hierzu im Dokument ``Director Services Daemon + Design'' + im ``Bacula Developer's Guide''. + Der Director l\"{a}uft als D\"{a}mon bzw. Dienst (also im Hintergrund). + +\subsection*{Bacula Console} +\label{UADef} + Der {\bf Bacula Console}-Dienst ist jenes Programm, welches es einem +Systemadministrator oder Benutzer erlaubt, mit dem {\bf Bacula Director} zu +kommunizieren (siehe oben). Zur Zeit ist die Bacula Console in drei Versionen75% +verf\"{u}gbar. Die erste und einfachste ist das Consolen Programm in einer +Shell zu starten (also eine TTY-Schnittstelle). 
F\"{u}r die meisten +Systemadministratoren ist dies v\"{o}llig angemessen. Die zweite M\"{o}glichkeit ist ein +grafisches GNOME-Interface, das weit davon entfernt +ist, vollst\"{a}ndig zu sein, aber schon ganz gut funktioniert und die meisten +M\"{o}glichkeiten bietet, die auch die Shell-Konsole hat. Die dritte +Version ist eine grafische wxWidgets-Benutzeroberfl\"{a}che, \"{u}ber die Daten +interaktiv wiederhergestellt werden k\"{o}nnen. Auch sie hat die meisten Funktionen +der Shell-Konsole, bietet eine Befehlsvervollst\"{a}ndigung per Tabulatorentaste +und Kontexthilfe w\"{a}hrend der Befehlseingabe. +N\"{a}heres hierzu im Kapitel \ilink{Bacula Console +Design Document}{_ChapterStart23}. +\subsection*{Bacula File} +\label{FDDef} + {\bf Bacula File} (Datei)-Dienste (bzw. Client-Programme) sind jene +Programme, die auf den Rechnern installiert sind, deren Daten gesichert +werden sollen. Sie sind je nach Betriebssystem verschieden, immer aber +verantwortlich f\"{u}r die Auslieferung der Daten und deren Attribute, die der +Director von ihnen anfordert. Die Datendienste sind auch f\"{u}r den +betriebssystemabh\"{a}ngigen Teil der Wiederherstellung der Daten und deren +Attribute im Falle einer Wiederherstellung zust\"{a}ndig. N\"{a}heres +hierzu im Dokument ``File Services Daemon Design'' im ``Bacula Developer's +Guide''. Auf dem Rechner, dessen Daten gesichert werden sollen, l\"{a}uft dieses +Programm als D\"{a}monprozess. Der File-D\"{a}mon wird in dieser Dokumentation auch als +``Client'' bezeichnet (zum Beispiel in den Konfigurationsdatei von Bacula). +Ausser den Unix/Linux File-D\"{a}monen gibt es einen File-D\"{a}mon f\"{u}r Windows +(der in der Regel als kompiliertes Programm erh\"{a}ltlich ist). Der File-D\"{a}mon f\"{u}r Windows l\"{a}uft +unter allen g\"{a}ngigen Windows-Versionen (95, 98, Me, NT, 2000, XP). +\subsection*{Bacula Storage} +\label{SDDef} + Den {\bf Bacula Storage} (Sicherungs)-Dienst leisten Programme, die +Sicherung und Wiederherstellung der Dateien und ihrer Attribute auf das +physikalische Sicherungsmedium bzw. die \textbf{Volumes} leisten. Der Storage-D\"{a}mon ist also f\"{u}r das +Beschreiben und Lesen Ihrer B\"{a}nder (oder eines anderen Sicherungsmediums wie +z.B. Dateien) zust\"{a}ndig. N\"{a}heres hierzu im Kapitel ``Storage Services Daemon +Design'' im ``Bacula Developer's Guide''. Der Sicherungsdienst l\"{a}uft als +D\"{a}monprozess auf dem Rechner, der \"{u}ber das Datensicherungsger\"{a}t verf\"{u}gt (in der +Regel ein Bandlaufwerk). +\subsection*{Catalog} +\label{DBDefinition} + Die {\bf Catalog} (Verzeichnis)-Dienste werden von Programmen +geleistet, die f\"{u}r die Wartung der Datieindizes und \textbf{Volume}-Datenbanken +aller gesicherten Dateien zust\"{a}ndig sind. Über einen Verzeichnis-Dienst kann der +Systemadministrator oder Benutzer jede gew\"{u}nschte Datei rasch wiederfinden +und wiederherstellen. Durch den Verzeichnisdienst unterscheidet sich Bacula von +einfachen Sicherungsprogrammen wie ``tar'' oder ``bru'', da dieser +Dienst die Aufzeichnung aller verwendeten \textbf{Volumes}, aller gelaufener +Sicherungen und aller gesicherter Dateien pflegt und dadurch eine +effiziente Wiederherstellung und eine Verwaltung der Volumes erlaubt. Bacula +unterst\"{u}tzt momentan drei verschiedene Datenbanksysteme, MySQL, PostgreSQL +und SQLite, von denen eines vor der Kompilierung von {\bf Bacula} ausgew\"{a}hlt sein +muss. + +Die drei Datenbanksysteme (MySQL, PostgreSQL und SQLite), die z.Z. 
unterst\"{u}tzt +werden, haben eine ganze Reihe von Besonderheiten wie z.B. schnelle Indizierung, +Baumsuche und Sicherheitsfunktionen. Wir planen die Unterst\"{u}tzung weiterer +gr\"{o}{\ss}erer SQL-Datenbanksysteme, doch hat die momentane Bacula-Version nur +Schnittstellen zu MySQL, PostgreSQL und SQLite. N\"{a}heres hierzu im Kapitel +\ilink{``Catalog Services Design Document''}{_ChapterStart30}. + +Die RPM-Archive von MySQL und PostgreSQL sind Teil der RedHat-Linux- und mehrerer +anderer Distributionen, zudem ist die Erstellung der RPMs aus den Quelldateien +ganz einfach. N\"{a}heres hierzu im Kapitel \ilink{``Installation und Konfiguration +von MySQL''}{_ChapterStart} in diesem Handbuch. Weitere Informationen zu MySQL +im Internet: \elink{www.mysql.com}{http://www.mysql.com}. +Zu PostgreSQL lesen Sie bitte das Kapitel \ilink{``Installation und +Konfiguration von PostgreSQL''}{_ChapterStart10} in diesem Dokument. +Weiter Informationen zu PostgreSQL finden Sie hier: +\elink{www.postgresql.org}{http://www.postgresql.org}. + +Die Konfiguration und Installation eines SQLite-Datenbanksystems ist noch +einfacher. Einzelheiten dazu im Kapitel \ilink{``Installation und Konfiguration +von SQLite''}{_ChapterStart33} in diesem Handbuch. + +\subsection*{Bacula Monitor} +\label{MonDef} + Der {\bf Bacula Monitor}-Dienst ist das Programm, welches es dem +Administrator oder Benutzer erlaubt, den aktuellen Zustand des {\bf Bacula +Directors}, der {\bf Bacula File D\"{a}monen} und der {\bf Bacula Storage D\"{a}monen} +zu beobachten (siehe oben). Zur Zeit ist hierf\"{u}r nur eine GTK+-Version +verf\"{u}gbar, die auf Gnome und KDE aufsetzt (oder jedem anderen Fenstermanager, +der den Standard von FreeDesktop.org f\"{u}r das System-Tray unterst\"{u}tzt). + +Um erfolgreich sichern und wiederherstellen zu k\"{o}nnen, m\"{u}ssen die folgenden +vier D\"{a}monprozesse konfiguriert und gestartet sein: Der Director-D\"{a}mon, der +File-D\"{a}mon, der Storage-D\"{a}mon und MySQL, PostgreSQL oder SQLite. + +\section{Die Bacula Konfiguration} +\index[general]{Konfiguration!Die Bacula } +\index[general]{Die Bacula Konfiguration } + +Damit sich Bacula in Ihrem System zurechtfindet und es weiss welche +Client-Rechner wie zu sichern sind, m\"{u}ssen mehrere Konfigurationsdateien +erstellt werden, die ``Resourcen'' (bzw. ``Objekte'') enthalten. Die +folgende Abbildung gibt hierzu eine Übersicht: + +\addcontentsline{lof}{figure}{Bacula Objects} +\includegraphics{./bacula-objects.eps} + +\section{Die in diesem Dokument verwendeten Konventionen} +\index[general]{Die in diesem Dokument verwendeten Konventionen } +\index[general]{Dokument!verwendete Konventionen} + +{\bf Bacula} ist in der Entwicklung und daher wird dieses Handbuch nicht in +jedem Fall mit dem Stand des Programmcodes \"{u}bereinstimmen. Steht in diesem +Handbuch vor einem Abschnitt ein Stern (*), bedeutet dies, dass das Beschriebene +noch nicht implementiert ist. Die Kennzeichnung durch ein Pluszeichen (+) +bedeutet, dass die Funktion m\"{o}glicherweise teilweise implementiert ist. + +Wenn Sie dieses Handbuch als Teil eines offiziellen Release der +Software lesen, ist diese Kennzeichnung verl\"{a}{\ss}lich. Lesen Sie hingegen die +Online-Version dieses Handbuches auf \elink{ www.bacula.org}{http://www.bacula.org}, denken Sie +bitte daran, dass hier die aktuelle Entwicklungsversion (wie sie im CVS +vorhanden ist) beschrieben wird. In beiden F\"{a}llen wird aber das Handbuch dem +Code ein St\"{u}ckchen hinterherhinken. 
+ +\section{Quick Start} +\index[general]{Quick Start } +\index[general]{Start!Quick } + +Um Bacula schnell zu konfigurieren und zum Laufen zu bringen, empfehlen wir, +zuerst den untenstehenden Abschnitt mit den Fachausdr\"{u}cken und das n\"{a}chste +Kapitel \ilink{``Baculas gegenw\"{a}rtiger Zustand''}{_ChapterStart2} durchzusehen. + +Lesen Sie dann das Kapitel \ilink{``Mit Bacula beginnen''}{_ChapterStart37}, das +eine kurze Übersicht dar\"{u}ber gibt, wie man Bacula startet. Lesen +Sie danach das Kapitel \"{u}ber \ilink{``Die Installation von +Bacula''}{_ChapterStart17}, dann \ilink{``Die Konfiguration +von Bacula''}{_ChapterStart16} und schlie{\ss}lich das Kapitel \ilink{ +``Bacula in Betrieb nehmen''}{_ChapterStart1}. + +\section{Terminologie} +\index[general]{Terminologie } + +Um die Kommunikation \"{u}ber diese Projekt zu erleichtern, sind hier die +verwendeten Begriffe erl\"{a}utert + +\begin{description} + +\item [Administrator] + \index[fd]{Administrator } + Die Person bzw. die Personen, die f\"{u}r die Pflege des Bacula-Systems +verantwortlich sind. + +\item [Backup] + \index[fd]{Backup } + Wir verwenden den Ausdruck {\bf Backup} (Sicherung) wenn wir von einem +Bacula-Job sprechen, bei dem Dateien gesichert werden. + +\item [Bootstrap File] + \index[fd]{Bootstrap File } + Das bootstrap file (Bootstrap-Datei) ist eine ASCII-Datei, die in kompakter +Form jene Befehle enth\"{a}lt, mit denen Bacula oder das eigenst\"{a}ndige +Dateiextrahierungswerkzeug {\bf bextract} den Inhalt eines oder mehrerer +\textbf{Volumes} wiederherstellen kann, wie z.B. einen vorher gesicherten Systemzustand. +Mit einer Bootstrap-Datei kann Bacula Ihr System wiederherstellen, ohne auf +einen \textbf{Catalog} angewiesen zu sein. Aus einem \textbf{Catalog} kann eine Bootstrap-Datei +erzeugt werden, um jede gew\"{u}nschte Datei/Dateien zu entpacken. + +\item [Catalog] + \index[fd]{Catalog } + Der \textbf{Catalog} (das Verzeichnis) wird verwendet, um zusammenfassene +Informationen \"{u}ber Jobs, Clients und Dateien zu speichern und Informationen +dar\"{u}ber, in welchem \textbf{Volume} oder in welchen \textbf{Volumen} dies geschehen ist. Die +Informationen, die im \textbf{Catalog} gespeichert sind, erm\"{o}glichen es dem Administrator +bzw. Benutzer zu bestimmen, welche Jobs gelaufen sind, geben Auskunft \"{u}ber ihren +Status und wichtige Eigenschaften der gesicherten Dateien. Der Catalog ist eine +``online resource'', enth\"{a}lt aber nicht die Daten der gesicherten Dateien. Vieles +der \textbf{Catalog}-Informationen ist auch in den \textbf{Volumes} (z.B. auf den B\"{a}ndern) +gespeichert. Nat\"{u}rlich sind auf den B\"{a}ndern auch die Kopien der Dateien und +deren Attribute (siehe unten). + +Der \textbf{Catalog} ist ein Besonderheit Baculas, das es von einfachen Backup- und +Archiv-Programmen wie {\bf dump} und {\bf tar} unterscheidet. + +\item [Client] + \index[fd]{Client } + In Baculas Terminologie bezeichnet das Wort Client jenen Rechner, +dessen Daten gesichert werden. Client ist auch ein anderes Wort f\"{u}r +den File-Dienst oder File-D\"{a}mon, der oft auch nur mit FD bezeichnet wird. +Clients werden in einer Resource der Konfigurationsdatei definiert. + +\item [Console] + \index[fd]{Console } + Die Console(Konsole) ist ein Programm, das die Schnittstelle zum Director +bildet und \"{u}ber welches der Benutzer oder Systemadministrator Bacula +steuern kann. 
+ +\item [Daemon] + \index[fd]{Daemon } + (D\"{a}monprozess) ist ein Unix-Fachausdruck f\"{u}r ein Programm, das +st\"{a}ndig im Hintergrund l\"{a}uft um spezielle Aufgaben +auszuf\"{u}hren. Auf Windows- und manchen Linux-Systemen werden +D\"{a}monprozesse {\bf Services}(Dienste) genannt. + +\item [Directive] + \index[fd]{Directive } + Der Ausdruck directive (Anweisung) bezeichnet eine einzelne Angabe oder +eine Niederschrift innerhalb einer Resource einer Konfigurationsdatei, welche +einen speziellen Sachverhalt definiert. Beispielsweise definiert die {\bf +Name}-directive den Namen einer Resource. + +\item [Director] + \index[fd]{Director } + Baculas wichtigster D\"{a}monprozess, der alle Aktivit\"{a}ten des Bacula-Systems +zeitlich festlegt und beaufsichtigt. Gelegentlich bezeichnen wir ihn als +DIR. + +\item [Differential] + \index[fd]{Differential } + Differentiell ist eine Sicherung, wenn sie alle Dateien einbezieht, die +seit Beginn der letzten Vollsicherung ge\"{a}ndert wurden. Beachten Sie bitte, dass +dies von anderen Sicherungsprogrammen m\"{o}glicherweise anders definiert wird. + +\item [File Attributes] + \index[fd]{File Attributes } + File Attributes (Dateiattribute) sind all diejenigen Informationen, die +n\"{o}tig sind, um eine Datei zu identifizieren. Dazu geh\"{o}ren alle ihre +Eigenschaften wie Gr\"{o}{\ss}e, Zeitpunkt der Erzeugung, Zeitpunkt der letzten +Änderung, Berechtigungen, usw. Im Normalfall wird der Umgang mit den Attributen vollst\"{a}ndig von Bacula \"{u}bernommen, so dass sich der Benutzer dar\"{u}ber keine +Gedanken machen muss. Zu den Attributen geh\"{o}rt nicht der Inhalt der +Datei. + +\item [File Daemon] + \index[fd]{File Daemon } + Derjenige D\"{a}monprozess, welcher auf dem Client-Computer l\"{a}uft, dessen Daten +gesichert werden sollen. Wird manchmal auch als File-Service (Datendienst), +Client-Service (Client-Dienst) oder als FD bezeichnet. + + +\label{FileSet} +\item [FileSet] +\index[fd]{a name } +Ein FileSet (Zusammenstellung von Dateien) ist eine Resource einer +Konfigurationsdatei, die festlegt, welche Dateien gesichert werden sollen. +Sie besteht aus einer Liste mit eingeschlossenen Dateien oder +Verzeichnissen, einer Liste mit ausgeschlossenen Dateien und Informationen +dar\"{u}ber, wie diese Dateien zu sichern sind (komprimiert, verschl\"{u}sselt, +signiert). N\"{a}heres hierzu im Abschnitt \ilink{``Definition der FileSet +Resource''}{FileSetResource} im \textbf{Director}-Kapitel dieses Dokuments. + +\item [Incremental] + \index[fd]{Incremental } + Inkrementiell ist eine Sicherung dann, wenn sie alle Dateien einbezieht, die +seit Beginn der letzten vollen, differentiellen oder inkrementiellen Sicherung +ge\"{a}ndert wurden. Normalerweise wird dies entweder durch die {\bf +Level}-Direktive innerhalb der Definition einer \textbf{Job Resource} oder in +einer \textbf{Schedule}-Resource festgelegt. + + +\label{JobDef} +\item [Job] +\index[fd]{a name } +Ein Bacula-Job ist eine Konfigurations-Resource, die die Art und +Weise definiert, in der Bacula die Daten eines bestimmten Client-Rechners +sichert oder wiederherstellt. Sie besteht aus den Definitionen des {\bf Type} +(Sicherung, Wiederherstellung, Überpr\"{u}fung, usw.), des {\bf Level} (voll, +inkrementiell,...), des {\bf FileSet} und des Speicherorts ({\bf Storage}) an +welchem die Dateien gesichert werden sollen (Speicherger\"{a}t, Media-Pool). N\"{a}heres +hierzu im Abschnitt \ilink{``Definition der Job-Resource'' +}{JobResource} im \textbf{Director}-Kapitel dieses Dokuments. 
+
+\item [Monitor]
+  \index[fd]{Monitor }
+  Dieses Programm hat eine Schnittstelle zu allen D\"{a}monprozessen, um dem
+Benutzer oder Systemadministrator die Beobachtung von Baculas Zustand zu
+erm\"{o}glichen.
+
+\item [Resource]
+  \index[fd]{Resource }
+  Eine \textbf{Resource} ist ein Teil einer Konfigurationsdatei, die eine
+bestimmte Informationseinheit definiert, die Bacula zur Verf\"{u}gung steht. Eine
+\textbf{Resource} enth\"{a}lt mehrere Direktiven (einzelne
+Konfigurations-Anweisungen). Die {\bf Job}-Resource beispielsweise definiert
+alle Eigenschaften eines bestimmten Jobs: Name, Zeitplan, Volume-Pool, Art der
+Sicherung, Level der Sicherung...
+
+\item [Restore]
+  \index[fd]{Restore }
+  ist eine \textbf{Resource} innerhalb einer Konfigurationsdatei, die den
+Vorgang der Wiederherstellung einer verlorenen oder besch\"{a}digten Datei von
+einem Sicherungsmedium beschreibt. Es ist der umgekehrte Vorgang wie bei
+einer Sicherung, au{\ss}er dass in den meisten F\"{a}llen bei einem \textbf{Restore} nur
+einige wenige Dateien wiederhergestellt werden, w\"{a}hrend bei einer Sicherung
+normalerweise alle Dateien eines Systems gesichert werden. Selbstverst\"{a}ndlich
+kann nach dem Ausfall einer Festplatte Bacula dazu benutzt werden, ein
+vollst\"{a}ndiges \textbf{Restore} aller im System vorhandenen Dateien auszuf\"{u}hren.
+
+\item [Schedule]
+  \index[fd]{Schedule }
+  Ein \textbf{Schedule} (Zeitplan) ist eine \textbf{Resource} innerhalb einer
+Konfigurationsdatei, die definiert, wann ein \textbf{Bacula-Job} ausgef\"{u}hrt
+wird. Hierzu benutzt die \textbf{Job-Resource} den Namen des \textbf{Schedules}.
+N\"{a}heres hierzu im Abschnitt \ilink{``Definition
+der Schedule-Resource''}{ScheduleResource} im
+``Director''-Kapitel dieses Handbuches.
+
+\item [Service]
+  \index[fd]{Service }
+  (Dienst) ist die Bezeichnung f\"{u}r einen {\bf Daemon} (D\"{a}monprozess) unter
+Windows (siehe oben). Diese Bezeichnung wird in letzter Zeit auch h\"{a}ufig in
+Unix-Umgebungen benutzt.
+
+\item [Storage Coordinates]
+  \index[fd]{Storage Coordinates }
+  Diejenige Information, die der \textbf{Storage-Dienst} zur\"{u}ckgibt und eine
+Datei eindeutig im Sicherungsmedium lokalisiert. Sie besteht aus einem Teil, der
+zu jeder gespeicherten Datei geh\"{o}rt, und einem Teil, der zum ganzen Job
+geh\"{o}rt.
+Normalerweise wird diese Information im \textbf{Catalog} gespeichert, so dass der
+Benutzer keine besonderen Kenntnisse der \textbf{Storage Coordinates} braucht.
+Zu den \textbf{Storage Coordinates} geh\"{o}ren die Dateiattribute und der eindeutige Ort der Sicherung auf dem Sicherungs-Volume.
+
+\item [Storage Daemon]
+  \index[fd]{Storage Daemon }
+  Der \textbf{Storage daemon} (Speicherd\"{a}mon), manchmal auch mit SD bezeichnet,
+ist jenes Programm, das die Attribute und die Daten auf ein Sicherungs-Volume
+schreibt (normalerweise ein Band oder eine Festplatte).
+
+\item [Session]
+  \index[sd]{Session } (Sitzung) bezeichnet in der Regel die interne Kommunikation
+zwischen dem File-D\"{a}mon und dem Storage-D\"{a}mon. Der File-D\"{a}mon er\"{o}ffnet
+eine {\bf Session} mit dem Storage-D\"{a}mon, um ein \textbf{FileSet} zu sichern
+oder wiederherzustellen. Jede \textbf{Session} entspricht einem \textbf{Bacula-Job} (siehe oben).
+
+\item [Verify]
+  \index[sd]{Verify }
+  Ein \textbf{Verify} ist ein Job, bei dem die aktuellen Dateiattribute mit
+jenen verglichen werden, die zuvor im \textbf{Bacula-Catalog}
+hinterlegt worden sind.
+Diese Funktion kann verwendet werden, um Änderungen an
+wichtigen Systemdateien zu erkennen und ist damit {\bf Tripwire} \"{a}hnlich. Einer
+der gr\"{o}{\ss}eren Vorteile dieser Funktionalit\"{a}t ist es, dass es gen\"{u}gt,
+auf dem Rechner, den man sch\"{u}tzen will, den \textbf{File-D\"{a}mon} laufen zu haben.
+\textbf{Director}, \textbf{Storage-D\"{a}mon} und der \textbf{Catalog} sind auf
+einem anderen Rechner installiert. Wenn der Server dann gef\"{a}hrdet wird, ist es
+\"{a}u{\ss}erst unwahrscheinlich, dass die Datenbank mit den Verifikationen davon
+mitbetroffen ist.
+
+\textbf{Verify} kann auch zur Überpr\"{u}fung benutzt werden, ob die Daten des
+zuletzt gelaufenen Jobs mit denen \"{u}bereinstimmen, welche davon im \textbf{Catalog}
+gespeichert sind (es werden also die Dateiattribute verglichen). *\textbf{Verify}
+vergleicht auch den Inhalt eines Volumes mit den Originaldateien auf der
+Platte.
+
+\item [*Archive]
+  \index[fd]{*Archive }
+  Eine \textbf{Archive}-Funktion wird nach einer Sicherung durchgef\"{u}hrt. Dabei
+werden die \textbf{Volumes}, in denen die Daten gesichert sind, der aktiven
+Benutzung entzogen, als ``Archived'' gekennzeichnet und f\"{u}r weitere
+Sicherungen nicht mehr verwendet. Alle Dateien, die ein archiviertes
+\textbf{Volume} enth\"{a}lt, werden aus dem Catalog entfernt. NOCH NICHT
+IMPLEMENTIERT.
+
+\item [*Update]
+  \index[fd]{*Update }
+  Mit der \textbf{Update}-Funktion werden die Dateien auf dem entfernten
+Rechner durch die entsprechenden vom Host-Rechner ersetzt. Dies entspricht der
+Funktionalit\"{a}t von {\bf rdist}. NOCH NICHT IMPLEMENTIERT.
+
+\item [Retention Period]
+  \index[fd]{Retention Period }
+  Bacula kennt verschiedene Arten von \textbf{Retention Periods} (Zeitspannen,
+w\"{a}hrend derer etwas bewahrt wird; Aufbewahrungszeiten). Die wichtigsten sind
+die {\bf File Retention Period}, die {\bf Job Retention Period} und die {\bf
+Volume Retention Period}. Jede dieser Retention-Periods bezieht sich auf die
+Zeit, w\"{a}hrend der bestimmte Aufzeichnungen in der \textbf{Catalog}-Datenbank
+gehalten werden. Dies sollte nicht mit jener Zeit verwechselt werden, w\"{a}hrend
+der die Daten eines Volumes g\"{u}ltig sind.
+
+Die \textbf{File Retention Period} bestimmt, wie lange die Eintr\"{a}ge zu den
+Dateien in der \textbf{Catalog}-Datenbank gehalten werden. Diese Zeitspanne ist
+wichtig, da diese Eintr\"{a}ge bei weitem den gr\"{o}{\ss}ten Teil des Speicherplatzes in
+der Datenbank belegen. Daher muss gew\"{a}hrleistet sein, dass \"{u}berfl\"{u}ssige oder
+obsolete Eintr\"{a}ge regelm\"{a}{\ss}ig aus der Datenbank entfernt werden (hierzu N\"{a}heres im Abschnitt zum {\bf Retention}-Befehl in der Beschreibung der \textbf{Console}-Befehle).
+
+Die \textbf{Job Retention Period} ist die Zeitspanne, w\"{a}hrend der Eintr\"{a}ge zu
+den Jobs in der Datenbank gehalten werden. Beachten Sie, dass alle Dateieintr\"{a}ge
+mit dem Job, mit dem sie gesichert wurden, verbunden sind. Die Eintr\"{a}ge zu den
+Dateien k\"{o}nnen gel\"{o}scht sein, w\"{a}hrend die Aufzeichnungen zu den Jobs erhalten
+bleiben. In diesem Fall wird man Informationen \"{u}ber gelaufene Sicherungsjobs
+haben, jedoch keine Einzelheiten \"{u}ber die Dateien, die dabei gesichert
+wurden. Normalerweise werden mit dem L\"{o}schen eines
+Job-Eintrags auch alle seine Aufzeichnungen zu den Dateien gel\"{o}scht.
+
+Die \textbf{Volume Retention Period} bestimmt die Mindestzeit, w\"{a}hrend der
+ein bestimmtes \textbf{Volume} erhalten bleibt, bevor es wiederverwendet wird.
+
+Bacula wird in der Regel niemals ein \textbf{Volume} \"{u}berschreiben, das als
+einziges die Sicherungskopie einer bestimmten Datei enth\"{a}lt. Im Idealfall wird
+der \textbf{Catalog} f\"{u}r alle benutzten \textbf{Volumes} die Eintr\"{a}ge aller
+gesicherten Dateien enthalten. Wenn ein \textbf{Volume} \"{u}berschrieben wird,
+werden die Dateieintr\"{a}ge, die zuvor in ihm gespeichert waren, aus dem
+\textbf{Catalog} entfernt. Gibt es allerdings einen sehr gro{\ss}en Pool von
+\textbf{Volumes} oder gibt es \textbf{Volumes}, die nie \"{u}berschrieben werden,
+kann die \textbf{Catalog}-Datenbank riesig werden. Um den \textbf{Catalog} in
+einer handhabbaren Gr\"{o}{\ss}e zu halten, sollten Informationen zu den Sicherungen
+nach der definierten \textbf{File Retention Period} aus ihm entfernt werden.
+Bacula hat Mechanismen, um den \textbf{Catalog} entsprechend der
+definierten \textbf{Retention Periods} automatisch zu bereinigen.
+
+\item [Scan]
+  \index[sd]{Scan }
+  Bei einer \textbf{Scan}-Operation wird der Inhalt eines oder mehrerer
+\textbf{Volumes} durchsucht. Diese \textbf{Volumes} und die Informationen \"{u}ber
+die Dateien, welche sie enthalten, werden wieder in den Bacula-\textbf{Catalog}
+eingetragen. Danach k\"{o}nnen die Dateien in diesen \textbf{Volumes} auf
+einfache Weise wiederhergestellt werden. Diese Funktion ist besonders
+hilfreich, wenn bestimmte \textbf{Volumes} oder \textbf{Jobs} ihre \textbf{Retention Period}
+\"{u}berschritten haben und aus dem \textbf{Catalog} entfernt worden sind. Um die
+Daten von den \textbf{Volumes} in die Datenbank einzulesen, wird das Programm
+{\bf bscan} verwendet. N\"{a}heres hierzu im Abschnitt \ilink{bscan}{bscan} im Kapitel ``Bacula Hilfsprogramme'' dieses Handbuches.
+
+\item [Volume]
+  \index[sd]{Volume }
+  Ein \textbf{Volume} ist eine Einheit, auf der gesichert wird, normalerweise
+ein Band oder eine benannte Datei auf der Festplatte, auf welche/s Bacula die
+Daten einer oder mehrerer Sicherungsjobs speichert. Alle \textbf{Volumes}
+erhalten von Bacula eine digitale Kennzeichnung, so dass Bacula jederzeit wei{\ss},
+welches \textbf{Volume} es tats\"{a}chlich liest. (Normalerweise sollte es mit
+Dateien auf der Festplatte keine Verwechslungen geben, doch bei B\"{a}ndern mountet
+man aus Versehen leicht das Falsche).
+\end{description}
+
+\section{Was Bacula nicht ist}
+\index[general]{Was Bacula nicht ist }
+\index[general]{nicht ist!Was Bacula }
+
+{\bf Bacula} ist ein Sicherungs-, Wiederherstellungs- und Verifikationsprogramm, aber von sich aus noch kein komplettes Rettungsprogramm f\"{u}r den Katastrophenfall. Allerdings kann
+Bacula Teil eines Rettungsprogramms sein, falls Sie sorgf\"{a}ltig planen und die
+Anweisungen im Kapitel \ilink{Disaster Recovery}{_ChapterStart38} dieses
+Handbuches beachten.
+
+Bei sorgf\"{a}ltiger Planung, wie sie im Kapitel ``Disaster Recovery'' dargestellt
+ist, kann {\bf Bacula} ein wesentlicher Bestandteil eines
+Rettungssystems sein. Wenn Sie zum Beispiel eine Bootdiskette
+erstellt haben, dazu eine Bacula-Rettungs-CD, auf der Sie die aktuellen
+Partitionsdaten Ihrer Festplatte gespeichert haben, und eine komplette
+Bacula-Sicherung vorhalten, ist es m\"{o}glich, Ihr System auf einer leeren Festplatte
+wiederherzustellen.
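+
+Eine solche Wiederherstellung kann sich insbesondere auf die im Abschnitt
+``Terminologie'' beschriebene \textbf{Bootstrap}-Datei st\"{u}tzen. Ein stark
+vereinfachtes, hypothetisches Beispiel einer solchen Datei (die konkreten
+Werte schreibt Bacula selbst) k\"{o}nnte etwa so aussehen:
+
+\footnotesize
+\begin{verbatim}
+Volume="Vol-0001"
+MediaType="File"
+VolSessionId=1
+VolSessionTime=1197550000
+FileIndex=1-250
+\end{verbatim}
+\normalsize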
+
+Wenn Sie den Befehl {\bf WriteBootstrap} in einem Ihrer Sicherungs-Jobs
+verwendet oder auf irgendeine andere Art eine g\"{u}ltige
+\textbf{Bootstrap}-Datei gesichert haben, werden Sie damit in der Lage sein,
+die notwendigen Dateien zu entpacken (ohne den \textbf{Catalog} zu verwenden
+oder von Hand nach Dateien suchen zu m\"{u}ssen).
+
+\newpage
+
+\section{Interaktionen zwischen den Bacula-Diensten}
+\index[general]{Interaktionen zwischen den Bacula-Diensten }
+\index[general]{Diensten!Interaktionen zwischen den Bacula- }
+
+Das untenstehende Diagramm zeigt typische Interaktionen zwischen den einzelnen
+Bacula-Diensten bei einem Sicherungs-Job. Jeder Block steht ungef\"{a}hr f\"{u}r einen
+eigenen Prozess (normalerweise ein D\"{a}mon). Im Gro{\ss}en und Ganzen hat der
+Director den Überblick \"{u}ber die Aktionen und pflegt die
+Katalog-Datenbank.
+
+\addcontentsline{lof}{figure}{Interaktionen zwischen den Bacula-Diensten}
+\includegraphics{./flow.eps}
diff --git a/docs/manuals/de/concepts/gpl.tex b/docs/manuals/de/concepts/gpl.tex
new file mode 100644
index 00000000..a368afc7
--- /dev/null
+++ b/docs/manuals/de/concepts/gpl.tex
@@ -0,0 +1,420 @@
+%%
+%%
+
+\section*{GNU General Public License}
+\label{GplChapter}
+\index[general]{GNU General Public License }
+\index[general]{License!GNU General Public }
+
+\elink{image of a Philosophical
+GNU}{http://www.gnu.org/graphics/philosophicalgnu.html}
+
+\begin{itemize}
+\item
+  \elink{What to do if you see a possible GPL
+  violation}{http://www.gnu.org/copyleft/gpl-violation.html}
+\item
+  \elink{Translations of the
+  GPL}{http://www.gnu.org/copyleft/copyleft.html\#translations}
+\end{itemize}
+
+
+\section{Table of Contents}
+\index[general]{Table of Contents }
+\index[general]{Contents!Table of }
+
+\begin{itemize}
+\item
+  \label{TOC1}
+  \ilink{GNU GENERAL PUBLIC LICENSE}{SEC1}
+
+\begin{itemize}
+\item
+  \label{TOC2}
+  \ilink{Preamble}{SEC2}
+\item
+  \label{TOC3}
+  \ilink{TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND
+MODIFICATION}{SEC3}
+\item
+  \label{TOC4}
+  \ilink{How to Apply These Terms to Your New Programs}{SEC4}
+\end{itemize}
+
+\end{itemize}
+
+
+\section{GNU GENERAL PUBLIC LICENSE}
+\label{SEC1}
+\index[general]{GNU GENERAL PUBLIC LICENSE }
+\index[general]{LICENSE!GNU GENERAL PUBLIC }
+
+Version 2, June 1991
+
+\footnotesize
+\begin{verbatim}
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+\end{verbatim}
+\normalsize
+
+\section{Preamble}
+\label{SEC2}
+\index[general]{Preamble }
+
+The licenses for most software are designed to take away your freedom to share
+and change it. By contrast, the GNU General Public License is intended to
+guarantee your freedom to share and change free software\verb:--:to make sure the
+software is free for all its users. This General Public License applies to
+most of the Free Software Foundation's software and to any other program whose
+authors commit to using it. (Some other Free Software Foundation software is
+covered by the GNU Library General Public License instead.) You can apply it
+to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price.
Our +General Public Licenses are designed to make sure that you have the freedom to +distribute copies of free software (and charge for this service if you wish), +that you receive source code or can get it if you want it, that you can change +the software or use pieces of it in new free programs; and that you know you +can do these things. + +To protect your rights, we need to make restrictions that forbid anyone to +deny you these rights or to ask you to surrender the rights. These +restrictions translate to certain responsibilities for you if you distribute +copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for +a fee, you must give the recipients all the rights that you have. You must +make sure that they, too, receive or can get the source code. And you must +show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) +offer you this license which gives you legal permission to copy, distribute +and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that +everyone understands that there is no warranty for this free software. If the +software is modified by someone else and passed on, we want its recipients to +know that what they have is not the original, so that any problems introduced +by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We +wish to avoid the danger that redistributors of a free program will +individually obtain patent licenses, in effect making the program proprietary. +To prevent this, we have made it clear that any patent must be licensed for +everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification +follow. + +\section{TERMS AND CONDITIONS} +\label{SEC3} +\index[general]{CONDITIONS!TERMS AND } +\index[general]{TERMS AND CONDITIONS } + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +{\bf 0.} This License applies to any program or other work which contains a +notice placed by the copyright holder saying it may be distributed under the +terms of this General Public License. The "Program", below, refers to any +such program or work, and a "work based on the Program" means either the +Program or any derivative work under copyright law: that is to say, a work +containing the Program or a portion of it, either verbatim or with +modifications and/or translated into another language. (Hereinafter, +translation is included without limitation in the term "modification".) Each +licensee is addressed as "you". + +Activities other than copying, distribution and modification are not covered +by this License; they are outside its scope. The act of running the Program is +not restricted, and the output from the Program is covered only if its +contents constitute a work based on the Program (independent of having been +made by running the Program). Whether that is true depends on what the Program +does. 
+ +{\bf 1.} You may copy and distribute verbatim copies of the Program's source +code as you receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice and +disclaimer of warranty; keep intact all the notices that refer to this License +and to the absence of any warranty; and give any other recipients of the +Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and you may +at your option offer warranty protection in exchange for a fee. + +{\bf 2.} You may modify your copy or copies of the Program or any portion of +it, thus forming a work based on the Program, and copy and distribute such +modifications or work under the terms of Section 1 above, provided that you +also meet all of these conditions: + +\begin{itemize} +\item {\bf a)} You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + +\item {\bf b)} You must cause any work that you distribute or publish, that + in whole or in part contains or is derived from the Program or any part + thereof, to be licensed as a whole at no charge to all third parties under + the terms of this License. + +\item {\bf c)} If the modified program normally reads commands interactively + when run, you must cause it, when started running for such interactive use in + the most ordinary way, to print or display an announcement including an + appropriate copyright notice and a notice that there is no warranty (or else, + saying that you provide a warranty) and that users may redistribute the + program under these conditions, and telling the user how to view a copy of + this License. (Exception: if the Program itself is interactive but does not + normally print such an announcement, your work based on the Program is not + required to print an announcement.) +\end{itemize} + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Program, and can be reasonably +considered independent and separate works in themselves, then this License, +and its terms, do not apply to those sections when you distribute them as +separate works. But when you distribute the same sections as part of a whole +which is a work based on the Program, the distribution of the whole must be on +the terms of this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise the +right to control the distribution of derivative or collective works based on +the Program. + +In addition, mere aggregation of another work not based on the Program with +the Program (or with a work based on the Program) on a volume of a storage or +distribution medium does not bring the other work under the scope of this +License. 
+ +{\bf 3.} You may copy and distribute the Program (or a work based on it, under +Section 2) in object code or executable form under the terms of Sections 1 and +2 above provided that you also do one of the following: + +\begin{itemize} +\item {\bf a)} Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections 1 and 2 + above on a medium customarily used for software interchange; or, + +\item {\bf b)} Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your cost of + physically performing source distribution, a complete machine-readable copy of + the corresponding source code, to be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + +\item {\bf c)} Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is allowed only + for noncommercial distribution and only if you received the program in object + code or executable form with such an offer, in accord with Subsection b + above.) +\end{itemize} + +The source code for a work means the preferred form of the work for making +modifications to it. For an executable work, complete source code means all +the source code for all modules it contains, plus any associated interface +definition files, plus the scripts used to control compilation and +installation of the executable. However, as a special exception, the source +code distributed need not include anything that is normally distributed (in +either source or binary form) with the major components (compiler, kernel, and +so on) of the operating system on which the executable runs, unless that +component itself accompanies the executable. + +If distribution of executable or object code is made by offering access to +copy from a designated place, then offering equivalent access to copy the +source code from the same place counts as distribution of the source code, +even though third parties are not compelled to copy the source along with the +object code. + +{\bf 4.} You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt otherwise to +copy, modify, sublicense or distribute the Program is void, and will +automatically terminate your rights under this License. However, parties who +have received copies, or rights, from you under this License will not have +their licenses terminated so long as such parties remain in full compliance. + +{\bf 5.} You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or distribute +the Program or its derivative works. These actions are prohibited by law if +you do not accept this License. Therefore, by modifying or distributing the +Program (or any work based on the Program), you indicate your acceptance of +this License to do so, and all its terms and conditions for copying, +distributing or modifying the Program or works based on it. + +{\bf 6.} Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the original +licensor to copy, distribute or modify the Program subject to these terms and +conditions. You may not impose any further restrictions on the recipients' +exercise of the rights granted herein. 
You are not responsible for enforcing +compliance by third parties to this License. + +{\bf 7.} If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or otherwise) +that contradict the conditions of this License, they do not excuse you from +the conditions of this License. If you cannot distribute so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not distribute the Program at all. +For example, if a patent license would not permit royalty-free redistribution +of the Program by all those who receive copies directly or indirectly through +you, then the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply and +the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents or +other property right claims or to contest validity of any such claims; this +section has the sole purpose of protecting the integrity of the free software +distribution system, which is implemented by public license practices. Many +people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose that +choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. + +{\bf 8.} If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the original +copyright holder who places the Program under this License may add an explicit +geographical distribution limitation excluding those countries, so that +distribution is permitted only in or among countries not thus excluded. In +such case, this License incorporates the limitation as if written in the body +of this License. + +{\bf 9.} The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will be +similar in spirit to the present version, but may differ in detail to address +new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any later +version", you have the option of following the terms and conditions either of +that version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of this License, +you may choose any version ever published by the Free Software Foundation. + +{\bf 10.} If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author to +ask for permission. For software which is copyrighted by the Free Software +Foundation, write to the Free Software Foundation; we sometimes make +exceptions for this. 
Our decision will be guided by the two goals of +preserving the free status of all derivatives of our free software and of +promoting the sharing and reuse of software generally. + +{\bf NO WARRANTY} + +{\bf 11.} BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE +THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR +IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO +THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM +PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. + +{\bf 12.} IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO +LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR +THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + +END OF TERMS AND CONDITIONS + +\section{How to Apply These Terms to Your New Programs} +\label{SEC4} +\index[general]{Programs!How to Apply These Terms to Your New } +\index[general]{How to Apply These Terms to Your New Programs } + +If you develop a new program, and you want it to be of the greatest possible +use to the public, the best way to achieve this is to make it free software +which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach +them to the start of each source file to most effectively convey the exclusion +of warranty; and each file should have at least the "copyright" line and a +pointer to where the full notice is found. + +\footnotesize +\begin{verbatim} +{\em one line to give the program's name and an idea of what it does.} +Copyright (C) {\em yyyy} {\em name of author} +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +02110-1301 USA +\end{verbatim} +\normalsize + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this when it +starts in an interactive mode: + +\footnotesize +\begin{verbatim} +Gnomovision version 69, Copyright (C) {\em year} {\em name of author} +Gnomovision comes with ABSOLUTELY NO WARRANTY; for details +type `show w'. This is free software, and you are welcome +to redistribute it under certain conditions; type `show c' +for details. 
+\end{verbatim} +\normalsize + +The hypothetical commands {\tt `show w'} and {\tt `show c'} should show the +appropriate parts of the General Public License. Of course, the commands you +use may be called something other than {\tt `show w'} and {\tt `show c'}; they +could even be mouse-clicks or menu items\verb:--:whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + +\footnotesize +\begin{verbatim} +Yoyodyne, Inc., hereby disclaims all copyright +interest in the program `Gnomovision' +(which makes passes at compilers) written +by James Hacker. +{\em signature of Ty Coon}, 1 April 1989 +Ty Coon, President of Vice +\end{verbatim} +\normalsize + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General Public +License instead of this License. +Return to +\elink{GNU's home page}{http://www.gnu.org/home.html}. + +FSF \& GNU inquiries \& questions to +\elink{gnu@gnu.org}{mailto:gnu@gnu.org}. Other +\elink{ways to contact}{http://www.gnu.org/home.html\#ContactInfo} the FSF. + +Comments on these web pages to +\elink{webmasters@www.gnu.org}{mailto:webmasters@www.gnu.org}, send other +questions to +\elink{gnu@gnu.org}{mailto:gnu@gnu.org}. + +Copyright notice above. +Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +Boston, MA 02110-1301 USA + +Updated: 3 Jan 2000 rms diff --git a/docs/manuals/de/concepts/index.perl b/docs/manuals/de/concepts/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/de/concepts/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. 
+# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. +# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. 
+ if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. + $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. + # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. + s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. 
In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/de/concepts/latex2html-init.pl b/docs/manuals/de/concepts/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/de/concepts/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/de/concepts/lesser.tex b/docs/manuals/de/concepts/lesser.tex new file mode 100644 index 00000000..6fcc81ed --- /dev/null +++ b/docs/manuals/de/concepts/lesser.tex @@ -0,0 +1,573 @@ +%% +%% + +\section*{GNU Lesser General Public License} +\label{LesserChapter} +\index[general]{GNU Lesser General Public License } +\index[general]{License!GNU Lesser General Public } + +\elink{image of a Philosophical GNU} +{\url{http://www.gnu.org/graphics/philosophicalgnu.html}} [ +\elink{English}{\url{http://www.gnu.org/copyleft/lesser.html}} | +\elink{Japanese}{\url{http://www.gnu.org/copyleft/lesser.ja.html}} ] + +\begin{itemize} +\item + \elink{Why you shouldn't use the Lesser GPL for your next + library}{\url{http://www.gnu.org/philosophy/why-not-lgpl.html}} +\item + \elink{What to do if you see a possible LGPL + violation}{\url{http://www.gnu.org/copyleft/gpl-violation.html}} +\item + \elink{Translations of the LGPL} +{\url{http://www.gnu.org/copyleft/copyleft.html\#translationsLGPL}} +\item The GNU Lesser General Public License as a + \elink{text file}{\url{http://www.gnu.org/copyleft/lesser.txt}} +\item The GNU Lesser General Public License as a + \elink{Texinfo}{\url{http://www.gnu.org/copyleft/lesser.texi}} file + \end{itemize} + + +This GNU Lesser General Public License counts as the successor of the GNU +Library General Public License. For an explanation of why this change was +necessary, read the +\elink{Why you shouldn't use the Lesser GPL for your next +library}{\url{http://www.gnu.org/philosophy/why-not-lgpl.html}} article. + +\section{Table of Contents} +\index[general]{Table of Contents } +\index[general]{Contents!Table of } + +\begin{itemize} +\item + \label{TOC12} + \ilink{GNU LESSER GENERAL PUBLIC LICENSE}{SEC12} + +\begin{itemize} +\item + \label{TOC23} + \ilink{Preamble}{SEC23} +\item + \label{TOC34} + \ilink{TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND +MODIFICATION}{SEC34} +\item + \label{TOC45} + \ilink{How to Apply These Terms to Your New Libraries}{SEC45} +\end{itemize} + +\end{itemize} + + +\section{GNU LESSER GENERAL PUBLIC LICENSE} +\label{SEC12} +\index[general]{LICENSE!GNU LESSER GENERAL PUBLIC } +\index[general]{GNU LESSER GENERAL PUBLIC LICENSE } + +Version 2.1, February 1999 + +\footnotesize +\begin{verbatim} +Copyright (C) 1991, 1999 Free Software Foundation, Inc. +51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] +\end{verbatim} +\normalsize + +\section{Preamble} +\label{SEC23} +\index[general]{Preamble } + +The licenses for most software are designed to take away your freedom to share +and change it. 
By contrast, the GNU General Public Licenses are intended to +guarantee your freedom to share and change free software\verb:--:to make sure the +software is free for all its users. + +This license, the Lesser General Public License, applies to some specially +designated software packages\verb:--:typically libraries\verb:--:of the Free Software +Foundation and other authors who decide to use it. You can use it too, but we +suggest you first think carefully about whether this license or the ordinary +General Public License is the better strategy to use in any particular case, +based on the explanations below. + +When we speak of free software, we are referring to freedom of use, not price. +Our General Public Licenses are designed to make sure that you have the +freedom to distribute copies of free software (and charge for this service if +you wish); that you receive source code or can get it if you want it; that you +can change the software and use pieces of it in new free programs; and that +you are informed that you can do these things. + +To protect your rights, we need to make restrictions that forbid distributors +to deny you these rights or to ask you to surrender these rights. These +restrictions translate to certain responsibilities for you if you distribute +copies of the library or if you modify it. + +For example, if you distribute copies of the library, whether gratis or for a +fee, you must give the recipients all the rights that we gave you. You must +make sure that they, too, receive or can get the source code. If you link +other code with the library, you must provide complete object files to the +recipients, so that they can relink them with the library after making changes +to the library and recompiling it. And you must show them these terms so they +know their rights. + +We protect your rights with a two-step method: (1) we copyright the library, +and (2) we offer you this license, which gives you legal permission to copy, +distribute and/or modify the library. + +To protect each distributor, we want to make it very clear that there is no +warranty for the free library. Also, if the library is modified by someone +else and passed on, the recipients should know that what they have is not the +original version, so that the original author's reputation will not be +affected by problems that might be introduced by others. + +Finally, software patents pose a constant threat to the existence of any free +program. We wish to make sure that a company cannot effectively restrict the +users of a free program by obtaining a restrictive license from a patent +holder. Therefore, we insist that any patent license obtained for a version of +the library must be consistent with the full freedom of use specified in this +license. + +Most GNU software, including some libraries, is covered by the ordinary GNU +General Public License. This license, the GNU Lesser General Public License, +applies to certain designated libraries, and is quite different from the +ordinary General Public License. We use this license for certain libraries in +order to permit linking those libraries into non-free programs. + +When a program is linked with a library, whether statically or using a shared +library, the combination of the two is legally speaking a combined work, a +derivative of the original library. The ordinary General Public License +therefore permits such linking only if the entire combination fits its +criteria of freedom. 
The Lesser General Public License permits more lax +criteria for linking other code with the library. + +We call this license the "Lesser" General Public License because it does +Less to protect the user's freedom than the ordinary General Public License. +It also provides other free software developers Less of an advantage over +competing non-free programs. These disadvantages are the reason we use the +ordinary General Public License for many libraries. However, the Lesser +license provides advantages in certain special circumstances. + +For example, on rare occasions, there may be a special need to encourage the +widest possible use of a certain library, so that it becomes a de-facto +standard. To achieve this, non-free programs must be allowed to use the +library. A more frequent case is that a free library does the same job as +widely used non-free libraries. In this case, there is little to gain by +limiting the free library to free software only, so we use the Lesser General +Public License. + +In other cases, permission to use a particular library in non-free programs +enables a greater number of people to use a large body of free software. For +example, permission to use the GNU C Library in non-free programs enables many +more people to use the whole GNU operating system, as well as its variant, the +GNU/Linux operating system. + +Although the Lesser General Public License is Less protective of the users' +freedom, it does ensure that the user of a program that is linked with the +Library has the freedom and the wherewithal to run that program using a +modified version of the Library. + +The precise terms and conditions for copying, distribution and modification +follow. Pay close attention to the difference between a "work based on the +library" and a "work that uses the library". The former contains code +derived from the library, whereas the latter must be combined with the library +in order to run. + +\section{TERMS AND CONDITIONS} +\label{SEC34} +\index[general]{CONDITIONS!TERMS AND } +\index[general]{TERMS AND CONDITIONS } + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +{\bf 0.} This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or other +authorized party saying it may be distributed under the terms of this Lesser +General Public License (also called "this License"). Each licensee is +addressed as "you". + +A "library" means a collection of software functions and/or data prepared so +as to be conveniently linked with application programs (which use some of +those functions and data) to form executables. + +The "Library", below, refers to any such software library or work which has +been distributed under these terms. A "work based on the Library" means +either the Library or any derivative work under copyright law: that is to say, +a work containing the Library or a portion of it, either verbatim or with +modifications and/or translated straightforwardly into another language. +(Hereinafter, translation is included without limitation in the term +"modification".) + +"Source code" for a work means the preferred form of the work for making +modifications to it. For a library, complete source code means all the source +code for all modules it contains, plus any associated interface definition +files, plus the scripts used to control compilation and installation of the +library. 
+ +Activities other than copying, distribution and modification are not covered +by this License; they are outside its scope. The act of running a program +using the Library is not restricted, and output from such a program is covered +only if its contents constitute a work based on the Library (independent of +the use of the Library in a tool for writing it). Whether that is true depends +on what the Library does and what the program that uses the Library does. + +{\bf 1.} You may copy and distribute verbatim copies of the Library's complete +source code as you receive it, in any medium, provided that you conspicuously +and appropriately publish on each copy an appropriate copyright notice and +disclaimer of warranty; keep intact all the notices that refer to this License +and to the absence of any warranty; and distribute a copy of this License +along with the Library. + +You may charge a fee for the physical act of transferring a copy, and you may +at your option offer warranty protection in exchange for a fee. + +{\bf 2.} You may modify your copy or copies of the Library or any portion of +it, thus forming a work based on the Library, and copy and distribute such +modifications or work under the terms of Section 1 above, provided that you +also meet all of these conditions: + +\begin{itemize} +\item {\bf a)} The modified work must itself be a software library. +\item {\bf b)} You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. +\item {\bf c)} You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. +\item {\bf d)} If a facility in the modified Library refers to a function or + a table of data to be supplied by an application program that uses the + facility, other than as an argument passed when the facility is invoked, then +you must make a good faith effort to ensure that, in the event an application +does not supply such function or table, the facility still operates, and +performs whatever part of its purpose remains meaningful. + +(For example, a function in a library to compute square roots has a purpose +that is entirely well-defined independent of the application. Therefore, +Subsection 2d requires that any application-supplied function or table used +by this function must be optional: if the application does not supply it, the +square root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Library, and can be reasonably +considered independent and separate works in themselves, then this License, +and its terms, do not apply to those sections when you distribute them as +separate works. But when you distribute the same sections as part of a whole +which is a work based on the Library, the distribution of the whole must be +on the terms of this License, whose permissions for other licensees extend to +the entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise the +right to control the distribution of derivative or collective works based on +the Library. 
+ +In addition, mere aggregation of another work not based on the Library with +the Library (or with a work based on the Library) on a volume of a storage or +distribution medium does not bring the other work under the scope of this +License. +\end{itemize} + +{\bf 3.} You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do this, +you must alter all the notices that refer to this License, so that they refer +to the ordinary GNU General Public License, version 2, instead of to this +License. (If a newer version than version 2 of the ordinary GNU General Public +License has appeared, then you can specify that version instead if you wish.) +Do not make any other change in these notices. + +Once this change is made in a given copy, it is irreversible for that copy, so +the ordinary GNU General Public License applies to all subsequent copies and +derivative works made from that copy. + +This option is useful when you wish to copy part of the code of the Library +into a program that is not a library. + +{\bf 4.} You may copy and distribute the Library (or a portion or derivative +of it, under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you accompany it with the complete +corresponding machine-readable source code, which must be distributed under +the terms of Sections 1 and 2 above on a medium customarily used for software +interchange. + +If distribution of object code is made by offering access to copy from a +designated place, then offering equivalent access to copy the source code from +the same place satisfies the requirement to distribute the source code, even +though third parties are not compelled to copy the source along with the +object code. + +{\bf 5.} A program that contains no derivative of any portion of the Library, +but is designed to work with the Library by being compiled or linked with it, +is called a "work that uses the Library". Such a work, in isolation, is not +a derivative work of the Library, and therefore falls outside the scope of +this License. + +However, linking a "work that uses the Library" with the Library creates an +executable that is a derivative of the Library (because it contains portions +of the Library), rather than a "work that uses the library". The executable +is therefore covered by this License. Section 6 states terms for distribution +of such executables. + +When a "work that uses the Library" uses material from a header file that is +part of the Library, the object code for the work may be a derivative work of +the Library even though the source code is not. Whether this is true is +especially significant if the work can be linked without the Library, or if +the work is itself a library. The threshold for this to be true is not +precisely defined by law. + +If such an object file uses only numerical parameters, data structure layouts +and accessors, and small macros and small inline functions (ten lines or less +in length), then the use of the object file is unrestricted, regardless of +whether it is legally a derivative work. (Executables containing this object +code plus portions of the Library will still fall under Section 6.) + +Otherwise, if the work is a derivative of the Library, you may distribute the +object code for the work under the terms of Section 6. Any executables +containing that work also fall under Section 6, whether or not they are linked +directly with the Library itself. 
+ +{\bf 6.} As an exception to the Sections above, you may also combine or link a +"work that uses the Library" with the Library to produce a work containing +portions of the Library, and distribute that work under terms of your choice, +provided that the terms permit modification of the work for the customer's own +use and reverse engineering for debugging such modifications. + +You must give prominent notice with each copy of the work that the Library is +used in it and that the Library and its use are covered by this License. You +must supply a copy of this License. If the work during execution displays +copyright notices, you must include the copyright notice for the Library among +them, as well as a reference directing the user to the copy of this License. +Also, you must do one of these things: + +\begin{itemize} +\item {\bf a)} Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever changes were + used in the work (which must be distributed under Sections 1 and 2 above); +and, if the work is an executable linked with the Library, with the complete +machine-readable "work that uses the Library", as object code and/or source +code, so that the user can modify the Library and then relink to produce a +modified executable containing the modified Library. (It is understood that +the user who changes the contents of definitions files in the Library will +not necessarily be able to recompile the application to use the modified +definitions.) +\item {\bf b)} Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a copy of the + library already present on the user's computer system, rather than copying +library functions into the executable, and (2) will operate properly with a +modified version of the library, if the user installs one, as long as the +modified version is interface-compatible with the version that the work was +made with. +\item {\bf c)} Accompany the work with a written offer, valid for at least + three years, to give the same user the materials specified in Subsection 6a, + above, for a charge no more than the cost of performing this distribution. +\item {\bf d)} If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above specified + materials from the same place. +\item {\bf e)} Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + \end{itemize} + +For an executable, the required form of the "work that uses the Library" +must include any data and utility programs needed for reproducing the +executable from it. However, as a special exception, the materials to be +distributed need not include anything that is normally distributed (in either +source or binary form) with the major components (compiler, kernel, and so on) +of the operating system on which the executable runs, unless that component +itself accompanies the executable. + +It may happen that this requirement contradicts the license restrictions of +other proprietary libraries that do not normally accompany the operating +system. Such a contradiction means you cannot use both them and the Library +together in an executable that you distribute. 
+ +{\bf 7.} You may place library facilities that are a work based on the Library +side-by-side in a single library together with other library facilities not +covered by this License, and distribute such a combined library, provided that +the separate distribution of the work based on the Library and of the other +library facilities is otherwise permitted, and provided that you do these two +things: + +\begin{itemize} +\item {\bf a)} Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library facilities. This must + be distributed under the terms of the Sections above. +\item {\bf b)} Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining where to find + the accompanying uncombined form of the same work. +\end{itemize} + +{\bf 8.} You may not copy, modify, sublicense, link with, or distribute the +Library except as expressly provided under this License. Any attempt otherwise +to copy, modify, sublicense, link with, or distribute the Library is void, and +will automatically terminate your rights under this License. However, parties +who have received copies, or rights, from you under this License will not have +their licenses terminated so long as such parties remain in full compliance. + +{\bf 9.} You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or distribute +the Library or its derivative works. These actions are prohibited by law if +you do not accept this License. Therefore, by modifying or distributing the +Library (or any work based on the Library), you indicate your acceptance of +this License to do so, and all its terms and conditions for copying, +distributing or modifying the Library or works based on it. + +{\bf 10.} Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the original +licensor to copy, distribute, link with or modify the Library subject to these +terms and conditions. You may not impose any further restrictions on the +recipients' exercise of the rights granted herein. You are not responsible for +enforcing compliance by third parties with this License. + +{\bf 11.} If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or otherwise) +that contradict the conditions of this License, they do not excuse you from +the conditions of this License. If you cannot distribute so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not distribute the Library at all. +For example, if a patent license would not permit royalty-free redistribution +of the Library by all those who receive copies directly or indirectly through +you, then the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, and +the section as a whole is intended to apply in other circumstances. 
+ +It is not the purpose of this section to induce you to infringe any patents or +other property right claims or to contest validity of any such claims; this +section has the sole purpose of protecting the integrity of the free software +distribution system which is implemented by public license practices. Many +people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose that +choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. + +{\bf 12.} If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the original +copyright holder who places the Library under this License may add an explicit +geographical distribution limitation excluding those countries, so that +distribution is permitted only in or among countries not thus excluded. In +such case, this License incorporates the limitation as if written in the body +of this License. + +{\bf 13.} The Free Software Foundation may publish revised and/or new versions +of the Lesser General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and "any later +version", you have the option of following the terms and conditions either of +that version or of any later version published by the Free Software +Foundation. If the Library does not specify a license version number, you may +choose any version ever published by the Free Software Foundation. + +{\bf 14.} If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, write to +the author to ask for permission. For software which is copyrighted by the +Free Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals of +preserving the free status of all derivatives of our free software and of +promoting the sharing and reuse of software generally. + +{\bf NO WARRANTY} + +{\bf 15.} BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE +THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR +IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO +THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY +PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. 
+ +{\bf 16.} IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO +LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR +THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + +END OF TERMS AND CONDITIONS + +\section{How to Apply These Terms to Your New Libraries} +\label{SEC45} +\index[general]{Libraries!How to Apply These Terms to Your New } +\index[general]{How to Apply These Terms to Your New Libraries } + + +If you develop a new library, and you want it to be of the greatest possible +use to the public, we recommend making it free software that everyone can +redistribute and change. You can do so by permitting redistribution under +these terms (or, alternatively, under the terms of the ordinary General Public +License). + +To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + +\footnotesize +\begin{verbatim} +{\it one line to give the library's name and an idea of what it does.} +Copyright (C) {\it year} {\it name of author} +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA +\end{verbatim} +\normalsize + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + +\footnotesize +\begin{verbatim} +Yoyodyne, Inc., hereby disclaims all copyright interest in +the library "Frob" (a library for tweaking knobs) written +by James Random Hacker. +{\it signature of Ty Coon}, 1 April 1990 +Ty Coon, President of Vice +\end{verbatim} +\normalsize + +That's all there is to it! +Return to +\elink{GNU's home page}{\url{http://www.gnu.org/home.html}}. + +FSF \& GNU inquiries \& questions to +\elink{gnu@gnu.org}{mailto:gnu@gnu.org}. Other +\elink{ways to contact}{\url{http://www.gnu.org/home.html\#ContactInfo}} the FSF. + +Comments on these web pages to +\elink{webmasters@www.gnu.org}{mailto:webmasters@www.gnu.org}, send other +questions to +\elink{gnu@gnu.org}{mailto:gnu@gnu.org}. + +Copyright notice above. 
+Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +Boston, MA 02110-1301 USA +USA + +Updated: 27 Nov 2000 paulv diff --git a/docs/manuals/de/concepts/license.tex b/docs/manuals/de/concepts/license.tex new file mode 100644 index 00000000..b50269f2 --- /dev/null +++ b/docs/manuals/de/concepts/license.tex @@ -0,0 +1,115 @@ +%% +%% + +\chapter{Bacula Copyright, Trademark, and Licenses} +\label{LicenseChapter} +\index[general]{Licenses!Bacula Copyright Trademark} +\index[general]{Bacula Copyright, Trademark, and Licenses} + +There are a number of different licenses that are used in Bacula. +If you have a printed copy of this manual, the details of each of +the licenses referred to in this chapter can be found in the +online version of the manual at +\elink{http://www.bacula.org}{\url{http://www.bacula.org}}. + +\section{FDL} +\index[general]{FDL } + +The GNU Free Documentation License (FDL) is used for this manual, +which is a free and open license. This means that you may freely +reproduce it and even make changes to it. However, rather than +distribute your own version of this manual, we would much prefer +if you would send any corrections or changes to the Bacula project. + +The most recent version of the manual can always be found online +at \elink{http://www.bacula.org}{\url{http://www.bacula.org}}. + +% TODO: Point to appendix that has it + + +\section{GPL} +\index[general]{GPL } + +The vast bulk of the source code is released under the +\ilink{GNU General Public License version 2.}{GplChapter}. + +Most of this code is copyrighted: Copyright \copyright 2000-2007 +Free Software Foundation Europe e.V. + +Portions may be copyrighted by other people (ATT, the Free Software +Foundation, ...). These files are released under the GPL license. + +\section{LGPL} +\index[general]{LGPL } + +Some of the Bacula library source code is released under the +\ilink{GNU Lesser General Public License.}{LesserChapter} This +permits third parties to use these parts of our code in their proprietary +programs to interface to Bacula. + +\section{Public Domain} +\index[general]{Domain!Public } +\index[general]{Public Domain } + +Some of the Bacula code, or code that Bacula references, has been released +to the public domain. E.g. md5.c, SQLite. + +\section{Trademark} +\index[general]{Trademark } + +Bacula\raisebox{.6ex}{\textsuperscript{\textregistered}} is a registered +trademark of John Walker. + +We have trademarked the Bacula name to ensure that any program using the +name Bacula will be exactly compatible with the program that we have +released. The use of the name Bacula is restricted to software systems +that agree exactly with the program presented here. + +\section{Fiduciary License Agreement} +\index[general]{Fiduciary License Agreement } +Developers who have contributed significant changes to the Bacula code +should have signed a Fiduciary License Agreement (FLA), which +guarantees them the right to use the code they have developed, and also +ensures that the Free Software Foundation Europe (and thus the Bacula +project) has the rights to the code. 
This Fiduciary License Agreement
+is found on the Bacula web site at:
+
+\elink{http://www.bacula.org/FLA-bacula.en.pdf}{\url{http://www.bacula.org/FLA-bacula.en.pdf}}
+
+and should be filled out and then sent to:
+
+\begin{quote}
+   Free Software Foundation Europe \\
+   Freedom Task Force \\
+   Sumatrastrasse 25 \\
+   8006 Z\"{u}rich \\
+   Switzerland \\
+\end{quote}
+
+Please note that the above address is different from the officially
+registered office mentioned in the document. When you send in such a
+completed document, please notify me: kern at sibbald dot com.
+
+
+\section{Disclaimer}
+\index[general]{Disclaimer }
+
+NO WARRANTY
+
+BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE
+PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE
+STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE
+PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
+PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE,
+YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY
+COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE
+PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE
+OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR
+DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR
+A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH
+HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
diff --git a/docs/manuals/de/concepts/migration.tex b/docs/manuals/de/concepts/migration.tex
new file mode 100644
index 00000000..b0d49df2
--- /dev/null
+++ b/docs/manuals/de/concepts/migration.tex
@@ -0,0 +1,445 @@
+
+\chapter{Migration}
+\label{MigrationChapter}
+\index[general]{Migration}
+
+The term Migration, as used in the context of Bacula, means moving data from
+one Volume to another. In particular it refers to a Job (similar to a backup
+job) that reads data that was previously backed up to a Volume and writes
+it to another Volume. As part of this process, the File catalog records
+associated with the first backup job are purged. In other words, Migration
+moves Bacula Job data from one Volume to another by reading the Job data
+from the Volume it is stored on, writing it to a different Volume in a
+different Pool, and then purging the database records for the first Job.
+
+The selection process for which Job or Jobs are migrated
+can be based on quite a number of different criteria such as:
+\begin{itemize}
+\item a single previous Job
+\item a Volume
+\item a Client
+\item a regular expression matching a Job, Volume, or Client name
+\item the time a Job has been on a Volume
+\item high and low water marks (usage or occupation) of a Pool
+\item Volume size
+\end{itemize}
+
+The details of these selection criteria will be defined below.
+
+To run a Migration job, you must first define a Job resource very similar
+to a Backup Job but with {\bf Type = Migrate} instead of {\bf Type =
+Backup}.
One of the key points to remember is that the Pool that is +specified for the migration job is the only pool from which jobs will +be migrated, with one exception noted below. In addition, the Pool to +which the selected Job or Jobs will be migrated is defined by the {\bf +Next Pool = ...} in the Pool resource specified for the Migration Job. + +Bacula permits pools to contain Volumes with different Media Types. +However, when doing migration, this is a very undesirable condition. For +migration to work properly, you should use pools containing only Volumes of +the same Media Type for all migration jobs. + +The migration job normally is either manually started or starts +from a Schedule much like a backup job. It searches +for a previous backup Job or Jobs that match the parameters you have +specified in the migration Job resource, primarily a {\bf Selection Type} +(detailed a bit later). Then for +each previous backup JobId found, the Migration Job will run a new Job which +copies the old Job data from the previous Volume to a new Volume in +the Migration Pool. It is possible that no prior Jobs are found for +migration, in which case, the Migration job will simply terminate having +done nothing, but normally at a minimum, three jobs are involved during a +migration: + +\begin{itemize} +\item The currently running Migration control Job. This is only + a control job for starting the migration child jobs. +\item The previous Backup Job (already run). The File records + for this Job are purged if the Migration job successfully + terminates. The original data remains on the Volume until + it is recycled and rewritten. +\item A new Migration Backup Job that moves the data from the + previous Backup job to the new Volume. If you subsequently + do a restore, the data will be read from this Job. +\end{itemize} + +If the Migration control job finds a number of JobIds to migrate (e.g. +it is asked to migrate one or more Volumes), it will start one new +migration backup job for each JobId found on the specified Volumes. +Please note that Migration doesn't scale too well since Migrations are +done on a Job by Job basis. This if you select a very large volume or +a number of volumes for migration, you may have a large number of +Jobs that start. Because each job must read the same Volume, they will +run consecutively (not simultaneously). + +\section{Migration Job Resource Directives} + +The following directives can appear in a Director's Job resource, and they +are used to define a Migration job. + +\begin{description} +\item [Pool = \lt{}Pool-name\gt{}] The Pool specified in the Migration + control Job is not a new directive for the Job resource, but it is + particularly important because it determines what Pool will be examined for + finding JobIds to migrate. The exception to this is when {\bf Selection + Type = SQLQuery}, in which case no Pool is used, unless you + specifically include it in the SQL query. Note, the Pool resource + referenced must contain a {\bf Next Pool = ...} directive to define + the Pool to which the data will be migrated. + +\item [Type = Migrate] + {\bf Migrate} is a new type that defines the job that is run as being a + Migration Job. A Migration Job is a sort of control job and does not have + any Files associated with it, and in that sense they are more or less like + an Admin job. Migration jobs simply check to see if there is anything to + Migrate then possibly start and control new Backup jobs to migrate the data + from the specified Pool to another Pool. 
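+
+As a compact illustration of how the {\bf Pool}, {\bf Next Pool} and
+{\bf Type = Migrate} directives fit together, here is a minimal sketch
+(the resource names {\bf File}, {\bf Tape}, {\bf FileStorage} and
+{\bf migrate-file} are invented for this illustration; a complete
+working example is given in the Example Migration Jobs section below):
+
+\footnotesize
+\begin{verbatim}
+Pool {
+  Name = File
+  Pool Type = Backup
+  Next Pool = Tape        # destination Pool for the migrated data
+  Storage = FileStorage
+}
+
+Job {
+  Name = "migrate-file"   # hypothetical Migration control job
+  Type = Migrate          # makes this a Migration control job
+  Pool = File             # the Pool examined for JobIds to migrate
+  Selection Type = Job    # one of the selection types described below
+  Selection Pattern = ".*"
+  Level = Full            # required, but the original job's value is used
+  Client = any-fd         # required, but the original job's value is used
+  FileSet = "Full Set"    # required, but the original job's value is used
+  Messages = Standard
+}
+\end{verbatim}
+\normalsize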
+ +\item [Selection Type = \lt{}Selection-type-keyword\gt{}] + The \lt{}Selection-type-keyword\gt{} determines how the migration job + will go about selecting what JobIds to migrate. In most cases, it is + used in conjunction with a {\bf Selection Pattern} to give you fine + control over exactly what JobIds are selected. The possible values + for \lt{}Selection-type-keyword\gt{} are: + \begin{description} + \item [SmallestVolume] This selection keyword selects the volume with the + fewest bytes from the Pool to be migrated. The Pool to be migrated + is the Pool defined in the Migration Job resource. The migration + control job will then start and run one migration backup job for + each of the Jobs found on this Volume. The Selection Pattern, if + specified, is not used. + + \item [OldestVolume] This selection keyword selects the volume with the + oldest last write time in the Pool to be migrated. The Pool to be + migrated is the Pool defined in the Migration Job resource. The + migration control job will then start and run one migration backup + job for each of the Jobs found on this Volume. The Selection + Pattern, if specified, is not used. + + \item [Client] The Client selection type, first selects all the Clients + that have been backed up in the Pool specified by the Migration + Job resource, then it applies the {\bf Selection Pattern} (defined + below) as a regular expression to the list of Client names, giving + a filtered Client name list. All jobs that were backed up for those + filtered (regexed) Clients will be migrated. + The migration control job will then start and run one migration + backup job for each of the JobIds found for those filtered Clients. + + \item [Volume] The Volume selection type, first selects all the Volumes + that have been backed up in the Pool specified by the Migration + Job resource, then it applies the {\bf Selection Pattern} (defined + below) as a regular expression to the list of Volume names, giving + a filtered Volume list. All JobIds that were backed up for those + filtered (regexed) Volumes will be migrated. + The migration control job will then start and run one migration + backup job for each of the JobIds found on those filtered Volumes. + + \item [Job] The Job selection type, first selects all the Jobs (as + defined on the {\bf Name} directive in a Job resource) + that have been backed up in the Pool specified by the Migration + Job resource, then it applies the {\bf Selection Pattern} (defined + below) as a regular expression to the list of Job names, giving + a filtered Job name list. All JobIds that were run for those + filtered (regexed) Job names will be migrated. Note, for a given + Job named, they can be many jobs (JobIds) that ran. + The migration control job will then start and run one migration + backup job for each of the Jobs found. + + \item [SQLQuery] The SQLQuery selection type, used the {\bf Selection + Pattern} as an SQL query to obtain the JobIds to be migrated. + The Selection Pattern must be a valid SELECT SQL statement for your + SQL engine, and it must return the JobId as the first field + of the SELECT. + + \item [PoolOccupancy] This selection type will cause the Migration job + to compute the total size of the specified pool for all Media Types + combined. 
If it exceeds the {\bf Migration High Bytes} defined in
+      the Pool, the Migration job will migrate all JobIds beginning with
+      the oldest Volume in the pool (determined by Last Write time) until
+      the Pool bytes drop below the {\bf Migration Low Bytes} defined in the
+      Pool. This calculation should be considered rather approximate because
+      it is made once by the Migration job before migration is begun, and
+      thus does not take into account additional data written into the Pool
+      during the migration. In addition, the calculation of the total Pool
+      byte size is based on the Volume bytes saved in the Volume (Media)
+      database entries. The bytes calculated for Migration are based on the
+      values stored in the Job records of the Jobs to be migrated. These do
+      not include the Storage daemon overhead that is included in the total
+      Pool size. As a consequence, normally, the migration will migrate more
+      bytes than strictly necessary.
+
+   \item [PoolTime] The PoolTime selection type will cause the Migration job to
+      look at the time each JobId has been in the Pool since the job ended.
+      All Jobs that have been in the Pool longer than the time specified by the
+      {\bf Migration Time} directive in the Pool resource will be migrated.
+   \end{description}
+
+\item [Selection Pattern = \lt{}Quoted-string\gt{}]
+   The Selection Patterns permitted for each Selection-type-keyword are
+   described above.
+
+   For the OldestVolume and SmallestVolume keywords, this
+   Selection Pattern is not used (it is ignored).
+
+   For the Client, Volume, and Job
+   keywords, this pattern must be a valid regular expression that will filter
+   the appropriate item names found in the Pool.
+
+   For the SQLQuery keyword, this pattern must be a valid SELECT SQL statement
+   that returns JobIds.
+
+\end{description}
+
+\section{Migration Pool Resource Directives}
+
+The following directives can appear in a Director's Pool resource, and they
+are used to define a Migration job.
+
+\begin{description}
+\item [Migration Time = \lt{}time-specification\gt{}]
+   If a PoolTime migration is done, the time specified here in seconds (time
+   modifiers are permitted -- e.g. hours, ...) will be used. If the
+   previous Backup Job or Jobs selected have been in the Pool longer than
+   the specified PoolTime, then they will be migrated.
+
+\item [Migration High Bytes = \lt{}byte-specification\gt{}]
+   This directive specifies the number of bytes in the Pool which will
+   trigger a migration if a {\bf PoolOccupancy} migration selection
+   type has been specified. The fact that the Pool
+   usage goes above this level does not automatically trigger a migration
+   job. However, if a migration job runs and has the PoolOccupancy selection
+   type set, the Migration High Bytes will be applied. Bacula does not
+   currently restrict a pool to have only a single Media Type, so you
+   must keep in mind that if you mix Media Types in a Pool, the results
+   may not be what you want, as the Pool count of all bytes will be
+   for all Media Types combined.
+
+\item [Migration Low Bytes = \lt{}byte-specification\gt{}]
+   This directive specifies the number of bytes in the Pool which will
+   stop a migration if a {\bf PoolOccupancy} migration selection
+   type has been specified and triggered by more than Migration High
+   Bytes being in the pool. In other words, once a migration job
+   is started with {\bf PoolOccupancy} migration selection and it
+   determines that there are more than Migration High Bytes, the
+   migration job will continue to run jobs until the number of
+   bytes in the Pool drops to or below Migration Low Bytes.
+   A short configuration sketch combining these occupancy directives
+   follows.
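+
+A minimal sketch of how the occupancy directives might be combined (the
+names {\bf Disk-Pool}, {\bf Tape} and {\bf migrate-occupancy} are invented
+for this illustration, and the byte values are arbitrary):
+
+\footnotesize
+\begin{verbatim}
+Pool {
+  Name = Disk-Pool
+  Pool Type = Backup
+  Next Pool = Tape                   # destination for migrated Jobs
+  Migration High Bytes = 200 GB      # start migrating above this level
+  Migration Low Bytes = 150 GB       # stop migrating below this level
+}
+
+Job {
+  Name = "migrate-occupancy"         # hypothetical control job
+  Type = Migrate
+  Pool = Disk-Pool                   # Pool whose occupancy is checked
+  Selection Type = PoolOccupancy     # no Selection Pattern needed
+  Level = Full                       # required, but ignored
+  Client = any-fd                    # required, but ignored
+  FileSet = "Full Set"               # required, but ignored
+  Messages = Standard
+}
+\end{verbatim}
+\normalsize
+
+Remember that such a job must still be scheduled or started manually;
+exceeding Migration High Bytes does not by itself start a migration.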
+
+\item [Next Pool = \lt{}pool-specification\gt{}]
+   The Next Pool directive specifies the pool to which Jobs will be
+   migrated. This directive is required to define the Pool into which
+   the data will be migrated. Without this directive, the migration job
+   will terminate in error.
+
+\item [Storage = \lt{}storage-specification\gt{}]
+   The Storage directive specifies what Storage resource will be used
+   for all Jobs that use this Pool. It takes precedence over any other
+   Storage specifications that may have been given such as in the
+   Schedule Run directive, or in the Job resource. We highly recommend
+   that you define the Storage resource to be used in the Pool rather
+   than elsewhere (job, schedule run, ...).
+\end{description}
+
+\section{Important Migration Considerations}
+\index[general]{Important Migration Considerations}
+\begin{itemize}
+\item Each Pool into which you migrate Jobs or Volumes {\bf must}
+      contain Volumes of only one Media Type.
+
+\item Migration takes place on a JobId by JobId basis. That is,
+      each JobId is migrated in its entirety and independently
+      of other JobIds. Once the Job is migrated, it will be
+      on the new medium in the new Pool, but for the most part,
+      aside from having a new JobId, it will appear with all the
+      same characteristics as the original job (start, end time, ...).
+      The column RealEndTime in the catalog Job table will contain the
+      time and date that the Migration terminated, and by comparing
+      it with the EndTime column you can tell whether or not the
+      job was migrated. The original job is purged of its File
+      records, and its Type field is changed from "B" to "M" to
+      indicate that the job was migrated.
+
+\item Jobs on Volumes will be migrated only if the Volume is
+      marked Full, Used, or Error. Volumes that are still
+      marked Append will not be considered for migration. This
+      prevents Bacula from attempting to read the Volume at
+      the same time it is writing it. It also reduces other deadlock
+      situations, and avoids the problem of migrating a
+      Volume and later finding new files appended to that Volume.
+
+\item As noted above, for the Migration High Bytes, the calculation
+      of the bytes to migrate is somewhat approximate.
+
+\item If you keep Volumes of different Media Types in the same Pool,
+      it is not clear how well migration will work. We recommend only
+      one Media Type per pool.
+
+\item It is possible to get into a resource deadlock where Bacula does
+      not find enough drives to simultaneously read and write all the
+      Volumes needed to do Migrations. For the moment, you must take
+      care, as not all of the resource deadlock algorithms are implemented yet.
+
+\item Migration is done only when you run a Migration job. If you set a
+      Migration High Bytes and that number of bytes is exceeded in the Pool,
+      no migration job will automatically start. You must schedule the
+      migration jobs, and they must run for any migration to take place.
+
+\item If you migrate a number of Volumes, a very large number of Migration
+      jobs may start.
+
+\item Figuring out what jobs will actually be migrated can be a bit complicated
+      due to the flexibility provided by the regex patterns and the number of
+      different options. Turning on a debug level of 100 or more will provide
+      a limited amount of debug information about the migration selection
+      process, as illustrated in the console example below.
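+
+      For example, the debug level can be raised from the Console before
+      running the Migration job (a sketch assuming the standard bconsole
+      {\bf setdebug} command; the job name {\bf migrate-volume} is the one
+      used in the example later in this chapter):
+
+\footnotesize
+\begin{verbatim}
+*setdebug level=100 dir
+*run job=migrate-volume yes
+\end{verbatim}
+\normalsize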
+
+\item Bacula currently does only minimal Storage conflict resolution, so you
+      must take care to ensure that you don't try to read and write to the
+      same device or Bacula may block waiting to reserve a drive that it
+      will never find. In general, ensure that all your migration
+      pools contain only one Media Type, and that you always
+      migrate to pools with different Media Types.
+
+\item The {\bf Next Pool = ...} directive must be defined in the Pool
+      referenced in the Migration Job to define the Pool into which the
+      data will be migrated.
+
+\item Pay particular attention to the fact that data is migrated on a Job
+      by Job basis, and for any particular Volume, only one Job can read
+      that Volume at a time (no simultaneous read), so migration jobs that
+      all reference the same Volume will run sequentially. This can be a
+      potential bottleneck and does not scale very well to large numbers
+      of jobs.
+
+\item Only migration with Selection Types of Job and Volume has
+      been carefully tested. All the other migration methods (time,
+      occupancy, smallest, oldest, ...) need additional testing.
+
+\item Migration is only implemented for a single Storage daemon. You
+      cannot read on one Storage daemon and write on another.
+\end{itemize}
+
+
+\section{Example Migration Jobs}
+\index[general]{Example Migration Jobs}
+
+When you specify a Migration Job, you must specify all the standard
+directives as for a Job. However, certain directives, such as Level,
+Client, and FileSet, though they must be defined, are ignored by the
+Migration job because the values from the original job are used instead.
+
+As an example, suppose you have the following Job that
+you run every night. Note that there is no Storage directive in the
+Job resource; there is a Storage directive in each of the Pool
+resources; the Pool to be migrated (File) contains a Next Pool
+directive that defines the output Pool (where the data is written
+by the migration job).
+
+\footnotesize
+\begin{verbatim}
+# Define the backup Job
+Job {
+  Name = "NightlySave"
+  Type = Backup
+  Level = Incremental                 # default
+  Client=rufus-fd
+  FileSet="Full Set"
+  Schedule = "WeeklyCycle"
+  Messages = Standard
+  Pool = Default
+}
+
+# Default pool definition
+Pool {
+  Name = Default
+  Pool Type = Backup
+  AutoPrune = yes
+  Recycle = yes
+  Next Pool = Tape
+  Storage = File
+  LabelFormat = "File"
+}
+
+# Tape pool definition
+Pool {
+  Name = Tape
+  Pool Type = Backup
+  AutoPrune = yes
+  Recycle = yes
+  Storage = DLTDrive
+}
+
+# Definition of File storage device
+Storage {
+  Name = File
+  Address = rufus
+  Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+  Device = "File"          # same as Device in Storage daemon
+  Media Type = File        # same as MediaType in Storage daemon
+}
+
+# Definition of DLT tape storage device
+Storage {
+  Name = DLTDrive
+  Address = rufus
+  Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
+  Device = "HP DLT 80"     # same as Device in Storage daemon
+  Media Type = DLT8000     # same as MediaType in Storage daemon
+}
+
+\end{verbatim}
+\normalsize
+
+Here we have included only the essential information -- i.e. the
+Director, FileSet, Catalog, Client, Schedule, and Messages resources are
+omitted.
+
+As you can see, by running the NightlySave Job, the data will be backed up
+to File storage using the Default pool to specify the Storage as File.
+
+Now, if we add the following Job resource to this conf file.
+ +\footnotesize +\begin{verbatim} +Job { + Name = "migrate-volume" + Type = Migrate + Level = Full + Client = rufus-fd + FileSet = "Full Set" + Messages = Standard + Pool = Default + Maximum Concurrent Jobs = 4 + Selection Type = Volume + Selection Pattern = "File" +} +\end{verbatim} +\normalsize + +and then run the job named {\bf migrate-volume}, all volumes in the Pool +named Default (as specified in the migrate-volume Job that match the +regular expression pattern {\bf File} will be migrated to tape storage +DLTDrive because the {\bf Next Pool} in the Default Pool specifies that +Migrations should go to the pool named {\bf Tape}, which uses +Storage {\bf DLTDrive}. + +If instead, we use a Job resource as follows: + +\footnotesize +\begin{verbatim} +Job { + Name = "migrate" + Type = Migrate + Level = Full + Client = rufus-fd + FileSet="Full Set" + Messages = Standard + Pool = Default + Maximum Concurrent Jobs = 4 + Selection Type = Job + Selection Pattern = ".*Save" +} +\end{verbatim} +\normalsize + +All jobs ending with the name Save will be migrated from the File Default to +the Tape Pool, or from File storage to Tape storage. diff --git a/docs/manuals/de/concepts/mtx-changer.txt b/docs/manuals/de/concepts/mtx-changer.txt new file mode 100644 index 00000000..10ef6d1c --- /dev/null +++ b/docs/manuals/de/concepts/mtx-changer.txt @@ -0,0 +1,215 @@ +#!/bin/sh +# +# Bacula interface to mtx autoloader +# +# Created OCT/31/03 by Alexander Kuehn, derived from Ludwig Jaffe's script +# +# Works with the HP C1537A L708 DDS3 +# +#set -x +# these are the labels of the tapes in each virtual slot, not the slots! +labels="PSE-0001 PSE-0002 PSE-0003 PSE-0004 PSE-0005 PSE-0006 PSE-0007 PSE-0008 PSE-0009 PSE-0010 PSE-0011 PSE-0012" + +# who to send a mail to? +recipient=root@localhost +logfile=/var/log/mtx.log + +# Delay in seconds how often to check whether a new tape has been inserted +TAPEDELAY=10 # the default is every 10 seconds +echo `date` ":" $@ >>$logfile + +# change this if mt is not in the path (use different quotes!) +mt=`which mt` +grep=`which grep` +# +# how to run the console application? +console="/usr/local/sbin/console -c /usr/local/etc/console.conf" + +command="$1" + +#TAPEDRIVE0 holds the device/name of your 1st and only drive (Bacula supports only 1 drive currently) +#Read TAPEDRIVE from command line parameters +if [ -z "$2" ] ; then + TAPEDRIVE0=/dev/nsa0 +else + TAPEDRIVE0=$2 +fi + +#Read slot from command line parameters +if [ -z "$3" ] ; then + slot=`expr 1` +else + slot=`expr $3` +fi + +if [ -z "$command" ] ; then + echo "" + echo "The mtx-changer script for Bacula" + echo "---------------------------------" + echo "" + echo "usage: mtx-changer [slot]" + echo " mtx-changer" + echo "" + echo "Valid commands:" + echo "" + echo "unload Unloads a tape into the slot" + echo " from where it was loaded." + echo "load Loads a tape from the slot " + echo "list Lists full storage slots" + echo "loaded Gives slot from where the tape was loaded." + echo " 0 means the tape drive is empty." + echo "slots Gives Number of avialable slots." + echo "volumes List avialable slots and the label of the." 
+ echo " tape in it (slot:volume)" + echo "Example:" + echo " mtx-changer load /dev/nst0 1 loads a tape from slot1" + echo " mtx-changer %a %o %S " + echo "" + exit 0 +fi + + +case "$command" in + unload) + # At first do mt -f /dev/st0 offline to unload the tape + # + # Check if you want to fool me + echo "unmount"|$console >/dev/null 2>/dev/null + echo "mtx-changer: Checking if drive is loaded before we unload. Request unload" >>$logfile + if $mt -f $TAPEDRIVE0 status >/dev/null 2>/dev/null ; then # mt says status ok + echo "mtx-changer: Doing mt -f $TAPEDRIVE0 rewoffl to rewind and unload the tape!" >>$logfile + $mt -f $TAPEDRIVE0 rewoffl + else + echo "mtx-changer: *** Don't fool me! *** The Drive $TAPEDRIVE0 is empty." >>$logfile + fi + exit 0 + ;; + + load) + #Let's check if drive is loaded before we load it + echo "mtx-changer: Checking if drive is loaded before we load. I Request loaded" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` +# if [ -z "$LOADEDVOL" ] ; then # this is wrong, becaus Bacula would try to use the tape if we mount it! +# LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|sed -e s/^.*Volume\ //|cut -d\" -f2` +# if [ -z "$LOADEDVOL" ] ; then +# echo "mtx-changer: The Drive $TAPEDRIVE0 is empty." >>$logfile +# else # restore state? +# if [ $LOADEDVOL = $3 ] ; then # requested Volume mounted -> exit +# echo "mtx-changer: *** Don't fool me! *** Tape $LOADEDVOL is already in drive $TAPEDRIVE0!" >>$logfile +# exit +# else # oops, wrong volume +# echo "unmount"|$console >/dev/null 2>/dev/null +# fi +# fi +# fi + if [ -z "$LOADEDVOL" ] ; then + echo "unmount"|$console >/dev/null 2>/dev/null + LOADEDVOL=0 + else + #Check if you want to fool me + if [ $LOADEDVOL = $3 ] ; then + echo "mtx-changer: *** Don't fool me! *** Tape $LOADEDVOL is already in drive $TAPEDRIVE0!" >>$logfile + exit + fi + echo "mtx-changer: The Drive $TAPEDRIVE0 is loaded with the tape $LOADEDVOL" >>$logfile + echo "mtx-changer: Unmounting..." >>$logfile + echo "unmount"|$console >/dev/null 2>/dev/null + fi + echo "mtx-changer: Unloading..." >>$logfile + echo "mtx-changer: Doing mt -f $TAPEDRIVE0 rewoffl to rewind and unload the tape!" >>$logfile + mt -f $TAPEDRIVE0 rewoffl 2>/dev/null + #Now we can load the drive as desired + echo "mtx-changer: Doing mtx -f $1 $2 $3" >>$logfile + # extract label for the mail + count=`expr 1` + for label in $labels ; do + if [ $slot -eq $count ] ; then volume=$label ; fi + count=`expr $count + 1` + done + + mail -s "Bacula needs volume $volume." $recipient </dev/null 2>/dev/null + while [ $? -ne 0 ] ; do + sleep $TAPEDELAY + $mt status >/dev/null 2>/dev/null + done + mail -s "Bacula says thank you." $recipient <>$logfile + echo "Loading finished." 
; >>$logfile + echo "$slot" + exit 0 + ;; + + list) + echo "mtx-changer: Requested list" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then # try mounting + LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then # no luck + LOADEDVOL="_no_tape" + else # restore state + echo "unmount"|$console >/dev/null 2>/dev/null + fi + fi + count=`expr 1` + for label in $labels ; do + if [ "$label" != "$LOADEDVOL" ] ; then + printf "$count " + fi + count=`expr $count + 1` + done + printf "\n" + ;; + + loaded) + echo "mtx-changer: Request loaded, dev $TAPEDRIVE0" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then + LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z "$LOADEDVOL" ] ; then # no luck + echo "$TAPEDRIVE0 not mounted!" >>$logfile + else # restore state + echo "unmount"|$console >/dev/null 2>/dev/null + fi + fi + if [ -z "$LOADEDVOL" ] ; then + LOADEDVOL="_no_tape" >>$logfile + echo "0" + else + count=`expr 1` + for label in $labels ; do + if [ $LOADEDVOL = $label ] ; then echo $count ; fi + count=`expr $count + 1` + done + fi + exit 0 + ;; + + slots) + echo "mtx-changer: Request slots" >>$logfile + count=`expr 0` + for label in $labels ; do + count=`expr $count + 1` + done + echo $count + ;; + + volumes) + echo "mtx-changer: Request volumes" >>$logfile + count=`expr 1` + for label in $labels ; do + printf "$count:$label " + count=`expr $count + 1` + done + printf "\n" + ;; +esac diff --git a/docs/manuals/de/concepts/pools.tex b/docs/manuals/de/concepts/pools.tex new file mode 100644 index 00000000..10217f84 --- /dev/null +++ b/docs/manuals/de/concepts/pools.tex @@ -0,0 +1,429 @@ +%% +%% + +\chapter{Automated Disk Backup} +\label{PoolsChapter} +\index[general]{Volumes!Using Pools to Manage} +\index[general]{Disk!Automated Backup} +\index[general]{Using Pools to Manage Volumes} +\index[general]{Automated Disk Backup} + +If you manage five or ten machines and have a nice tape backup, you don't need +Pools, and you may wonder what they are good for. In this chapter, you will +see that Pools can help you optimize disk storage space. The same techniques +can be applied to a shop that has multiple tape drives, or that wants to mount +various different Volumes to meet their needs. + +The rest of this chapter will give an example involving backup to disk +Volumes, but most of the information applies equally well to tape Volumes. + +\label{TheProblem} +\section{The Problem} +\index[general]{Problem} + +A site that I administer (a charitable organization) had a tape DDS-3 tape +drive that was failing. The exact reason for the failure is still unknown. +Worse yet, their full backup size is about 15GB whereas the capacity of their +broken DDS-3 was at best 8GB (rated 6/12). A new DDS-4 tape drive and the +necessary cassettes was more expensive than their budget could handle. 
+ +\label{TheSolution} +\section{The Solution} +\index[general]{Solution} + +They want to maintain six months of backup data, and be able to access the old +files on a daily basis for a week, a weekly basis for a month, then monthly +for six months. In addition, offsite capability was not needed (well perhaps +it really is, but it was never used). Their daily changes amount to about +300MB on the average, or about 2GB per week. + +As a consequence, the total volume of data they need to keep to meet their +needs is about 100GB (15GB x 6 + 2GB x 5 + 0.3 x 7) = 102.1GB. + +The chosen solution was to buy a 120GB hard disk for next to nothing -- far +less than 1/10th the price of a tape drive and the cassettes to handle the +same amount of data, and to have Bacula write to disk files. + +The rest of this chapter will explain how to setup Bacula so that it would +automatically manage a set of disk files with the minimum sysadmin +intervention. The system has been running since 22 January 2004 until today +(23 June 2007) with no intervention, with the exception of adding +a second 120GB hard disk after a year because their needs grew +over that time to more than the 120GB (168GB to be exact). The only other +intervention I have made is a periodic (about once a year) Bacula upgrade. + +\label{OverallDesign} +\section{Overall Design} +\index[general]{Overall Design} +\index[general]{Design!Overall} + +Getting Bacula to write to disk rather than tape in the simplest case is +rather easy, and is documented in the previous chapter. In addition, all the +directives discussed here are explained in that chapter. We'll leave it to you +to look at the details there. If you haven't read it and are not familiar with +Pools, you probably should at least read it once quickly for the ideas before +continuing here. + +One needs to consider about what happens if we have only a single large Bacula +Volume defined on our hard disk. Everything works fine until the Volume fills, +then Bacula will ask you to mount a new Volume. This same problem applies to +the use of tape Volumes if your tape fills. Being a hard disk and the only one +you have, this will be a bit of a problem. It should be obvious that it is +better to use a number of smaller Volumes and arrange for Bacula to +automatically recycle them so that the disk storage space can be reused. The +other problem with a single Volume, is that until version 2.0.0, +Bacula did not seek within a disk Volume, so restoring a single file can take +more time than one would expect. + +As mentioned, the solution is to have multiple Volumes, or files on the disk. +To do so, we need to limit the use and thus the size of a single Volume, by +time, by number of jobs, or by size. Any of these would work, but we chose to +limit the use of a single Volume by putting a single job in each Volume with +the exception of Volumes containing Incremental backup where there will be 6 +jobs (a week's worth of data) per volume. The details of this will be +discussed shortly. This is a single client backup, so if you have multiple +clients you will need to multiply those numbers by the number of clients, +or use a different system for switching volumes, such as limiting the +volume size. + +The next problem to resolve is recycling of Volumes. As you noted from above, +the requirements are to be able to restore monthly for 6 months, weekly for a +month, and daily for a week. 
So to simplify things, why not do a Full save +once a month, a Differential save once a week, and Incremental saves daily? +Now since each of these different kinds of saves needs to remain valid for +differing periods, the simplest way to do this (and possibly the only one) is to +have a separate Pool for each backup type. + +The decision was to use three Pools: one for Full saves, one for Differential +saves, and one for Incremental saves, and each would have a different number +of volumes and a different Retention period to accomplish the requirements. + +\label{FullPool} +\subsection{Full Pool} +\index[general]{Pool!Full} +\index[general]{Full Pool} + +Putting a single Full backup on each Volume will require six Full save +Volumes and a retention period of six months. The Pool needed to do that is: + +\footnotesize +\begin{verbatim} +Pool { + Name = Full-Pool + Pool Type = Backup + Recycle = yes + AutoPrune = yes + Volume Retention = 6 months + Maximum Volume Jobs = 1 + Label Format = Full- + Maximum Volumes = 9 +} +\end{verbatim} +\normalsize + +Since these are disk Volumes, no space is lost by having separate Volumes for +each backup (done once a month in this case). The items to note are the +retention period of six months (i.e. they are recycled after six months), that +there is one job per volume (Maximum Volume Jobs = 1), and that the volumes will be +labeled Full-0001, ... Full-0006 automatically. One could have labeled these +manually from the start, but why not use the features of Bacula? + +Six months after the first volume is used, it will be subject to pruning +and thus recycling, so with a maximum of 9 volumes, there should always be +3 volumes available (note, they may all be marked used, but they will be +marked purged and recycled as needed). + +If you have two clients, you would want to set {\bf Maximum Volume Jobs} to +2 instead of one, or set a limit on the size of the Volumes, and possibly +increase the maximum number of Volumes. + + +\label{DiffPool} +\subsection{Differential Pool} +\index[general]{Pool!Differential} +\index[general]{Differential Pool} + +For the Differential backup Pool, we choose a retention period of a bit longer +than a month and ensure that there is at least one Volume for each of the +maximum of five weeks in a month. So the following works: + +\footnotesize +\begin{verbatim} +Pool { + Name = Diff-Pool + Pool Type = Backup + Recycle = yes + AutoPrune = yes + Volume Retention = 40 days + Maximum Volume Jobs = 1 + Label Format = Diff- + Maximum Volumes = 10 +} +\end{verbatim} +\normalsize + +As you can see, the Differential Pool can grow to a maximum of 10 volumes, +and the Volumes are retained for 40 days, after which they can be recycled. Finally, +there is one job per volume. This, of course, could be tightened up a lot, but +the expense here is a few GB which is not too serious. + +If a new volume is used every week, after 40 days, one will have used 7 +volumes, and there should then always be 3 volumes that can be purged and +recycled. + +See the discussion above concerning the Full pool for how to handle multiple +clients.
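+
+For example, if there were two clients, the Full Pool shown above might be
+adapted roughly as follows. This is only a sketch based on the directives
+already discussed; the volume counts are illustrative and would have to be
+sized for your own data:
+
+\footnotesize
+\begin{verbatim}
+Pool {
+  Name = Full-Pool
+  Pool Type = Backup
+  Recycle = yes
+  AutoPrune = yes
+  Volume Retention = 6 months
+  Maximum Volume Jobs = 2     # one Full per client in each Volume
+  # or, instead, limit the Volume size:
+  # Maximum Volume Bytes = 20G
+  Label Format = Full-
+  Maximum Volumes = 18        # roughly double the single-client count
+}
+\end{verbatim}
+\normalsize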
+ +\label{IncPool} +\subsection{Incremental Pool} +\index[general]{Incremental Pool} +\index[general]{Pool!Incremental} + +Finally, here is the resource for the Incremental Pool: + +\footnotesize +\begin{verbatim} +Pool { + Name = Inc-Pool + Pool Type = Backup + Recycle = yes + AutoPrune = yes + Volume Retention = 20 days + Maximum Volume Jobs = 6 + Label Format = Inc- + Maximum Volumes = 7 +} +\end{verbatim} +\normalsize + +We keep the data for 20 days rather than just a week as the needs require. To +reduce the proliferation of volume names, we keep a week's worth of data (6 +incremental backups) in each Volume. In practice, the retention period should +be set to just a bit more than a week and keep only two or three volumes +instead of five. Again, the loss is very little and as the system reaches the +full steady state, we can adjust these values so that the total disk usage +doesn't exceed the disk capacity. + +If you have two clients, the simplest thing to do is to increase the +maximum volume jobs from 6 to 12. As mentioned above, it is also possible to +limit the size of the volumes. However, in that case, you will need to +have a better idea of the volume usage, or add sufficient volumes to the pool so +that you will be assured that in the next cycle (after 20 days) there is +at least one volume that is pruned and can be recycled. + + +\label{Example} +\section{The Actual Conf Files} +\index[general]{Files!Actual Conf} +\index[general]{Actual Conf Files} + +The following example shows you the actual files used, with only a few minor +modifications to simplify things. + +The Director's configuration file is as follows: + +\footnotesize +\begin{verbatim} +Director { # define myself + Name = bacula-dir + DIRport = 9101 + QueryFile = "/home/bacula/bin/query.sql" + WorkingDirectory = "/home/bacula/working" + PidDirectory = "/home/bacula/working" + Maximum Concurrent Jobs = 1 + Password = " *** CHANGE ME ***" + Messages = Standard +} +# By default, this job will back up to disk in /tmp +Job { + Name = client + Type = Backup + Client = client-fd + FileSet = "Full Set" + Schedule = "WeeklyCycle" + Storage = File + Messages = Standard + Pool = Default + Full Backup Pool = Full-Pool + Incremental Backup Pool = Inc-Pool + Differential Backup Pool = Diff-Pool + Write Bootstrap = "/home/bacula/working/client.bsr" + Priority = 10 +} + +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + Type = Backup + Client = client-fd + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + Storage = File + Messages = Standard + Pool = Default + # This creates an ASCII copy of the catalog + # WARNING!!! Passing the password via the command line is insecure. + # see comments in make_catalog_backup for details.
+ RunBeforeJob = "/home/bacula/bin/make_catalog_backup bacula bacula" + # This deletes the copy of the catalog + RunAfterJob = "/home/bacula/bin/delete_catalog_backup" + Write Bootstrap = "/home/bacula/working/BackupCatalog.bsr" + Priority = 11 # run after main backup +} + +# Standard Restore template, to be changed by Console program +Job { + Name = "RestoreFiles" + Type = Restore + Client = havana-fd + FileSet="Full Set" + Storage = File + Messages = Standard + Pool = Default + Where = /tmp/bacula-restores +} + + + +# List of files to be backed up +FileSet { + Name = "Full Set" + Include = { Options { signature=SHA1; compression=GZIP9 } + File = / + File = /usr + File = /home + File = /boot + File = /var + File = /opt + } + Exclude = { + File = /proc + File = /tmp + File = /.journal + File = /.fsck + ... + } +} +Schedule { + Name = "WeeklyCycle" + Run = Level=Full 1st sun at 2:05 + Run = Level=Differential 2nd-5th sun at 2:05 + Run = Level=Incremental mon-sat at 2:05 +} + +# This schedule does the catalog. It starts after the WeeklyCycle +Schedule { + Name = "WeeklyCycleAfterBackup" + Run = Level=Full sun-sat at 2:10 +} + +# This is the backup of the catalog +FileSet { + Name = "Catalog" + Include { Options { signature=MD5 } + File = /home/bacula/working/bacula.sql + } +} + +Client { + Name = client-fd + Address = client + FDPort = 9102 + Catalog = MyCatalog + Password = " *** CHANGE ME ***" + AutoPrune = yes # Prune expired Jobs/Files + Job Retention = 6 months + File Retention = 60 days +} + +Storage { + Name = File + Address = localhost + SDPort = 9103 + Password = " *** CHANGE ME ***" + Device = FileStorage + Media Type = File +} + +Catalog { + Name = MyCatalog + dbname = bacula; user = bacula; password = "" +} + +Pool { + Name = Full-Pool + Pool Type = Backup + Recycle = yes # automatically recycle Volumes + AutoPrune = yes # Prune expired volumes + Volume Retention = 6 months + Maximum Volume Jobs = 1 + Label Format = Full- + Maximum Volumes = 9 +} + +Pool { + Name = Inc-Pool + Pool Type = Backup + Recycle = yes # automatically recycle Volumes + AutoPrune = yes # Prune expired volumes + Volume Retention = 20 days + Maximum Volume Jobs = 6 + Label Format = Inc- + Maximum Volumes = 7 +} + +Pool { + Name = Diff-Pool + Pool Type = Backup + Recycle = yes + AutoPrune = yes + Volume Retention = 40 days + Maximum Volume Jobs = 1 + Label Format = Diff- + Maximum Volumes = 10 +} + +Messages { + Name = Standard + mailcommand = "bsmtp -h mail.domain.com -f \"\(Bacula\) %r\" + -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "bsmtp -h mail.domain.com -f \"\(Bacula\) %r\" + -s \"Bacula: Intervention needed for %j\" %r" + mail = root@domain.com = all, !skipped + operator = root@domain.com = mount + console = all, !skipped, !saved + append = "/home/bacula/bin/log" = all, !skipped +} +\end{verbatim} +\normalsize + +and the Storage daemon's configuration file is: + +\footnotesize +\begin{verbatim} +Storage { # definition of myself + Name = bacula-sd + SDPort = 9103 # Director's port + WorkingDirectory = "/home/bacula/working" + Pid Directory = "/home/bacula/working" +} +Director { + Name = bacula-dir + Password = " *** CHANGE ME ***" +} +Device { + Name = FileStorage + Media Type = File + Archive Device = /files/bacula + LabelMedia = yes; # lets Bacula label unlabeled media + Random Access = Yes; + AutomaticMount = yes; # when device opened, read it + RemovableMedia = no; + AlwaysOpen = no; +} +Messages { + Name = Standard + director = bacula-dir = all +} +\end{verbatim} +\normalsize diff --git 
a/docs/manuals/de/concepts/projects.tex b/docs/manuals/de/concepts/projects.tex new file mode 100644 index 00000000..f118e791 --- /dev/null +++ b/docs/manuals/de/concepts/projects.tex @@ -0,0 +1,28 @@ +%% +%% + +\chapter{Bacula Projects} +\label{ProjectsChapter} +\index[general]{Projects!Bacula } +\index[general]{Bacula Projects } + +Once a new major version of Bacula is released, the Bacula +users will vote on a list of new features. This vote is used +as the main element determining what new features will be +implemented for the next version. Generally, the development time +for a new release is between four and nine months. Sometimes it may be +a bit longer, but in that case, there will be a number of bug fix +updates to the currently released version. + +For the current list of projects, please see the projects page in the CVS +at: \elink{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/projects} +{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/projects} +or see the {\bf projects} file in the main source directory. The projects +file is updated approximately once every six months. + +Separately from the project list, Kern maintains a current list of +tasks as well as ideas, feature requests, and occasionally design +notes. This list is updated roughly weekly (sometimes more often). +For a current list of tasks you can see {\bf kernstodo} in the Source Forge +CVS at \elink{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/kernstodo} +{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/kernstodo}. diff --git a/docs/manuals/de/concepts/python.tex b/docs/manuals/de/concepts/python.tex new file mode 100644 index 00000000..40e1b2e0 --- /dev/null +++ b/docs/manuals/de/concepts/python.tex @@ -0,0 +1,479 @@ +%% +%% + +\chapter{Python Scripting} +\label{PythonChapter} +\index[general]{Python Scripting} +\index[general]{Scripting!Python} + +You may be asking what Python is and why a scripting language is +needed in Bacula. The answer to the first question is that Python +is an Object Oriented scripting language with features similar +to those found in Perl, but the syntax of the language is much +cleaner and simpler. The answer to why have scripting in Bacula is to +give the user more control over the whole backup process. Probably +the simplest example is when Bacula needs a new Volume name: with +a scripting language such as Python, you can generate any name +you want, based on the current state of Bacula. + +\section{Python Configuration} +\index[general]{Python Configuration} +\index[general]{Configuration!Python} + +Python must be enabled during the configuration process by adding +a \verb:--:with-python option, and possibly specifying an alternate +directory if your Python is not installed in a standard system +location. If you are using RPMs you will need the python-devel package +installed. + +When Python is configured, it becomes an integral part of Bacula and +runs in Bacula's address space, so even though it is an interpreted +language, it is very efficient. + +When the Director starts, it looks to see if you have a {\bf +Scripts Directory} Directive defined (normal default {\bf +/etc/bacula/scripts}); if so, it looks in that directory for a file named +{\bf DirStartUp.py}. If it is found, Bacula will pass this file to Python +for execution. The {\bf Scripts Directory} is a new directive that you add +to the Director resource of your bacula-dir.conf file.
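+
+For instance, a minimal fragment of what this might look like in your
+bacula-dir.conf is shown below (only the directive discussed here is shown;
+the path is simply the default mentioned above):
+
+\footnotesize
+\begin{verbatim}
+Director {
+  Name = bacula-dir
+  ...                                         # your other Director directives
+  Scripts Directory = "/etc/bacula/scripts"   # where DirStartUp.py is looked for
+}
+\end{verbatim}
+\normalsize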
+ +Note: Bacula does not install Python scripts by default because these +scripts are for you to program. This means that with a default +installation with Python enabled, Bacula will print the following error +message: + +\begin{verbatim} +09-Jun 15:14 bacula-dir: ERROR in pythonlib.c:131 Could not import +Python script /etc/bacula/scripts/DirStartUp. Python disabled. +\end{verbatim} + +The source code directory {\bf examples/python} contains sample scripts +for DirStartUp.py, SDStartUp.py, and FDStartUp.py that you might want +to use as a starting point. Normally, your scripts directory (at least +where you store the Python scripts) should be writable by Bacula, because +Python will attempt to write a compiled version of the scripts (e.g. +DirStartUp.pyc) back to that directory. + +When starting with the sample scripts, you can delete any part that +you will not need, but you should keep all the Bacula Event and Job Event +definitions. If you do not want a particular event, simply replace the +existing code with a {\bf noop = 1}. + +\section{Bacula Events} +\index[general]{Bacula Events} +\index[general]{Events} +A Bacula event is a point in the Bacula code where Bacula +will call a subroutine (actually a method) that you have +defined in the Python StartUp script. Events correspond +to some significant event such as a Job Start, a Job End, +Bacula needs a new Volume Name, ... When your script is +called, it will have access to all the Bacula variables +specific to the Job (attributes of the Job Object), and +it can even call some of the Job methods (subroutines) +or set new values in the Job attributes, such as the +Priority. You will see below how the events are used. + +\section{Python Objects} +\index[general]{Python Objects} +\index[general]{Objects!Python} + +There are four Python objects that you will need to work with: +\begin{description} +\item [The Bacula Object] + The Bacula object is created by the Bacula daemon (the Director + in the present case) when the daemon starts. It is available to + the Python startup script, {\bf DirStartup.py}, by importing the + Bacula definitions with {\bf import bacula}. The methods + available with this object are described below. + +\item [The Bacula Events Class] + You create this class in the startup script, and you pass + it to the Bacula Object's {\bf set\_events} method. The + purpose of the Bacula Events Class is to define what global + or daemon events you want to monitor. When one of those events + occurs, your Bacula Events Class will be called at the method + corresponding to the event. There are currently three events, + JobStart, JobEnd, and Exit, which are described in detail below. + +\item [The Job Object] + When a Job starts, and assuming you have defined a JobStart method + in your Bacula Events Class, Bacula will create a Job Object. This + object will be passed to the JobStart event. The Job Object has a + has good number of read-only members or attributes providing many + details of the Job, and it also has a number of writable attributes + that allow you to pass information into the Job. These attributes + are described below. + +\item [The Job Events Class] + You create this class in the JobStart method of your Bacula Events + class, and it allows you to define which of the possible Job Object + events you want to see. You must pass an instance of your Job Events + class to the Job Object set\_events() method. + Normally, you will probably only have one + Job Events Class, which will be instantiated for each Job. 
However, + if you wish to see different events in different Jobs, you may have + as many Job Events classes as you wish. +\end{description} + + +The first thing the startup script must do is to define what global Bacula +events (daemon events), it wants to see. This is done by creating a +Bacula Events class, instantiating it, then passing it to the +{\bf set\_events} method. There are three possible +events. + +\begin{description} +\item [JobStart] + \index[dir]{JobStart} + This Python method, if defined, will be called each time a Job is started. + The method is passed the class instantiation object as the first argument, + and the Bacula Job object as the second argument. The Bacula Job object + has several built-in methods, and you can define which ones you + want called. If you do not define this method, you will not be able + to interact with Bacula jobs. + +\item [JobEnd] + This Python method, if defined, will be called each time a Job terminates. + The method is passed the class instantiation object as the first argument, + and the Bacula Job object as the second argument. + +\item [Exit] + This Python method, if defined, will be called when the Director terminates. + The method is passed the class instantiation object as the first argument. +\end{description} + +Access to the Bacula variables and methods is done with: + + import bacula + +The following are the read-only attributes provided by the bacula object. +\begin{description} +\item [Name] +\item [ConfigFile] +\item [WorkingDir] +\item [Version] string consisting of "Version Build-date" +\end{description} + + +A simple definition of the Bacula Events Class might be the following: + +\footnotesize +\begin{verbatim} +import sys, bacula +class BaculaEvents: + def JobStart(self, job): + ... +\end{verbatim} +\normalsize + +Then to instantiate the class and pass it to Bacula, you +would do: + +\footnotesize +\begin{verbatim} +bacula.set_events(BaculaEvents()) # register Bacula Events wanted +\end{verbatim} +\normalsize + +And at that point, each time a Job is started, your BaculaEvents JobStart +method will be called. + +Now to actually do anything with a Job, you must define which Job events +you want to see, and this is done by defining a JobEvents class containing +the methods you want called. Each method name corresponds to one of the +Job Events that Bacula will generate. + +A simple Job Events class might look like the following: + +\footnotesize +\begin{verbatim} +class JobEvents: + def NewVolume(self, job): + ... +\end{verbatim} +\normalsize + +Here, your JobEvents class method NewVolume will be called each time +the Job needs a new Volume name. To actually register the events defined +in your class with the Job, you must instantiate the JobEvents class and +set it in the Job {\bf set\_events} variable. Note, this is a bit different +from how you registered the Bacula events. The registration process must +be done in the Bacula JobStart event (your method). So, you would modify +Bacula Events (not the Job events) as follows: + +\footnotesize +\begin{verbatim} +import sys, bacula +class BaculaEvents: + def JobStart(self, job): + events = JobEvents() # create instance of Job class + job.set_events(events) # register Job events desired + ... +\end{verbatim} +\normalsize + +When a job event is triggered, the appropriate event definition is +called in the JobEvents class. This is the means by which your Python +script or code gets control. Once it has control, it may read job +attributes, or set them. 
See below for a list of read-only attributes, +and those that are writable. + +In addition, the Bacula {\bf job} object in the Director has +a number of methods (subroutines) that can be called. They +are: +\begin{description} +\item [set\_events] The set\_events method takes a single + argument, which is the instantiation of the Job Events class + that contains the methods that you want called. The method + names that will be called must correspond to the Bacula + defined events. You may define additional methods but Bacula + will not use them. +\item [run] The run method takes a single string + argument, which is the run command (same as in the Console) + that you want to submit to start a new Job. The value + returned by the run method is the JobId of the job that + started, or -1 if there was an error. +\item [write] The write method is used to be able to send + print output to the Job Report. This will be described later. +\item[cancel] The cancel method takes a single integer argument, + which is a JobId. If JobId is found, it will be canceled. +\item [DoesVolumeExist] The DoesVolumeExist method takes a single + string argument, which is the Volume name, and returns + 1 if the volume exists in the Catalog and 0 if the volume + does not exist. +\end{description} + +The following attributes are read/write within the Director +for the {\bf job} object. + +\begin{description} +\item [Priority] Read or set the Job priority. + Note, that setting a Job Priority is effective only before + the Job actually starts. +\item [Level] This attribute contains a string representing the Job + level, e.g. Full, Differential, Incremental, ... if read. + The level can also be set. +\end{description} + +The following read-only attributes are available within the Director +for the {\bf job} object. + +\begin{description} +\item [Type] This attribute contains a string representing the Job + type, e.g. Backup, Restore, Verify, ... +\item [JobId] This attribute contains an integer representing the + JobId. +\item [Client] This attribute contains a string with the name of the + Client for this job. +\item [NumVols] This attribute contains an integer with the number of + Volumes in the Pool being used by the Job. +\item [Pool] This attribute contains a string with the name of the Pool + being used by the Job. +\item [Storage] This attribute contains a string with the name of the + Storage resource being used by the Job. +\item [Catalog] This attribute contains a string with the name of the + Catalog resource being used by the Job. +\item [MediaType] This attribute contains a string with the name of the + Media Type associated with the Storage resource being used by the Job. +\item [Job] This attribute contains a string containing the name of the + Job resource used by this job (not unique). +\item [JobName] This attribute contains a string representing the full + unique Job name. +\item [JobStatus] This attribute contains a single character string + representing the current Job status. The status may change + during execution of the job. 
It may take on the following + values: + \begin{description} + \item [C] Created, not yet running + \item [R] Running + \item [B] Blocked + \item [T] Completed successfully + \item [E] Terminated with errors + \item [e] Non-fatal error + \item [f] Fatal error + \item [D] Verify found differences + \item [A] Canceled by user + \item [F] Waiting for Client + \item [S] Waiting for Storage daemon + \item [m] Waiting for new media + \item [M] Waiting for media mount + \item [s] Waiting for storage resource + \item [j] Waiting for job resource + \item [c] Waiting for client resource + \item [d] Waiting on maximum jobs + \item [t] Waiting on start time + \item [p] Waiting on higher priority jobs + \end{description} + +\item [Priority] This attribute contains an integer with the priority + assigned to the job. +\item [CatalogRes] tuple consisting of (DBName, Address, User, + Password, Socket, Port, Database Vendor) taken from the Catalog resource + for the Job with the exception of Database Vendor, which is + one of the following: MySQL, PostgreSQL, SQLite, Internal, + depending on what database you configured. +\item [VolumeName] + After a Volume has been purged, this attribute will contain the + name of that Volume. At other times, this value may have no meaning. +\end{description} + +The following write-only attributes are available within the +Director: + +\begin{description} +\item [JobReport] Send line to the Job Report. +\item [VolumeName] Set a new Volume name. Valid only during the + NewVolume event. +\end{description} + +\section{Python Console Command} +\index[general]{Python Console Command} +\index[general]{Console Command!Python} + +There is a new Console command named {\bf python}. It takes +a single argument {\bf restart}. Example: +\begin{verbatim} + python restart +\end{verbatim} + +This command restarts the Python interpreter in the Director. +This can be useful when you are modifying the DirStartUp script, +because normally Python will cache it, and thus the +script will be read one time. + +\section{Debugging Python Scripts} +\index[general]{Debugging Python Scripts} +In general, you debug your Python scripts by using print statements. +You can also develop your script or important parts of it as a +separate file using the Python interpreter to run it. Once you +have it working correctly, you can then call the script from +within the Bacula Python script (DirStartUp.py). + +If you are having problems loading DirStartUp.py, you will probably +not get any error messages because Bacula can only print Python +error messages after the Python interpreter is started. However, you +may be able to see the error messages by starting Bacula in +a shell window with the {\bf -d1} option on the command line. That +should cause the Python error messages to be printed in the shell +window. + +If you are getting error messages such as the following when +loading DirStartUp.py: + +\begin{verbatim} + Traceback (most recent call last): + File "/etc/bacula/scripts/DirStartUp.py", line 6, in ? + import time, sys, bacula + ImportError: /usr/lib/python2.3/lib-dynload/timemodule.so: undefined + symbol: PyInt_FromLong + bacula-dir: pythonlib.c:134 Python Import error. +\end{verbatim} + +It is because the DirStartUp script is calling a dynamically loaded +module (timemodule.so in the above case) that then tries to use +Python functions exported from the Python interpreter (in this case +PyInt\_FromLong). The way Bacula is currently linked with Python does +not permit this. 
The solution to the problem is to put such functions +(in this case the import of time into a separate Python script, which +will do your calculations and return the values you want. Then call +(not import) this script from the Bacula DirStartUp.py script, and +it all should work as you expect. + + + + + +\section{Python Example} +\index[general]{Python Example} +\index[general]{Example!Python} + +An example script for the Director startup file is provided in +{\bf examples/python/DirStartup.py} as follows: + +\footnotesize +\begin{verbatim} +# +# Bacula Python interface script for the Director +# + +# You must import both sys and bacula +import sys, bacula + +# This is the list of Bacula daemon events that you +# can receive. +class BaculaEvents(object): + def __init__(self): + # Called here when a new Bacula Events class is + # is created. Normally not used + noop = 1 + + def JobStart(self, job): + """ + Called here when a new job is started. If you want + to do anything with the Job, you must register + events you want to receive. + """ + events = JobEvents() # create instance of Job class + events.job = job # save Bacula's job pointer + job.set_events(events) # register events desired + sys.stderr = events # send error output to Bacula + sys.stdout = events # send stdout to Bacula + jobid = job.JobId; client = job.Client + numvols = job.NumVols + job.JobReport="Python Dir JobStart: JobId=%d Client=%s NumVols=%d\n" % (jobid,client,numvols) + + # Bacula Job is going to terminate + def JobEnd(self, job): + jobid = job.JobId + client = job.Client + job.JobReport="Python Dir JobEnd output: JobId=%d Client=%s.\n" % (jobid, client) + + # Called here when the Bacula daemon is going to exit + def Exit(self, job): + print "Daemon exiting." + +bacula.set_events(BaculaEvents()) # register daemon events desired + +""" + These are the Job events that you can receive. +""" +class JobEvents(object): + def __init__(self): + # Called here when you instantiate the Job. Not + # normally used + noop = 1 + + def JobInit(self, job): + # Called when the job is first scheduled + noop = 1 + + def JobRun(self, job): + # Called just before running the job after initializing + # This is the point to change most Job parameters. + # It is equivalent to the JobRunBefore point. + noop = 1 + + def NewVolume(self, job): + # Called when Bacula wants a new Volume name. The Volume + # name returned, if any, must be stored in job.VolumeName + jobid = job.JobId + client = job.Client + numvol = job.NumVols; + print job.CatalogRes + job.JobReport = "JobId=%d Client=%s NumVols=%d" % (jobid, client, numvol) + job.JobReport="Python before New Volume set for Job.\n" + Vol = "TestA-%d" % numvol + job.JobReport = "Exists=%d TestA-%d" % (job.DoesVolumeExist(Vol), numvol) + job.VolumeName="TestA-%d" % numvol + job.JobReport="Python after New Volume set for Job.\n" + return 1 + + def VolumePurged(self, job): + # Called when a Volume is purged. 
The Volume name can be referenced + # with job.VolumeName + noop = 1 + + + +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/concepts/recycling.tex b/docs/manuals/de/concepts/recycling.tex new file mode 100644 index 00000000..c2962d51 --- /dev/null +++ b/docs/manuals/de/concepts/recycling.tex @@ -0,0 +1,717 @@ +%% +%% + +\chapter{Automatic Volume Recycling} +\label{RecyclingChapter} +\index[general]{Recycling!Automatic Volume } +\index[general]{Automatic Volume Recycling } + +By default, once Bacula starts writing a Volume, it can append to the +volume, but it will not overwrite and thus destroy the existing data. +However, when Bacula {\bf recycles} a Volume, the Volume becomes available +for reuse, and Bacula can at some later time overwrite the previous +contents of that Volume. Thus all previous data will be lost. If the +Volume is a tape, the tape will be rewritten from the beginning. If the +Volume is a disk file, the file will be truncated before being rewritten. + +You may not want Bacula to automatically recycle (reuse) tapes. This would +require a large number of tapes though, and in such a case, it is possible +to manually recycle tapes. For more on manual recycling, see the section +entitled \ilink{ Manually Recycling Volumes}{manualrecycling} below in this +chapter. + +Most people prefer to have a Pool of tapes that are used for daily backups and +recycled once a week, another Pool of tapes that are used for Full backups +once a week and recycled monthly, and finally a Pool of tapes that are used +once a month and recycled after a year or two. With a scheme like this, the +number of tapes in your pool or pools remains constant. + +By properly defining your Volume Pools with appropriate Retention periods, +Bacula can manage the recycling (such as defined above) automatically. + +Automatic recycling of Volumes is controlled by four records in the {\bf +Pool} resource definition in the Director's configuration file. These four +records are: + +\begin{itemize} +\item AutoPrune = yes +\item VolumeRetention = \lt{}time\gt{} +\item Recycle = yes +\item RecyclePool = \lt{}APool\gt{} (\textit{This requires Bacula 2.1.4 or greater}) +\end{itemize} + +The first three of these directives are all you need assuming that you fill +each of your Volumes then wait the Volume Retention period before +reusing them. If you want Bacula to stop using a Volume and recycle +it before it is full, you will need to use one or more additional +directives such as: +\begin{itemize} +\item Use Volume Once = yes +\item Volume Use Duration = ttt +\item Maximum Volume Jobs = nnn +\item Maximum Volume Bytes = mmm +\end{itemize} +Please see below and +the \ilink{Basic Volume Management}{DiskChapter} chapter +of this manual for more complete examples. + +Automatic recycling of Volumes is performed by Bacula only when it wants a +new Volume and no appendable Volumes are available in the Pool. It will then +search the Pool for any Volumes with the {\bf Recycle} flag set and whose +Volume Status is {\bf Full}. At that point, the recycling occurs in two steps. +The first is that the Catalog for a Volume must be purged of all Jobs and +Files contained on that Volume, and the second step is the actual recycling of +the Volume. The Volume will be purged if the VolumeRetention period has +expired. When a Volume is marked as Purged, it means that no Catalog records +reference that Volume, and the Volume can be recycled. Until recycling +actually occurs, the Volume data remains intact.
If no Volumes can be found +for recycling for any of the reasons stated above, Bacula will request +operator intervention (i.e. it will ask you to label a new volume). + +A key point mentioned above, that can be a source of frustration, is that Bacula +will only recycle purged Volumes if there is no other appendable Volume +available, otherwise, it will always write to an appendable Volume before +recycling even if there are Volume marked as Purged. This preserves your data +as long as possible. So, if you wish to "force" Bacula to use a purged +Volume, you must first ensure that no other Volume in the Pool is marked {\bf +Append}. If necessary, you can manually set a volume to {\bf Full}. The reason +for this is that Bacula wants to preserve the data on your old tapes (even +though purged from the catalog) as long as absolutely possible before +overwriting it. There are also a number of directives such as +{\bf Volume Use Duration} that will automatically mark a volume as {\bf +Used} and thus no longer appendable. + +\label{AutoPruning} +\section{Automatic Pruning} +\index[general]{Automatic Pruning} +\index[general]{Pruning!Automatic} + +As Bacula writes files to tape, it keeps a list of files, jobs, and volumes +in a database called the catalog. Among other things, the database helps +Bacula to decide which files to back up in an incremental or differential +backup, and helps you locate files on past backups when you want to restore +something. However, the catalog will grow larger and larger as time goes +on, and eventually it can become unacceptably large. + +Bacula's process for removing entries from the catalog is called Pruning. +The default is Automatic Pruning, which means that once an entry reaches a +certain age (e.g. 30 days old) it is removed from the catalog. Once a job +has been pruned, you can still restore it from the backup tape, but one +additional step is required: scanning the volume with bscan. The +alternative to Automatic Pruning is Manual Pruning, in which you explicitly +tell Bacula to erase the catalog entries for a volume. You'd usually do +this when you want to reuse a Bacula volume, because there's no point in +keeping a list of files that USED TO BE on a tape. Or, if the catalog is +starting to get too big, you could prune the oldest jobs to save space. +Manual pruning is done with the \ilink{ prune command}{ManualPruning} in +the console. (thanks to Bryce Denney for the above explanation). + +\section{Pruning Directives} +\index[general]{Pruning Directives } +\index[general]{Directives!Pruning } + +There are three pruning durations. All apply to catalog database records and +not to the actual data in a Volume. The pruning (or retention) durations are +for: Volumes (Media records), Jobs (Job records), and Files (File records). +The durations inter-depend a bit because if Bacula prunes a Volume, it +automatically removes all the Job records, and all the File records. Also when +a Job record is pruned, all the File records for that Job are also pruned +(deleted) from the catalog. + +Having the File records in the database means that you can examine all the +files backed up for a particular Job. They take the most space in the catalog +(probably 90-95\% of the total). When the File records are pruned, the Job +records can remain, and you can still examine what Jobs ran, but not the +details of the Files backed up. In addition, without the File records, you +cannot use the Console restore command to restore the files. 
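+
+As a rough illustration of where each of the three durations is set, the
+directive names below are the standard ones, but the values are only examples
+and should be chosen to match your own needs and catalog size:
+
+\footnotesize
+\begin{verbatim}
+Client {
+  Name = client-fd
+  ...                          # other Client directives omitted
+  AutoPrune = yes
+  File Retention = 60 days     # File records (the bulk of the catalog)
+  Job Retention = 6 months     # Job records
+}
+Pool {
+  Name = Full-Pool
+  ...                          # other Pool directives omitted
+  AutoPrune = yes
+  Volume Retention = 6 months  # Volume (Media) records
+}
+\end{verbatim}
+\normalsize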
+ +When a Job record is pruned, the Volume (Media record) for that Job can still +remain in the database, and if you do a "list volumes", you will see the +volume information, but the Job records (and their File records) will no longer +be available. + +In each case, pruning removes information about where older files are, but it +also prevents the catalog from growing to be too large. You choose the +retention periods as a function of how many files you are backing up, the +time periods you want to keep those records online, and the size of the +database. You can always re-insert the records (with 98\% of the original data) +by using "bscan" to scan in a whole Volume or any part of the volume that +you want. + +By setting {\bf AutoPrune} to {\bf yes} you will permit {\bf Bacula} to +automatically prune all Volumes in the Pool when a Job needs another Volume. +Volume pruning means removing records from the catalog. It does not shrink the +size of the Volume or affect the Volume data until the Volume gets +overwritten. When a Job requests another volume and there are no Volumes with +Volume Status {\bf Append} available, Bacula will begin volume pruning. This +means that all Jobs that are older than the {\bf VolumeRetention} period will +be pruned from every Volume that has Volume Status {\bf Full} or {\bf Used} +and has Recycle set to {\bf yes}. Pruning consists of deleting the +corresponding Job, File, and JobMedia records from the catalog database. No +change to the physical data on the Volume occurs during the pruning process. +When all files are pruned from a Volume (i.e. no records in the catalog), the +Volume will be marked as {\bf Purged} implying that no Jobs remain on the +volume. The Pool records that control the pruning are described below. + +\begin{description} + +\item [AutoPrune = \lt{}yes|no\gt{}] + \index[console]{AutoPrune } + If AutoPrune is set to {\bf yes} (default), Bacula + will automatically apply the Volume retention period when running a Job and + it needs a new Volume but no appendable volumes are available. At that point, + Bacula will prune all Volumes that can be pruned (i.e. AutoPrune set) in an + attempt to find a usable volume. If during the autoprune, all files are + pruned from the Volume, it will be marked with VolStatus {\bf Purged}. The + default is {\bf yes}. Note that although the File and Job records may be + pruned from the catalog, a Volume will be marked Purged (and hence + ready for recycling) only if the Volume status is Append, Full, Used, or Error. + If the Volume has another status, such as Archive, Read-Only, Disabled, + Busy, or Cleaning, the Volume status will not be changed to Purged. + +\item [Volume Retention = \lt{}time-period-specification\gt{}] + \index[console]{Volume Retention} + The Volume Retention record defines the length of time that Bacula will + guarantee that the Volume is not reused counting from the time the last + job stored on the Volume terminated. A key point is that this time + period is not even considered as long as the Volume remains appendable. + The Volume Retention period countdown begins only when the Append + status has been changed to some other status (Full, Used, Purged, ...). + + When this time period expires, and if {\bf AutoPrune} is set to {\bf + yes}, and a new Volume is needed, but no appendable Volume is available, + Bacula will prune (remove) Job records that are older than the specified + Volume Retention period.
+ + The Volume Retention period takes precedence over any Job Retention + period you have specified in the Client resource. It should also be + noted, that the Volume Retention period is obtained by reading the + Catalog Database Media record rather than the Pool resource record. + This means that if you change the VolumeRetention in the Pool resource + record, you must ensure that the corresponding change is made in the + catalog by using the {\bf update pool} command. Doing so will insure + that any new Volumes will be created with the changed Volume Retention + period. Any existing Volumes will have their own copy of the Volume + Retention period that can only be changed on a Volume by Volume basis + using the {\bf update volume} command. + + When all file catalog entries are removed from the volume, its VolStatus is + set to {\bf Purged}. The files remain physically on the Volume until the + volume is overwritten. + + Retention periods are specified in seconds, minutes, hours, days, weeks, + months, quarters, or years on the record. See the + \ilink{Configuration chapter}{Time} of this manual for + additional details of time specification. + +The default is 1 year. +% TODO: if that is the format, should it be in quotes? decide on a style + +\item [Recycle = \lt{}yes|no\gt{}] + \index[fd]{Recycle } + This statement tells Bacula whether or not the particular Volume can be + recycled (i.e. rewritten). If Recycle is set to {\bf no} (the + default), then even if Bacula prunes all the Jobs on the volume and it + is marked {\bf Purged}, it will not consider the tape for recycling. If + Recycle is set to {\bf yes} and all Jobs have been pruned, the volume + status will be set to {\bf Purged} and the volume may then be reused + when another volume is needed. If the volume is reused, it is relabeled + with the same Volume Name, however all previous data will be lost. + \end{description} + + It is also possible to "force" pruning of all Volumes in the Pool + associated with a Job by adding {\bf Prune Files = yes} to the Job resource. + +\label{Recycling} +\label{RecyclingAlgorithm} +\section{Recycling Algorithm} +\index[general]{Algorithm!Recycling } +\index[general]{Recycling Algorithm } + +After all Volumes of a Pool have been pruned (as mentioned above, this happens +when a Job needs a new Volume and no appendable Volumes are available), Bacula +will look for the oldest Volume that is Purged (all Jobs and Files expired), +and if the {\bf Recycle} flag is on (Recycle=yes) for that Volume, Bacula will +relabel it and write new data on it. + +As mentioned above, there are two key points for getting a Volume +to be recycled. First, the Volume must no longer be marked Append (there +are a number of directives to automatically make this change), and second +since the last write on the Volume, one or more of the Retention periods +must have expired so that there are no more catalog backup job records +that reference that Volume. Once both those conditions are satisfied, +the volume can be marked Purged and hence recycled. + +The full algorithm that Bacula uses when it needs a new Volume is: +\index[general]{New Volume Algorithm} +\index[general]{Algorithm!New Volume} + +The algorithm described below assumes that AutoPrune is enabled, +that Recycling is turned on, and that you have defined +appropriate Retention periods, or used the defaults for all these +items. + +\begin{itemize} +\item If the request is for an Autochanger device, look only + for Volumes in the Autochanger (i.e. 
with InChanger set and that have + the correct Storage device). +\item Search the Pool for a Volume with VolStatus=Append (if there is more + than one, the Volume with the oldest date last written is chosen. If + two have the same date then the one with the lowest MediaId is chosen). +\item Search the Pool for a Volume with VolStatus=Recycle and the InChanger + flag is set true (if there is more than one, the Volume with the oldest + date last written is chosen. If two have the same date then the one + with the lowest MediaId is chosen). +\item Try recycling any purged Volumes. +\item Prune volumes, applying the Volume retention period (Volumes with VolStatus + Full, Used, or Append are pruned). Note that even if all the File and Job + records are pruned from a Volume, the Volume will not be marked Purged + until the Volume retention period expires. +\item Search the Pool for a Volume with VolStatus=Purged. +\item If a Pool named "Scratch" exists, search for a Volume and if found + move it to the current Pool for the Job and use it. Note that when + the Scratch Volume is moved into the current Pool, the basic + Pool defaults are applied as if it is a newly labeled Volume + (equivalent to an {\bf update volume from pool} command). +\item If we were looking for Volumes in the Autochanger, go back to + step 2 above, but this time, look for any Volume whether or not + it is in the Autochanger. +\item Attempt to create a new Volume if automatic labeling is enabled. + If Python is enabled, a Python NewVolume event is generated before + the Label Format directive is used. If the maximum number of Volumes + specified for the pool is reached, a new Volume will not be created. +\item Prune the oldest Volume if RecycleOldestVolume=yes (the Volume with the + oldest LastWritten date and VolStatus equal to Full, Recycle, Purged, Used, + or Append is chosen). This record ensures that all retention periods are + properly respected. +\item Purge the oldest Volume if PurgeOldestVolume=yes (the Volume with the + oldest LastWritten date and VolStatus equal to Full, Recycle, Purged, Used, + or Append is chosen). We strongly recommend against the use of {\bf + PurgeOldestVolume} as it can quite easily lead to loss of current backup + data. +\item Give up and ask the operator. +\end{itemize} + +The above occurs when Bacula has finished writing a Volume or when no Volume +is present in the drive. + +On the other hand, if you have inserted a different Volume after the last job, +and Bacula recognizes the Volume as valid, it will request authorization from +the Director to use this Volume. In this case, if you have set {\bf Recycle +Current Volume = yes} and the Volume is marked as Used or Full, Bacula will +prune the volume, and if all jobs were removed during the pruning (respecting +the retention periods), the Volume will be recycled and used. + +The recycling algorithm in this case is: +\begin{itemize} +\item If the VolStatus is {\bf Append} or {\bf Recycle} + is set, the volume will be used. +\item If {\bf Recycle Current Volume} is set and the volume is marked {\bf + Full} or {\bf Used}, Bacula will prune the volume (applying the retention + period). If all Jobs are pruned from the volume, it will be recycled. +\end{itemize} + +This permits users to manually change the Volume every day and load tapes in +an order different from what is in the catalog, and if the volume does not +contain a current copy of your backup data, it will be used.
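+
+For example, a Pool intended to work this way might be sketched roughly as
+follows; the directive names are standard, but the retention value is purely
+illustrative and must be shorter than your rotation interval for the scheme
+to work:
+
+\footnotesize
+\begin{verbatim}
+Pool {
+  Name = Daily
+  Pool Type = Backup
+  Recycle = yes
+  AutoPrune = yes
+  Volume Retention = 1 day        # illustrative only
+  Recycle Current Volume = yes    # prune and reuse the mounted tape if possible
+}
+\end{verbatim}
+\normalsize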
+ +A few points from Alan Brown to keep in mind: + +\begin{enumerate} +\item If a pool doesn't have maximum volumes defined then Bacula will prefer to + demand new volumes over forcibly purging older volumes. + +\item If volumes become free through pruning and the Volume retention period has + expired, then they get marked as "purged" and are immediately available for + recycling - these will be used in preference to creating new volumes. + +\item If the Job, File, and Volume retention periods are different, then + it's common to see a tape with no files or jobs listed in the database, + but which is still not marked as "purged". +\end{enumerate} + + +\section{Recycle Status} +\index[general]{Status!Recycle } +\index[general]{Recycle Status } + +Each Volume inherits the Recycle status (yes or no) from the Pool resource +record when the Media record is created (normally when the Volume is labeled). +This Recycle status is stored in the Media record of the Catalog. Using +the Console program, you may subsequently change the Recycle status for each +Volume. For example in the following output from {\bf list volumes}: + +\footnotesize +\begin{verbatim} ++----------+-------+--------+---------+------------+--------+-----+ +| VolumeNa | Media | VolSta | VolByte | LastWritte | VolRet | Rec | ++----------+-------+--------+---------+------------+--------+-----+ +| File0001 | File | Full | 4190055 | 2002-05-25 | 14400 | 1 | +| File0002 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0003 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0004 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0005 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0006 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0007 | File | Purged | 1896466 | 2002-05-26 | 14400 | 1 | ++----------+-------+--------+---------+------------+--------+-----+ +\end{verbatim} +\normalsize + +all the volumes are marked as recyclable, and the last Volume, {\bf File0007} +has been purged, so it may be immediately recycled. The other volumes are all +marked recyclable and when their Volume Retention period (14400 seconds or four +hours) expires, they will be eligible for pruning, and possibly recycling. +Even though Volume {\bf File0007} has been purged, all the data on the Volume +is still recoverable. A purged Volume simply means that there are no entries +in the Catalog. Even if the Volume Status is changed to {\bf Recycle}, the +data on the Volume will be recoverable. The data is lost only when the Volume +is re-labeled and re-written. + +To modify Volume {\bf File0001} so that it cannot be recycled, you use the +{\bf update volume pool=File} command in the console program, or simply {\bf +update} and Bacula will prompt you for the information. 
+ +\footnotesize +\begin{verbatim} ++----------+------+-------+---------+-------------+-------+-----+ +| VolumeNa | Media| VolSta| VolByte | LastWritten | VolRet| Rec | ++----------+------+-------+---------+-------------+-------+-----+ +| File0001 | File | Full | 4190055 | 2002-05-25 | 14400 | 0 | +| File0002 | File | Full | 1897236 | 2002-05-26 | 14400 | 1 | +| File0003 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0004 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0005 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0006 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0007 | File | Purged| 1896466 | 2002-05-26 | 14400 | 1 | ++----------+------+-------+---------+-------------+-------+-----+ +\end{verbatim} +\normalsize + +In this case, {\bf File0001} will never be automatically recycled. The same +effect can be achieved by setting the Volume Status to Read-Only. + +As you have noted, the Volume Status (VolStatus) column in the +catalog database contains the current status of the Volume, which +is normally maintained automatically by Bacula. To give you an +idea of some of the values it can take during the life cycle of +a Volume, here is a picture created by Arno Lehmann: + +\footnotesize +\begin{verbatim} +A typical volume life cycle is like this: + + because job count or size limit exceeded + Append ----------------------------------------> Used + ^ | + | First Job writes to Retention time passed | + | the volume and recycling takes | + | place | + | v + Recycled <-------------------------------------- Purged + Volume is selected for reuse + +\end{verbatim} +\normalsize + + +\section{Making Bacula Use a Single Tape} +\label{singletape} +\index[general]{Tape!Making Bacula Use a Single} +\index[general]{Making Bacula Use a Single Tape} + +Most people will want Bacula to fill a tape and when it is full, a new tape +will be mounted, and so on. However, as an extreme example, it is possible for +Bacula to write on a single tape, and every night to rewrite it. To get this +to work, you must do two things: first, set the VolumeRetention to less than +your save period (one day), and the second item is to make Bacula mark the +tape as full after using it once. This is done using {\bf UseVolumeOnce = +yes}. If this latter record is not used and the tape is not full after the +first time it is written, Bacula will simply append to the tape and eventually +request another volume. Using the tape only once, forces the tape to be marked +{\bf Full} after each use, and the next time {\bf Bacula} runs, it will +recycle the tape. + +An example Pool resource that does this is: + +\footnotesize +\begin{verbatim} +Pool { + Name = DDS-4 + Use Volume Once = yes + Pool Type = Backup + AutoPrune = yes + VolumeRetention = 12h # expire after 12 hours + Recycle = yes +} +\end{verbatim} +\normalsize + +\section{Daily, Weekly, Monthly Tape Usage Example} +\label{usageexample} +\index[general]{Daily, Weekly, Monthly Tape Usage Example } +\index[general]{Example!Daily Weekly Monthly Tape Usage } + +This example is meant to show you how one could define a fixed set of volumes +that Bacula will rotate through on a regular schedule. There are an infinite +number of such schemes, all of which have various advantages and +disadvantages. + +We start with the following assumptions: + +\begin{itemize} +\item A single tape has more than enough capacity to do a full save. +\item There are ten tapes that are used on a daily basis for incremental + backups. 
They are prelabeled Daily1 ... Daily10. +\item There are four tapes that are used on a weekly basis for full backups. + They are labeled Week1 ... Week4. +\item There are 12 tapes that are used on a monthly basis for full backups. + They are numbered Month1 ... Month12 +\item A full backup is done every Saturday evening (tape inserted Friday + evening before leaving work). +\item No backups are done over the weekend (this is easy to change). +\item The first Friday of each month, a Monthly tape is used for the Full + backup. +\item Incremental backups are done Monday - Friday (actually Tue-Fri + mornings). +% TODO: why this "actually"? does this need to be explained? + \end{itemize} + +We start the system by doing a Full save to one of the weekly volumes or one +of the monthly volumes. The next morning, we remove the tape and insert a +Daily tape. Friday evening, we remove the Daily tape and insert the next tape +in the Weekly series. Monday, we remove the Weekly tape and re-insert the +Daily tape. On the first Friday of the next month, we insert the next Monthly +tape in the series rather than a Weekly tape, then continue. When a Daily tape +finally fills up, {\bf Bacula} will request the next one in the series, and +the next day when you notice the email message, you will mount it and {\bf +Bacula} will finish the unfinished incremental backup. + +What does this give? Well, at any point, you will have the last complete +Full save plus several Incremental saves. For any given file you want to +recover (or your whole system), you will have a copy of that file every day +for at least the last 14 days. For older versions, you will have at least three +and probably four Friday full saves of that file, and going back further, you +will have a copy of that file made on the beginning of the month for at least +a year. + +So you have copies of any file (or your whole system) for at least a year, but +as you go back in time, the time between copies increases from daily to weekly +to monthly. + +What would the Bacula configuration look like to implement such a scheme? 
+ +\footnotesize +\begin{verbatim} +Schedule { + Name = "NightlySave" + Run = Level=Full Pool=Monthly 1st sat at 03:05 + Run = Level=Full Pool=Weekly 2nd-5th sat at 03:05 + Run = Level=Incremental Pool=Daily tue-fri at 03:05 +} +Job { + Name = "NightlySave" + Type = Backup + Level = Full + Client = LocalMachine + FileSet = "File Set" + Messages = Standard + Storage = DDS-4 + Pool = Daily + Schedule = "NightlySave" +} +# Definition of file storage device +Storage { + Name = DDS-4 + Address = localhost + SDPort = 9103 + Password = XXXXXXXXXXXXX + Device = FileStorage + Media Type = 8mm +} +FileSet { + Name = "File Set" + Include = signature=MD5 { + fffffffffffffffff + } + Exclude = { *.o } +} +Pool { + Name = Daily + Pool Type = Backup + AutoPrune = yes + VolumeRetention = 10d # recycle in 10 days + Maximum Volumes = 10 + Recycle = yes +} +Pool { + Name = Weekly + Use Volume Once = yes + Pool Type = Backup + AutoPrune = yes + VolumeRetention = 30d # recycle in 30 days (default) + Recycle = yes +} +Pool { + Name = Monthly + Use Volume Once = yes + Pool Type = Backup + AutoPrune = yes + VolumeRetention = 365d # recycle in 1 year + Recycle = yes +} +\end{verbatim} +\normalsize + +\section{ Automatic Pruning and Recycling Example} +\label{PruningExample} +\index[general]{Automatic Pruning and Recycling Example } +\index[general]{Example!Automatic Pruning and Recycling } + +Perhaps the best way to understand the various resource records that come into +play during automatic pruning and recycling is to run a Job that goes through +the whole cycle. If you add the following resources to your Director's +configuration file: + +\footnotesize +\begin{verbatim} +Schedule { + Name = "30 minute cycle" + Run = Level=Full Pool=File Messages=Standard Storage=File + hourly at 0:05 + Run = Level=Full Pool=File Messages=Standard Storage=File + hourly at 0:35 +} +Job { + Name = "Filetest" + Type = Backup + Level = Full + Client=XXXXXXXXXX + FileSet="Test Files" + Messages = Standard + Storage = File + Pool = File + Schedule = "30 minute cycle" +} +# Definition of file storage device +Storage { + Name = File + Address = XXXXXXXXXXX + SDPort = 9103 + Password = XXXXXXXXXXXXX + Device = FileStorage + Media Type = File +} +FileSet { + Name = "Test Files" + Include = signature=MD5 { + fffffffffffffffff + } + Exclude = { *.o } +} +Pool { + Name = File + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "File" + AutoPrune = yes + VolumeRetention = 4h + Maximum Volumes = 12 + Recycle = yes +} +\end{verbatim} +\normalsize + +Where you will need to replace the {\bf ffffffffff}'s by the appropriate files +to be saved for your configuration. For the FileSet Include, choose a +directory that has one or two megabytes maximum since there will probably be +approximately eight copies of the directory that {\bf Bacula} will cycle through. + +In addition, you will need to add the following to your Storage daemon's +configuration file: + +\footnotesize +\begin{verbatim} +Device { + Name = FileStorage + Media Type = File + Archive Device = /tmp + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +\end{verbatim} +\normalsize + +With the above resources, Bacula will start a Job every half hour that saves a +copy of the directory you chose to /tmp/File0001 ... /tmp/File0012. After 4 +hours, Bacula will start recycling the backup Volumes (/tmp/File0001 ...). You +should see this happening in the output produced. 
Bacula will automatically +create the Volumes (Files) the first time it uses them. + +To turn it off, either delete all the resources you've added, or simply +comment out the {\bf Schedule} record in the {\bf Job} resource. + +\section{Manually Recycling Volumes} +\label{manualrecycling} +\index[general]{Volumes!Manually Recycling } +\index[general]{Manually Recycling Volumes } + +Although automatic recycling of Volumes is implemented in version 1.20 and +later (see the +\ilink{Automatic Recycling of Volumes}{RecyclingChapter} chapter of +this manual), you may want to manually force reuse (recycling) of a Volume. + +Assuming that you want to keep the Volume name, but you simply want to write +new data on the tape, the steps to take are: + +\begin{itemize} +\item Use the {\bf update volume} command in the Console to ensure that the + {\bf Recycle} field is set to {\bf 1} +\item Use the {\bf purge jobs volume} command in the Console to mark the + Volume as {\bf Purged}. Check by using {\bf list volumes}. +\end{itemize} + +Once the Volume is marked Purged, it will be recycled the next time a Volume +is needed. + +If you wish to reuse the tape by giving it a new name, follow the following +steps: + +\begin{itemize} +\item Use the {\bf purge jobs volume} command in the Console to mark the + Volume as {\bf Purged}. Check by using {\bf list volumes}. +\item In Bacula version 1.30 or greater, use the Console {\bf relabel} + command to relabel the Volume. +\end{itemize} + +Please note that the relabel command applies only to tape Volumes. + +For Bacula versions prior to 1.30 or to manually relabel the Volume, use the +instructions below: + +\begin{itemize} +\item Use the {\bf delete volume} command in the Console to delete the Volume + from the Catalog. +\item If a different tape is mounted, use the {\bf unmount} command, + remove the tape, and insert the tape to be renamed. +\item Write an EOF mark in the tape using the following commands: + +\footnotesize +\begin{verbatim} + mt -f /dev/nst0 rewind + mt -f /dev/nst0 weof +\end{verbatim} +\normalsize + +where you replace {\bf /dev/nst0} with the appropriate device name on your +system. +\item Use the {\bf label} command to write a new label to the tape and to + enter it in the catalog. +\end{itemize} + +Please be aware that the {\bf delete} command can be dangerous. Once it is +done, to recover the File records, you must either restore your database as it +was before the {\bf delete} command, or use the {\bf bscan} utility program to +scan the tape and recreate the database entries. diff --git a/docs/manuals/de/concepts/requirements.tex b/docs/manuals/de/concepts/requirements.tex new file mode 100644 index 00000000..fc85919d --- /dev/null +++ b/docs/manuals/de/concepts/requirements.tex @@ -0,0 +1,35 @@ +%% +%% + +\chapter{Systemvoraussetzungen} +\label{SysReqs} +\index[general]{Systemvoraussetzungen } +\index[general]{Voraussetzungen!des System } + +\begin{itemize} +\item {\bf Bacula} ist auf RedHat-Linux, FreeBSD- und + Solaris-Systemen kompiliert und installiert worden. +\item Zur Kompilierung ben\"{o}tigen Sie GNU C++ in der Version 2.95 oder h\"{o}her. Sie k\"{o}nnen es mit anderen Compilern oder \"{a}lteren Versionen versuchen, doch bieten wir hierf\"{u}r keine Unterst\"{u}tzung. +Wir haben Bacula unter RH8.0/RH9/RHEL 3.0/FC3 mit GCC 3.4 erfolgreich kompiliert und verwendet. +Beachten Sie bitte, dass GNU C++ normalerweise ein eigenes Paket (z.B. RPM) neben GNU C ist. Auf RedHat-Systemen ist der C++-Compiler im RPM-Paket {\bf gcc-c++}. 
+ +\item Bacula ben\"{o}tigt bestimmte Pakete von Drittanbietern, die Sie au{\ss}er ``MySQL'' und ``PostgreSQL'' alle in den Releases {\bf depkgs} und {\bf depkgs1} finden. + +\item Wenn Sie die Win32-Quelldateien kompilieren wollen, ben\"{o}tigen Sie einen Microsoft + Visual C++-Compiler (oder Visual Studio). Obwohl sich alle Komponenten kompilieren lassen + (Console bringt einige Warnmeldungen), wurde nur der File-D\"{a}mon getestet. + +\item {\bf Bacula} erfordert um zu funktionieren eine gute Implementierung der PThreads. Auf einigen BSD-Systemen ist das nicht gegeben. + +\item Bei der Codierung achteten wir auf Portabilit\"{a}t. Daher ist der Code gr\"{o}{\ss}tenteils POSIX-kompatibel und m\"{u}sste sich daher verh\"{a}ltnism\"{a}{\ss}ig leicht auf POSIX-Systeme \"{u}bertragen lassen. + +\item Die GNOME-Konsole wurde unter GNOME 2.x. entwickelt und getestet. Sie l\"{a}uft auch unter GNOME 1.4, doch ist diese Version veraltet und wird daher nicht mehr gewartet. + +\item Das wxWidgets-Konsolenprogramm wurde mit der letzten stabilen ANSI- (nicht Unicode-)Version von \elink{wxWidgets}{http://www.wxwidgets.org/} (2.6.1) entwickelt und getestet. Es arbeitet gut mit der Windows- und GTK+-Version von wxWidgets zusammen und sollte auch auf anderen Plattformen laufen, die wxWidgets unterst\"{u}tzen. + +\item Das Tray-Monitorprogramm wurde f\"{u}r GTK+-2.x entwickelt. Es ben\"{o}tigt Gnome in der Version 2.2 oder h\"{o}her, KDE in der Version 3.1 oder h\"{o}her oder einen anderen Window-Manager, der den Standard f\"{u}r System-Trays von \elink{FreeDesktop}{http://www.freedesktop.org/Standards/systemtray-spec} unterst\"{u}tzt. + +\item Wenn sie eine Kommandozeileneditierung und -history nutzen wollen, brauchen sie die Headerdatei /usr/include/termcap.h und m\"{u}ssen entweder die ``Termcap''- oder die ``Ncurses''- Bibliothek geladen haben (libtermcap-devel oder ncurses-devel). + +\item Wenn sie DVDs als Sicherungsmedium benutzen wollen, m\"{u}ssen Sie sich die \elink{dvd+rw-tools 5.21.4.10.10.8}{http://fy.chalmers.se/~appro/linux/DVD+RW/} herunterladen. Benutzen sie den \elink{patch}{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/patches/dvd+rw-tools-5.21.4.10.8.bacula.patch}, um diese Hilfsprogramme zu Bacula kompatibel zu machen, kompilieren und installieren Sie sie. Verwenden Sie nicht die ``dvd+rw-tools'', die Ihrer Distribution beiliegen. Diese werden zusammen mit Bacula nicht funktionieren. +\end{itemize} diff --git a/docs/manuals/de/concepts/restore.tex b/docs/manuals/de/concepts/restore.tex new file mode 100644 index 00000000..05e23fd6 --- /dev/null +++ b/docs/manuals/de/concepts/restore.tex @@ -0,0 +1,1438 @@ +%% +%% +\chapter{The Restore Command} +\label{RestoreChapter} +\index[general]{Command!Console Restore} +\index[general]{Console Restore Command} + +\section{General} +\index[general]{General } + +Below, we will discuss restoring files with the Console {\bf restore} command, +which is the recommended way of doing restoring files. It is not possible +to restore files by automatically starting a job as you do with Backup, +Verify, ... jobs. However, in addition to the console restore command, +there is a standalone program named {\bf bextract}, which also permits +restoring files. For more information on this program, please see the +\ilink{Bacula Utility Programs}{bextract} chapter of this manual. 
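+
+For orientation, a typical {\bf bextract} invocation looks something like the
+following sketch (the device name, bootstrap file, and target directory here
+are examples only and must be adapted to your installation):
+
+\footnotesize
+\begin{verbatim}
+bextract -b /home/kern/bacula/working/restore.bsr /dev/nst0 /tmp/bacula-restores
+\end{verbatim}
+\normalsize
+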
We
+don't particularly recommend the {\bf bextract} program because it
+lacks many of the features of the normal Bacula restore, such as the
+ability to restore Win32 files to Unix systems and the ability to
+restore access control lists (ACLs). As a consequence, we recommend,
+wherever possible, using Bacula itself for restores as described below.
+
+You may also want to look at the {\bf bls} program in the same chapter,
+which allows you to list the contents of your Volumes. Finally, if you
+have an old Volume that is no longer in the catalog, you can restore the
+catalog entries using the program named {\bf bscan}, documented in the same
+\ilink{Bacula Utility Programs}{bscan} chapter.
+
+In general, to restore a file or a set of files, you must run a {\bf restore}
+job. That is a job with {\bf Type = Restore}. As a consequence, you will need
+a predefined {\bf restore} job in your {\bf bacula-dir.conf} (Director's
+config) file. The exact parameters (Client, FileSet, ...) that you define are
+not important, because you can either modify them manually before running the
+job or, if you use the {\bf restore} command explained below, Bacula will
+automatically set them for you. In fact, you can no longer simply run a
+restore job; you must use the restore command.
+
+Since Bacula is a network backup program, you must be aware that when you
+restore files, it is up to you to ensure that you or Bacula have selected the
+correct Client and the correct hard disk location for restoring those files.
+{\bf Bacula} will quite willingly back up client A and restore it by sending
+the files to a different directory on client B. Normally, you will want to
+avoid this, but assuming the operating systems are not too different in their
+file structures, this should work perfectly well, if so desired.
+By default, Bacula will restore data to the same Client that was backed
+up, and that data will be restored not to the original locations but to
+{\bf /tmp/bacula-restores}. You may modify any of these defaults when the
+restore command prompts you to run the job by selecting the {\bf mod}
+option.
+
+\label{Example1}
+\section{The Restore Command}
+\index[general]{Command!Restore}
+\index[general]{Restore Command}
+
+Since Bacula maintains a catalog of your files and of the Volumes (disk or
+tape) on which they are stored, it can do most of the bookkeeping work,
+allowing you simply to specify what kind of restore you want (current, or
+before a particular date) and what files to restore. Bacula will then do
+the rest.
+
+This is accomplished using the {\bf restore} command in the Console. First you
+select the kind of restore you want, then the JobIds are selected,
+the File records for those Jobs are placed in an internal Bacula directory
+tree, and the restore enters a file selection mode that allows you to
+interactively walk up and down the file tree selecting individual files to be
+restored. This mode is somewhat similar to the standard Unix {\bf restore}
+program's interactive file selection mode.
+
+If a Job's file records have been pruned from the catalog, the {\bf
+restore} command will be unable to find any files to restore. See below
+for more details on this.
+
+Within the Console program, after entering the {\bf restore} command, you are
+presented with the following selection prompt:
+
+\footnotesize
+\begin{verbatim}
+First you select one or more JobIds that contain files
+to be restored. You will be presented several methods
+of specifying the JobIds.
Then you will be allowed to +select which files from those JobIds are to be restored. +To select the JobIds, you have the following choices: + 1: List last 20 Jobs run + 2: List Jobs where a given File is saved + 3: Enter list of comma separated JobIds to select + 4: Enter SQL list command + 5: Select the most recent backup for a client + 6: Select backup for a client before a specified time + 7: Enter a list of files to restore + 8: Enter a list of files to restore before a specified time + 9: Find the JobIds of the most recent backup for a client + 10: Find the JobIds for a backup for a client before a specified time + 11: Enter a list of directories to restore for found JobIds + 12: Cancel +Select item: (1-12): +\end{verbatim} +\normalsize + +There are a lot of options, and as a point of reference, most people will +want to slect item 5 (the most recent backup for a client). The details +of the above options are: + +\begin{itemize} +\item Item 1 will list the last 20 jobs run. If you find the Job you want, + you can then select item 3 and enter its JobId(s). + +\item Item 2 will list all the Jobs where a specified file is saved. If you + find the Job you want, you can then select item 3 and enter the JobId. + +\item Item 3 allows you the enter a list of comma separated JobIds whose + files will be put into the directory tree. You may then select which + files from those JobIds to restore. Normally, you would use this option + if you have a particular version of a file that you want to restore and + you know its JobId. The most common options (5 and 6) will not select + a job that did not terminate normally, so if you know a file is + backed up by a Job that failed (possibly because of a system crash), you + can access it through this option by specifying the JobId. + +\item Item 4 allows you to enter any arbitrary SQL command. This is + probably the most primitive way of finding the desired JobIds, but at + the same time, the most flexible. Once you have found the JobId(s), you + can select item 3 and enter them. + +\item Item 5 will automatically select the most recent Full backup and all + subsequent incremental and differential backups for a specified Client. + These are the Jobs and Files which, if reloaded, will restore your + system to the most current saved state. It automatically enters the + JobIds found into the directory tree in an optimal way such that only + the most recent copy of any particular file found in the set of Jobs + will be restored. This is probably the most convenient of all the above + options to use if you wish to restore a selected Client to its most + recent state. + + There are two important things to note. First, this automatic selection + will never select a job that failed (terminated with an error status). + If you have such a job and want to recover one or more files from it, + you will need to explicitly enter the JobId in item 3, then choose the + files to restore. + + If some of the Jobs that are needed to do the restore have had their + File records pruned, the restore will be incomplete. Bacula currently + does not correctly detect this condition. You can however, check for + this by looking carefully at the list of Jobs that Bacula selects and + prints. If you find Jobs with the JobFiles column set to zero, when + files should have been backed up, then you should expect problems. + + If all the File records have been pruned, Bacula will realize that there + are no file records in any of the JobIds chosen and will inform you. 
It + will then propose doing a full restore (non-selective) of those JobIds. + This is possible because Bacula still knows where the beginning of the + Job data is on the Volumes, even if it does not know where particular + files are located or what their names are. + +\item Item 6 allows you to specify a date and time, after which Bacula will + automatically select the most recent Full backup and all subsequent + incremental and differential backups that started before the specified date + and time. + +\item Item 7 allows you to specify one or more filenames (complete path + required) to be restored. Each filename is entered one at a time or if you + prefix a filename with the less-than symbol (\lt{}) Bacula will read that + file and assume it is a list of filenames to be restored. If you + prefix the filename with a question mark (?), then the filename will + be interpreted as an SQL table name, and Bacula will include the rows + of that table in the list to be restored. The table must contain the + JobId in the first column and the FileIndex in the second column. + This table feature is intended for external programs that want to build + their own list of files to be restored. + The filename entry mode is terminated by entering a blank line. + +\item Item 8 allows you to specify a date and time before entering the + filenames. See Item 7 above for more details. + +\item Item 9 allows you find the JobIds of the most recent backup for + a client. This is much like option 5 (it uses the same code), but + those JobIds are retained internally as if you had entered them + manually. You may then select item 11 (see below) to restore one + or more directories. + +\item Item 10 is the same as item 9, except that it allows you to enter + a before date (as with item 6). These JobIds will then be retained + internally. + +\index[general]{Restore Directories} +\item Item 11 allows you to enter a list of JobIds from which you can + select directories to be restored. The list of JobIds can have been + previously created by using either item 9 or 10 on the menu. You + may then enter a full path to a directory name or a filename preceded + by a less than sign (\lt{}). The filename should contain a list + of directories to be restored. All files in those directories will + be restored, but if the directory contains subdirectories, nothing + will be restored in the subdirectory unless you explicitly enter its + name. + +\item Item 12 allows you to cancel the restore command. +\end{itemize} + +As an example, suppose that we select item 5 (restore to most recent state). +If you have not specified a client=xxx on the command line, it +it will then ask for the desired Client, which on my system, will print all +the Clients found in the database as follows: + +\footnotesize +\begin{verbatim} +Defined clients: + 1: Rufus + 2: Matou + 3: Polymatou + 4: Minimatou + 5: Minou + 6: MatouVerify + 7: PmatouVerify + 8: RufusVerify + 9: Watchdog +Select Client (File daemon) resource (1-9): +\end{verbatim} +\normalsize + +You will probably have far fewer Clients than this example, and if you have +only one Client, it will be automatically selected. In this case, I enter +{\bf Rufus} to select the Client. 
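+
+Note that most of these prompts can be answered in advance by adding keywords
+to the {\bf restore} command line. As a minimal sketch (using the Client from
+this example; the complete list of accepted keywords is described in the
+Console chapter), entering:
+
+\footnotesize
+\begin{verbatim}
+restore client=Rufus select current
+\end{verbatim}
+\normalsize
+
+skips the JobId menu and the Client prompt and proceeds directly with the
+most recent backup for that Client.
+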
Then Bacula needs to know what FileSet is +to be restored, so it prompts with: + +\footnotesize +\begin{verbatim} +The defined FileSet resources are: + 1: Full Set + 2: Other Files +Select FileSet resource (1-2): +\end{verbatim} +\normalsize + +If you have only one FileSet defined for the Client, it will be selected +automatically. I choose item 1, which is my full backup. Normally, you +will only have a single FileSet for each Job, and if your machines are +similar (all Linux) you may only have one FileSet for all your Clients. + +At this point, {\bf Bacula} has all the information it needs to find the most +recent set of backups. It will then query the database, which may take a bit +of time, and it will come up with something like the following. Note, some of +the columns are truncated here for presentation: + +\footnotesize +\begin{verbatim} ++-------+------+----------+-------------+-------------+------+-------+---------- +--+ +| JobId | Levl | JobFiles | StartTime | VolumeName | File | SesId | +VolSesTime | ++-------+------+----------+-------------+-------------+------+-------+---------- +--+ +| 1,792 | F | 128,374 | 08-03 01:58 | DLT-19Jul02 | 67 | 18 | +1028042998 | +| 1,792 | F | 128,374 | 08-03 01:58 | DLT-04Aug02 | 0 | 18 | +1028042998 | +| 1,797 | I | 254 | 08-04 13:53 | DLT-04Aug02 | 5 | 23 | +1028042998 | +| 1,798 | I | 15 | 08-05 01:05 | DLT-04Aug02 | 6 | 24 | +1028042998 | ++-------+------+----------+-------------+-------------+------+-------+---------- +--+ +You have selected the following JobId: 1792,1792,1797 +Building directory tree for JobId 1792 ... +Building directory tree for JobId 1797 ... +Building directory tree for JobId 1798 ... +cwd is: / +$ +\end{verbatim} +\normalsize + +Depending on the number of {\bf JobFiles} for each JobId, the {\bf Building +directory tree ..."} can take a bit of time. If you notice ath all the +JobFiles are zero, your Files have probably been pruned and you will not be +able to select any individual files -- it will be restore everything or +nothing. + +In our example, Bacula found four Jobs that comprise the most recent backup of +the specified Client and FileSet. Two of the Jobs have the same JobId because +that Job wrote on two different Volumes. The third Job was an incremental +backup to the previous Full backup, and it only saved 254 Files compared to +128,374 for the Full backup. The fourth Job was also an incremental backup +that saved 15 files. + +Next Bacula entered those Jobs into the directory tree, with no files marked +to be restored as a default, tells you how many files are in the tree, and +tells you that the current working directory ({\bf cwd}) is /. Finally, Bacula +prompts with the dollar sign (\$) to indicate that you may enter commands to +move around the directory tree and to select files. + +If you want all the files to automatically be marked when the directory +tree is built, you could have entered the command {\bf restore all}, or +at the \$ prompt, you can simply enter {\bf mark *}. + +Instead of choosing item 5 on the first menu (Select the most recent backup +for a client), if we had chosen item 3 (Enter list of JobIds to select) and we +had entered the JobIds {\bf 1792,1797,1798} we would have arrived at the same +point. + +One point to note, if you are manually entering JobIds, is that you must enter +them in the order they were run (generally in increasing JobId order). 
If you +enter them out of order and the same file was saved in two or more of the +Jobs, you may end up with an old version of that file (i.e. not the most +recent). + +Directly entering the JobIds can also permit you to recover data from +a Job that wrote files to tape but that terminated with an error status. + +While in file selection mode, you can enter {\bf help} or a question mark (?) +to produce a summary of the available commands: + +\footnotesize +\begin{verbatim} + Command Description + ======= =========== + cd change current directory + count count marked files in and below the cd + dir long list current directory, wildcards allowed + done leave file selection mode + estimate estimate restore size + exit same as done command + find find files, wildcards allowed + help print help + ls list current directory, wildcards allowed + lsmark list the marked files in and below the cd + mark mark dir/file to be restored recursively in dirs + markdir mark directory name to be restored (no files) + pwd print current working directory + unmark unmark dir/file to be restored recursively in dir + unmarkdir unmark directory name only no recursion + quit quit and do not do restore + ? print help +\end{verbatim} +\normalsize + +As a default no files have been selected for restore (unless you +added {\bf all} to the command line. If you want to restore +everything, at this point, you should enter {\bf mark *}, and then {\bf done} +and {\bf Bacula} will write the bootstrap records to a file and request your +approval to start a restore job. + +If you do not enter the above mentioned {\bf mark *} command, you will start +with an empty slate. Now you can simply start looking at the tree and {\bf +mark} particular files or directories you want restored. It is easy to make +a mistake in specifying a file to mark or unmark, and Bacula's error handling +is not perfect, so please check your work by using the {\bf ls} or {\bf dir} +commands to see what files are actually selected. Any selected file has its +name preceded by an asterisk. + +To check what is marked or not marked, enter the {\bf count} command, which +displays: + +\footnotesize +\begin{verbatim} +128401 total files. 128401 marked to be restored. + +\end{verbatim} +\normalsize + +Each of the above commands will be described in more detail in the next +section. We continue with the above example, having accepted to restore all +files as Bacula set by default. On entering the {\bf done} command, Bacula +prints: + +\footnotesize +\begin{verbatim} +Bootstrap records written to /home/kern/bacula/working/restore.bsr +The job will require the following + Volume(s) Storage(s) SD Device(s) +=========================================================================== + + DLT-19Jul02 Tape DLT8000 + DLT-04Aug02 Tape DLT8000 + +128401 files selected to restore. +Run Restore job +JobName: kernsrestore +Bootstrap: /home/kern/bacula/working/restore.bsr +Where: /tmp/bacula-restores +Replace: always +FileSet: Other Files +Client: Rufus +Storage: Tape +When: 2006-12-11 18:20:33 +Catalog: MyCatalog +Priority: 10 +OK to run? (yes/mod/no): + +\end{verbatim} +\normalsize + +Please examine each of the items very carefully to make sure that they are +correct. In particular, look at {\bf Where}, which tells you where in the +directory structure the files will be restored, and {\bf Client}, which +tells you which client will receive the files. Note that by default the +Client which will receive the files is the Client that was backed up. 
+These items will not always be completed with the correct values depending +on which of the restore options you chose. You can change any of these +default items by entering {\bf mod} and responding to the prompts. + +The above assumes that you have defined a {\bf Restore} Job resource in your +Director's configuration file. Normally, you will only need one Restore Job +resource definition because by its nature, restoring is a manual operation, +and using the Console interface, you will be able to modify the Restore Job to +do what you want. + +An example Restore Job resource definition is given below. + +Returning to the above example, you should verify that the Client name is +correct before running the Job. However, you may want to modify some of the +parameters of the restore job. For example, in addition to checking the Client +it is wise to check that the Storage device chosen by Bacula is indeed +correct. Although the {\bf FileSet} is shown, it will be ignored in restore. +The restore will choose the files to be restored either by reading the {\bf +Bootstrap} file, or if not specified, it will restore all files associated +with the specified backup {\bf JobId} (i.e. the JobId of the Job that +originally backed up the files). + +Finally before running the job, please note that the default location for +restoring files is {\bf not} their original locations, but rather the directory +{\bf /tmp/bacula-restores}. You can change this default by modifying your {\bf +bacula-dir.conf} file, or you can modify it using the {\bf mod} option. If you +want to restore the files to their original location, you must have {\bf +Where} set to nothing or to the root, i.e. {\bf /}. + +If you now enter {\bf yes}, Bacula will run the restore Job. The Storage +daemon will first request Volume {\bf DLT-19Jul02} and after the appropriate +files have been restored from that volume, it will request Volume {\bf +DLT-04Aug02}. + +\section{Selecting Files by Filename} +\index[general]{Selecting Files by Filename } +\index[general]{Filename!Selecting Files by } + +If you have a small number of files to restore, and you know the filenames, +you can either put the list of filenames in a file to be read by Bacula, or +you can enter the names one at a time. The filenames must include the full +path and filename. No wild cards are used. + +To enter the files, after the {\bf restore}, you select item number 7 from the +prompt list: + +\footnotesize +\begin{verbatim} +To select the JobIds, you have the following choices: + 1: List last 20 Jobs run + 2: List Jobs where a given File is saved + 3: Enter list of comma separated JobIds to select + 4: Enter SQL list command + 5: Select the most recent backup for a client + 6: Select backup for a client before a specified time + 7: Enter a list of files to restore + 8: Enter a list of files to restore before a specified time + 9: Find the JobIds of the most recent backup for a client + 10: Find the JobIds for a backup for a client before a specified time + 11: Enter a list of directories to restore for found JobIds + 12: Cancel +Select item: (1-12): +\end{verbatim} +\normalsize + +which then prompts you for the client name: + +\footnotesize +\begin{verbatim} +Defined Clients: + 1: Timmy + 2: Tibs + 3: Rufus +Select the Client (1-3): 3 +\end{verbatim} +\normalsize + +Of course, your client list will be different, and if you have only one +client, it will be automatically selected. 
And finally, Bacula requests you to +enter a filename: + +\footnotesize +\begin{verbatim} +Enter filename: +\end{verbatim} +\normalsize + +At this point, you can enter the full path and filename + +\footnotesize +\begin{verbatim} +Enter filename: /home/kern/bacula/k/Makefile.in +Enter filename: +\end{verbatim} +\normalsize + +as you can see, it took the filename. If Bacula cannot find a copy of the +file, it prints the following: + +\footnotesize +\begin{verbatim} +Enter filename: junk filename +No database record found for: junk filename +Enter filename: +\end{verbatim} +\normalsize + +If you want Bacula to read the filenames from a file, you simply precede the +filename with a less-than symbol (\lt{}). When you have entered all the +filenames, you enter a blank line, and Bacula will write the bootstrap file, +tells you what tapes will be used, and proposes a Restore job to be run: + +\footnotesize +\begin{verbatim} +Enter filename: +Automatically selected Storage: DDS-4 +Bootstrap records written to /home/kern/bacula/working/restore.bsr +The restore job will require the following Volumes: + + test1 +1 file selected to restore. +Run Restore job +JobName: kernsrestore +Bootstrap: /home/kern/bacula/working/restore.bsr +Where: /tmp/bacula-restores +Replace: always +FileSet: Other Files +Client: Rufus +Storage: DDS-4 +When: 2003-09-11 10:20:53 +Priority: 10 +OK to run? (yes/mod/no): +\end{verbatim} +\normalsize + +It is possible to automate the selection by file by putting your list of files +in say {\bf /tmp/file-list}, then using the following command: + +\footnotesize +\begin{verbatim} +restore client=Rufus file= = / ! ; % : , ~ # = & +\end{verbatim} + +You can use several expressions separated by a commas. + +\subsection*{Examples} + +\begin{tabular}{|c|c|c|l} +\hline +Orignal filename & Computed filename & RegexWhere & Comments \\ +\hline +\hline +\texttt{c:/system.ini} & \texttt{c:/system.old.ini} & \texttt{/.ini\$/.old.ini/} & use \$ as end of filename\\ +\hline +\texttt{/prod/u01/pdata/} & \texttt{/rect/u01/rdata} & \texttt{/prod/rect/,/pdata/rdata/} & using two regexp\\ +\hline +\texttt{/prod/u01/pdata/} & \texttt{/rect/u01/rdata} & \texttt{!/prod/!/rect/!,/pdata/rdata/} & using \texttt{!} instead of \texttt{/}\\ +\hline +\texttt{C:/WINNT} & \texttt{d:/WINNT} & \texttt{/c:/d:/i} & using case-insensitive pattern matching \\ +\hline + +\end{tabular} + +%\subsubsection{Using group} +% +%Like with Perl or Sed, you can make submatch with \texttt{()}, +% +%\subsubsection*{Examples} + + +%\subsubsection{Options} +% +% i Do case-insensitive pattern matching. + +\section{Restoring Directory Attributes} +\index[general]{Attributes!Restoring Directory } +\index[general]{Restoring Directory Attributes } + +Depending how you do the restore, you may or may not get the directory entries +back to their original state. Here are a few of the problems you can +encounter, and for same machine restores, how to avoid them. + +\begin{itemize} +\item You backed up on one machine and are restoring to another that is + either a different OS or doesn't have the same users/groups defined. Bacula + does the best it can in these situations. Note, Bacula has saved the + user/groups in numeric form, which means on a different machine, they + may map to different user/group names. + +\item You are restoring into a directory that is already created and has + file creation restrictions. 
Bacula tries to reset everything, but
+  it does not (and will not) walk up the full chain of directories and
+  modify them all during the restore, so getting permissions back
+  correctly in this situation depends to a large extent on your OS.
+
+\item You are doing a recursive restore of a directory tree. In this case,
+  Bacula will restore a file before restoring the file's parent directory
+  entry. In the process of restoring the file, Bacula will create the
+  parent directory with open permissions and ownership of the file being
+  restored. Then, when Bacula tries to restore the parent directory, it
+  sees that it already exists (similar to the previous situation). If you
+  set the Restore job's "Replace" property to "never", Bacula will
+  not change the directory's permissions and ownership to match what it
+  backed up; you will also notice that the actual number of files
+  restored is less than the expected number. If you set the Restore
+  job's "Replace" property to "always", Bacula will change the
+  directory's ownership and permissions to match what it backed up, and
+  the actual number of files restored should be equal to the expected
+  number.
+
+\item You selected one or more files in a directory, but did not select the
+  directory entry to be restored. In that case, if the directory is not
+  on disk, Bacula simply creates the directory with some default attributes
+  which may not be the same as the original. If you do not select a
+  directory and all its contents to be restored, you can still select
+  items within the directory to be restored by individually marking those
+  files, but in that case, you should individually use the "markdir"
+  command to select all higher level directory entries (one at a time) to
+  be restored if you want the directory entries properly restored.
+
+\item The {\bf bextract} program does not restore access control lists
+  (ACLs), nor will it restore non-portable Win32 data (default) to Unix
+  machines.
+\end{itemize}
+
+\label{Windows}
+\section{Restoring on Windows}
+\index[general]{Restoring on Windows }
+\index[general]{Windows!Restoring on }
+
+If you are restoring on WinNT/2K/XP systems, Bacula will restore the files
+with the original ownerships and permissions as would be expected. This is
+also true if you are restoring those files to an alternate directory (using
+the Where option in restore). However, if the alternate directory does not
+already exist, the Bacula File daemon (Client) will try to create it. In
+some cases it may not create the directories, and if it does, then because
+the File daemon runs under the SYSTEM account, the directory will be created
+with SYSTEM ownership and permissions. In this case, you may have problems
+accessing the newly restored files.
+
+To avoid this problem, you should create any alternate directory before
+doing the restore. Bacula will not change the ownership and permissions of
+the directory if it is already created, as long as it is not one of the
+directories being restored (i.e. written to tape).
+
+The default restore location is {\bf /tmp/bacula-restores/}, and if you are
+restoring from drive {\bf E:}, the default will be
+{\bf /tmp/bacula-restores/e/}, so you should ensure that this directory
+exists before doing the restore, or use the {\bf mod} option to
+select a different {\bf where} directory that does exist.
+
+Some users have experienced problems restoring files that participate in
+the Active Directory.
They also report that changing the userid under which +Bacula (bacula-fd.exe) runs, from SYSTEM to a Domain Admin userid, resolves +the problem. + + +\section{Restoring Files Can Be Slow} +\index[general]{Slow!Restoring Files Can Be } +\index[general]{Restoring Files Can Be Slow } + +Restoring files is generally {\bf much} slower than backing them up for several +reasons. The first is that during a backup the tape is normally already +positioned and Bacula only needs to write. On the other hand, because restoring +files is done so rarely, Bacula keeps only the start file and block on the +tape for the whole job rather than on a file by file basis which would use +quite a lot of space in the catalog. + +Bacula will forward space to the correct file mark on the tape for the Job, +then forward space to the correct block, and finally sequentially read each +record until it gets to the correct one(s) for the file or files you want to +restore. Once the desired files are restored, Bacula will stop reading the +tape. + +Finally, instead of just reading a file for backup, during the restore, Bacula +must create the file, and the operating system must allocate disk space for +the file as Bacula is restoring it. + +For all the above reasons the restore process is generally much slower than +backing up (sometimes it takes three times as long). + +\section{Problems Restoring Files} +\index[general]{Files!Problems Restoring } +\index[general]{Problems Restoring Files } + +The most frequent problems users have restoring files are error messages such +as: + +\footnotesize +\begin{verbatim} +04-Jan 00:33 z217-sd: RestoreFiles.2005-01-04_00.31.04 Error: +block.c:868 Volume data error at 20:0! Short block of 512 bytes on +device /dev/tape discarded. +\end{verbatim} +\normalsize + +or + +\footnotesize +\begin{verbatim} +04-Jan 00:33 z217-sd: RestoreFiles.2005-01-04_00.31.04 Error: +block.c:264 Volume data error at 20:0! Wanted ID: "BB02", got ".". +Buffer discarded. +\end{verbatim} +\normalsize + +Both these kinds of messages indicate that you were probably running your tape +drive in fixed block mode rather than variable block mode. Fixed block mode +will work with any program that reads tapes sequentially such as tar, but +Bacula repositions the tape on a block basis when restoring files because this +will speed up the restore by orders of magnitude when only a few files are being +restored. There are several ways that you can attempt to recover from this +unfortunate situation. + +Try the following things, each separately, and reset your Device resource to +what it is now after each individual test: + +\begin{enumerate} +\item Set "Block Positioning = no" in your Device resource and try the + restore. This is a new directive and untested. + +\item Set "Minimum Block Size = 512" and "Maximum Block Size = 512" and + try the restore. If you are able to determine the block size your drive + was previously using, you should try that size if 512 does not work. + This is a really horrible solution, and it is not at all recommended + to continue backing up your data without correcting this condition. + Please see the Tape Testing chapter for more on this. + +\item Try editing the restore.bsr file at the Run xxx yes/mod/no prompt + before starting the restore job and remove all the VolBlock statements. + These are what causes Bacula to reposition the tape, and where problems + occur if you have a fixed block size set for your drive. 
The VolFile + commands also cause repositioning, but this will work regardless of the + block size. + +\item Use bextract to extract the files you want -- it reads the Volume + sequentially if you use the include list feature, or if you use a .bsr + file, but remove all the VolBlock statements after the .bsr file is + created (at the Run yes/mod/no) prompt but before you start the restore. +\end{enumerate} + +\section{Restore Errors} +\index[general]{Errors!Restore} +\index[general]{Restore Errors} + +There are a number of reasons why there may be restore errors or +warning messages. Some of the more common ones are: + +\begin{description} + +\item [file count mismatch] + This can occur for the following reasons: + \begin{itemize} + \item You requested Bacula not to overwrite existing or newer + files. + \item A Bacula miscount of files/directories. This is an + on-going problem due to the complications of directories, + soft/hard link, and such. Simply check that all the files you + wanted were actually restored. + \end{itemize} + +\item [file size error] + When Bacula restores files, it checks that the size of the + restored file is the same as the file status data it saved + when starting the backup of the file. If the sizes do not + agree, Bacula will print an error message. This size mismatch + most often occurs because the file was being written as Bacula + backed up the file. In this case, the size that Bacula + restored will be greater than the status size. This often + happens with log files. + + If the restored size is smaller, then you should be concerned + about a possible tape error and check the Bacula output as + well as your system logs. +\end{description} + + + +\section{Example Restore Job Resource} +\index[general]{Example Restore Job Resource } +\index[general]{Resource!Example Restore Job } + +\footnotesize +\begin{verbatim} +Job { + Name = "RestoreFiles" + Type = Restore + Client = Any-client + FileSet = "Any-FileSet" + Storage = Any-storage + Where = /tmp/bacula-restores + Messages = Standard + Pool = Default +} +\end{verbatim} +\normalsize + +If {\bf Where} is not specified, the default location for restoring files will +be their original locations. +\label{Selection} + +\section{File Selection Commands} +\index[general]{Commands!File Selection } +\index[general]{File Selection Commands } + +After you have selected the Jobs to be restored and Bacula has created the +in-memory directory tree, you will enter file selection mode as indicated by +the dollar sign ({\bf \$}) prompt. While in this mode, you may use the +commands listed above. The basic idea is to move up and down the in memory +directory structure with the {\bf cd} command much as you normally do on the +system. Once you are in a directory, you may select the files that you want +restored. As a default no files are marked to be restored. If you wish to +start with all files, simply enter: {\bf cd /} and {\bf mark *}. Otherwise +proceed to select the files you wish to restore by marking them with the {\bf +mark} command. The available commands are: + +\begin{description} + +\item [cd] + The {\bf cd} command changes the current directory to the argument + specified. + It operates much like the Unix {\bf cd} command. Wildcard specifications are + not permitted. + + Note, on Windows systems, the various drives (c:, d:, ...) are treated like + a + directory within the file tree while in the file selection mode. 
As a + consequence, you must do a {\bf cd c:} or possibly in some cases a {\bf cd + C:} (note upper case) to get down to the first directory. + +\item [dir] + \index[dir]{dir } + The {\bf dir} command is similar to the {\bf ls} command, except that it + prints it in long format (all details). This command can be a bit slower + than + the {\bf ls} command because it must access the catalog database for the + detailed information for each file. + +\item [estimate] + \index[dir]{estimate } + The {\bf estimate} command prints a summary of the total files in the tree, + how many are marked to be restored, and an estimate of the number of bytes + to + be restored. This can be useful if you are short on disk space on the + machine + where the files will be restored. + +\item [find] + \index[dir]{find} + The {\bf find} command accepts one or more arguments and displays all files + in the tree that match that argument. The argument may have wildcards. It is + somewhat similar to the Unix command {\bf find / -name arg}. + +\item [ls] + The {\bf ls} command produces a listing of all the files contained in the + current directory much like the Unix {\bf ls} command. You may specify an + argument containing wildcards, in which case only those files will be + listed. + + Any file that is marked to be restored will have its name preceded by an + asterisk ({\bf *}). Directory names will be terminated with a forward slash + ({\bf /}) to distinguish them from filenames. + +\item [lsmark] + \index[fd]{lsmark} + The {\bf lsmark} command is the same as the {\bf ls} except that it will + print only those files marked for extraction. The other distinction is that + it will recursively descend into any directory selected. + +\item [mark] + \index[dir]{mark} + The {\bf mark} command allows you to mark files to be restored. It takes a + single argument which is the filename or directory name in the current + directory to be marked for extraction. The argument may be a wildcard + specification, in which case all files that match in the current directory + are marked to be restored. If the argument matches a directory rather than a + file, then the directory and all the files contained in that directory + (recursively) are marked to be restored. Any marked file will have its name + preceded with an asterisk ({\bf *}) in the output produced by the {\bf ls} +or + {\bf dir} commands. Note, supplying a full path on the mark command does not + work as expected to select a file or directory in the current directory. + Also, the {\bf mark} command works on the current and lower directories but + does not touch higher level directories. + + After executing the {\bf mark} command, it will print a brief summary: + +\footnotesize +\begin{verbatim} + No files marked. + +\end{verbatim} +\normalsize + + If no files were marked, or: + +\footnotesize +\begin{verbatim} + nn files marked. + +\end{verbatim} +\normalsize + + if some files are marked. + +\item [unmark] + \index[dir]{unmark } + The {\bf unmark} is identical to the {\bf mark} command, except that it + unmarks the specified file or files so that they will not be restored. Note: + the {\bf unmark} command works from the current directory, so it does not + unmark any files at a higher level. First do a {\bf cd /} before the {\bf + unmark *} command if you want to unmark everything. + +\item [pwd] + \index[dir]{pwd } + The {\bf pwd} command prints the current working directory. It accepts no + arguments. 
+ +\item [count] + \index[dir]{count } + The {\bf count} command prints the total files in the directory tree and the + number of files marked to be restored. + +\item [done] + \index[dir]{done } + This command terminates file selection mode. + +\item [exit] + \index[fd]{exit } + This command terminates file selection mode (the same as done). + +\item [quit] + \index[fd]{quit } + This command terminates the file selection and does not run the restore +job. + + +\item [help] + \index[fd]{help } + This command prints a summary of the commands available. + +\item [?] + This command is the same as the {\bf help} command. +\end{description} + +\label{database_restore} +\section{Restoring When Things Go Wrong} +\index[general]{Restoring When Things Go Wrong } +\index[general]{Restoring Your Database} +\index[general]{Database!Restoring} + +This and the following sections will try to present a few of the kinds of +problems that can come up making restoring more difficult. We will try to +provide a few ideas how to get out of these problem situations. +In addition to what is presented here, there is more specific information +on restoring a \ilink{Client}{restore_client} and your +\ilink{Server}{restore_server} in the \ilink{Disaster Recovery Using +Bacula}{RescueChapter} chapter of this manual. + +\begin{description} +\item[Problem] + My database is broken. +\item[Solution] + For SQLite, use the vacuum command to try to fix the database. For either + MySQL or PostgreSQL, see the vendor's documentation. They have specific tools + that check and repair databases, see the \ilink{database + repair}{DatabaseRepair} sections of this manual for links to vendor + information. + + Assuming the above does not resolve the problem, you will need to restore + or rebuild your catalog. Note, if it is a matter of some + inconsistencies in the Bacula tables rather than a broken database, then + running \ilink{dbcheck}{dbcheck} might help, but you will need to ensure + that your database indexes are properly setup. Please see + the \ilink{Database Performance Issues}{DatabasePerformance} sections + of this manual for more details. + +\item[Problem] + How do I restore my catalog? +\item[Solution with a Catalog backup] + If you have backed up your database nightly (as you should) and you + have made a bootstrap file, you can immediately load back your + database (or the ASCII SQL output). Make a copy of your current + database, then re-initialize it, by running the following scripts: +\begin{verbatim} + ./drop_bacula_tables + ./make_bacula_tables +\end{verbatim} + After re-initializing the database, you should be able to run + Bacula. If you now try to use the restore command, it will not + work because the database will be empty. However, you can manually + run a restore job and specify your bootstrap file. You do so + by entering the {bf run} command in the console and selecting the + restore job. If you are using the default bacula-dir.conf, this + Job will be named {\bf RestoreFiles}. Most likely it will prompt + you with something such as: + +\footnotesize +\begin{verbatim} +Run Restore job +JobName: RestoreFiles +Bootstrap: /home/kern/bacula/working/restore.bsr +Where: /tmp/bacula-restores +Replace: always +FileSet: Full Set +Client: rufus-fd +Storage: File +When: 2005-07-10 17:33:40 +Catalog: MyCatalog +Priority: 10 +OK to run? (yes/mod/no): +\end{verbatim} +\normalsize + + A number of the items will be different in your case. 
What you want to + do is: to use the mod option to change the Bootstrap to point to your + saved bootstrap file; and to make sure all the other items such as + Client, Storage, Catalog, and Where are correct. The FileSet is not + used when you specify a bootstrap file. Once you have set all the + correct values, run the Job and it will restore the backup of your + database, which is most likely an ASCII dump. + + You will then need to follow the instructions for your + database type to recreate the database from the ASCII backup file. + See the \ilink {Catalog Maintenance}{CatMaintenanceChapter} chapter of + this manual for examples of the command needed to restore a + database from an ASCII dump (they are shown in the Compacting Your + XXX Database sections). + + Also, please note that after you restore your database from an ASCII + backup, you do NOT want to do a {\bf make\_bacula\_tables} command, or + you will probably erase your newly restored database tables. + + +\item[Solution with a Job listing] + If you did save your database but did not make a bootstrap file, then + recovering the database is more difficult. You will probably need to + use bextract to extract the backup copy. First you should locate the + listing of the job report from the last catalog backup. It has + important information that will allow you to quickly find your database + file. For example, in the job report for the CatalogBackup shown below, + the critical items are the Volume name(s), the Volume Session Id and the + Volume Session Time. If you know those, you can easily restore your + Catalog. + +\footnotesize +\begin{verbatim} +22-Apr 10:22 HeadMan: Start Backup JobId 7510, +Job=CatalogBackup.2005-04-22_01.10.0 +22-Apr 10:23 HeadMan: Bacula 1.37.14 (21Apr05): 22-Apr-2005 10:23:06 + JobId: 7510 + Job: CatalogBackup.2005-04-22_01.10.00 + Backup Level: Full + Client: Polymatou + FileSet: "CatalogFile" 2003-04-10 01:24:01 + Pool: "Default" + Storage: "DLTDrive" + Start time: 22-Apr-2005 10:21:00 + End time: 22-Apr-2005 10:23:06 + FD Files Written: 1 + SD Files Written: 1 + FD Bytes Written: 210,739,395 + SD Bytes Written: 210,739,521 + Rate: 1672.5 KB/s + Software Compression: None + Volume name(s): DLT-22Apr05 + Volume Session Id: 11 + Volume Session Time: 1114075126 + Last Volume Bytes: 1,428,240,465 + Non-fatal FD errors: 0 + SD Errors: 0 + FD termination status: OK + SD termination status: OK + Termination: Backup OK +\end{verbatim} +\normalsize + + From the above information, you can manually create a bootstrap file, + and then follow the instructions given above for restoring your database. + A reconstructed bootstrap file for the above backup Job would look + like the following: + +\footnotesize +\begin{verbatim} +Volume="DLT-22Apr05" +VolSessionId=11 +VolSessionTime=1114075126 +FileIndex=1-1 +\end{verbatim} +\normalsize + + Where we have inserted the Volume name, Volume Session Id, and Volume + Session Time that correspond to the values in the job report. We've also + used a FileIndex of one, which will always be the case providing that + there was only one file backed up in the job. + + The disadvantage of this bootstrap file compared to what is created when + you ask for one to be written, is that there is no File and Block + specified, so the restore code must search all data in the Volume to find + the requested file. 
A fully specified bootstrap file would have the File + and Blocks specified as follows: + +\footnotesize +\begin{verbatim} +Volume="DLT-22Apr05" +VolSessionId=11 +VolSessionTime=1114075126 +VolFile=118-118 +VolBlock=0-4053 +FileIndex=1-1 +\end{verbatim} +\normalsize + + Once you have restored the ASCII dump of the database, + you will then to follow the instructions for your + database type to recreate the database from the ASCII backup file. + See the \ilink {Catalog Maintenance}{CatMaintenanceChapter} chapter of + this manual for examples of the command needed to restore a + database from an ASCII dump (they are shown in the Compacting Your + XXX Database sections). + + Also, please note that after you restore your database from an ASCII + backup, you do NOT want to do a {\bf make\_bacula\_tables} command, or + you will probably erase your newly restored database tables. + +\item [Solution without a Job Listing] + If you do not have a job listing, then it is a bit more difficult. + Either you use the \ilink{bscan}{bscan} program to scan the contents + of your tape into a database, which can be very time consuming + depending on the size of the tape, or you can use the \ilink{bls}{bls} + program to list everything on the tape, and reconstruct a bootstrap + file from the bls listing for the file or files you want following + the instructions given above. + + There is a specific example of how to use {\bf bls} below. + +\item [Problem] + I try to restore the last known good full backup by specifying + item 3 on the restore menu then the JobId to restore. Bacula + then reports: + +\footnotesize +\begin{verbatim} + 1 Job 0 Files +\end{verbatim} +\normalsize + and restores nothing. + +\item[Solution] + Most likely the File records were pruned from the database either due + to the File Retention period expiring or by explicitly purging the + Job. By using the "llist jobid=nn" command, you can obtain all the + important information about the job: + +\footnotesize +\begin{verbatim} +llist jobid=120 + JobId: 120 + Job: save.2005-12-05_18.27.33 + Job.Name: save + PurgedFiles: 0 + Type: B + Level: F + Job.ClientId: 1 + Client.Name: Rufus + JobStatus: T + SchedTime: 2005-12-05 18:27:32 + StartTime: 2005-12-05 18:27:35 + EndTime: 2005-12-05 18:27:37 + JobTDate: 1133803657 + VolSessionId: 1 + VolSessionTime: 1133803624 + JobFiles: 236 + JobErrors: 0 + JobMissingFiles: 0 + Job.PoolId: 4 + Pool.Name: Full + Job.FileSetId: 1 + FileSet.FileSet: BackupSet +\end{verbatim} +\normalsize + + Then you can find the Volume(s) used by doing: + +\footnotesize +\begin{verbatim} +sql +select VolumeName from JobMedia,Media where JobId=1 and JobMedia.MediaId=Media.MediaId; +\end{verbatim} +\normalsize + + Finally, you can create a bootstrap file as described in the previous + problem above using this information. + + If you are using Bacula version 1.38.0 or greater, when you select + item 3 from the menu and enter the JobId, it will ask you if + you would like to restore all the files in the job, and it will + collect the above information and write the bootstrap file for + you. + +\item [Problem] + You don't have a bootstrap file, and you don't have the Job report for + the backup of your database, but you did backup the database, and you + know the Volume to which it was backed up. + +\item [Solution] + Either bscan the tape (see below for bscanning), or better use {\bf bls} + to find where it is on the tape, then use {\bf bextract} to + restore the database. 
For example, + + +\footnotesize +\begin{verbatim} +./bls -j -V DLT-22Apr05 /dev/nst0 +\end{verbatim} +\normalsize + Might produce the following output: +\footnotesize +\begin{verbatim} +bls: butil.c:258 Using device: "/dev/nst0" for reading. +21-Jul 18:34 bls: Ready to read from volume "DLT-22Apr05" on device "DLTDrive" +(/dev/nst0). +Volume Record: File:blk=0:0 SessId=11 SessTime=1114075126 JobId=0 DataLen=164 +... +Begin Job Session Record: File:blk=118:0 SessId=11 SessTime=1114075126 +JobId=7510 + Job=CatalogBackup.2005-04-22_01.10.0 Date=22-Apr-2005 10:21:00 Level=F Type=B +End Job Session Record: File:blk=118:4053 SessId=11 SessTime=1114075126 +JobId=7510 + Date=22-Apr-2005 10:23:06 Level=F Type=B Files=1 Bytes=210,739,395 Errors=0 +Status=T +... +21-Jul 18:34 bls: End of Volume at file 201 on device "DLTDrive" (/dev/nst0), +Volume "DLT-22Apr05" +21-Jul 18:34 bls: End of all volumes. +\end{verbatim} +\normalsize + Of course, there will be many more records printed, but we have indicated + the essential lines of output. From the information on the Begin Job and End + Job Session Records, you can reconstruct a bootstrap file such as the one + shown above. + +\item[Problem] + How can I find where a file is stored. +\item[Solution] + Normally, it is not necessary, you just use the {\bf restore} command to + restore the most recently saved version (menu option 5), or a version + saved before a given date (menu option 8). If you know the JobId of the + job in which it was saved, you can use menu option 3 to enter that JobId. + + If you would like to know the JobId where a file was saved, select + restore menu option 2. + + You can also use the {\bf query} command to find information such as: +\footnotesize +\begin{verbatim} +*query +Available queries: + 1: List up to 20 places where a File is saved regardless of the +directory + 2: List where the most recent copies of a file are saved + 3: List last 20 Full Backups for a Client + 4: List all backups for a Client after a specified time + 5: List all backups for a Client + 6: List Volume Attributes for a selected Volume + 7: List Volumes used by selected JobId + 8: List Volumes to Restore All Files + 9: List Pool Attributes for a selected Pool + 10: List total files/bytes by Job + 11: List total files/bytes by Volume + 12: List Files for a selected JobId + 13: List Jobs stored on a selected MediaId + 14: List Jobs stored for a given Volume name + 15: List Volumes Bacula thinks are in changer + 16: List Volumes likely to need replacement from age or errors +Choose a query (1-16): +\end{verbatim} +\normalsize + +\item[Problem] + I didn't backup my database. What do I do now? +\item[Solution] + This is probably the worst of all cases, and you will probably have + to re-create your database from scratch and then bscan in all your + Volumes, which is a very long, painful, and inexact process. + +There are basically three steps to take: + +\begin{enumerate} +\item Ensure that your SQL server is running (MySQL or PostgreSQL) + and that the Bacula database (normally bacula) exists. See the + \ilink{Installation}{CreateDatabase} chapter of the manual. +\item Ensure that the Bacula databases are created. This is also + described at the above link. +\item Start and stop the Bacula Director using the propriate + bacula-dir.conf file so that it can create the Client and + Storage records which are not stored on the Volumes. Without these + records, scanning is unable to connect the Job records to the proper + client. 
+\end{enumerate} + +When the above is complete, you can begin bscanning your Volumes. Please +see the \ilink{bscan}{bscan} section of the Volume Utility Tools of this +chapter for more details. + +\end{description} diff --git a/docs/manuals/de/concepts/setup.sm b/docs/manuals/de/concepts/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/de/concepts/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/de/concepts/spooling.tex b/docs/manuals/de/concepts/spooling.tex new file mode 100644 index 00000000..9d1e4a9a --- /dev/null +++ b/docs/manuals/de/concepts/spooling.tex @@ -0,0 +1,138 @@ +%% +%% + +\chapter{Data Spooling} +\label{SpoolingChapter} +\index[general]{Data Spooling } +\index[general]{Spooling!Data } + +Bacula allows you to specify that you want the Storage daemon to initially +write your data to disk and then subsequently to tape. This serves several +important purposes. + +\begin{itemize} +\item It takes a long time for data to come in from the File daemon during + an Incremental backup. If it is directly written to tape, the tape will + start and stop or shoe-shine as it is often called causing tape wear. + By first writing the data to disk, then writing it to tape, the tape can + be kept in continual motion. +\item While the spooled data is being written to the tape, the despooling + process has exclusive use of the tape. This means that you can spool + multiple simultaneous jobs to disk, then have them very efficiently + despooled one at a time without having the data blocks from several jobs + intermingled, thus substantially improving the time needed to restore + files. While despooling, all jobs spooling continue running. +\item Writing to a tape can be slow. By first spooling your data to disk, + you can often reduce the time the File daemon is running on a system, + thus reducing downtime, and/or interference with users. Of course, if + your spool device is not large enough to hold all the data from your + File daemon, you may actually slow down the overall backup. +\end{itemize} + +Data spooling is exactly that "spooling". It is not a way to first write a +"backup" to a disk file and then to a tape. When the backup has only been +spooled to disk, it is not complete yet and cannot be restored until it is +written to tape. + +Bacula version 1.39.x and later supports writing a backup +to disk then later {\bf Migrating} or moving it to a tape (or any +other medium). For +details on this, please see the \ilink{Migration}{MigrationChapter} chapter +of this manual for more details. + +The remainder of this chapter explains the various directives that you can use +in the spooling process. + +\label{directives} +\section{Data Spooling Directives} +\index[general]{Directives!Data Spooling } +\index[general]{Data Spooling Directives } + +The following directives can be used to control data spooling. 
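+Before the individual directives are described, here is a rough sketch of
+where they might appear. The resource names, the size, and the directory
+shown here are purely illustrative; only the spooling directives themselves
+are taken from the list that follows.
+
+\footnotesize
+\begin{verbatim}
+# bacula-dir.conf (Director): enable spooling for one Job
+Job {
+  Name = "NightlySave"
+  SpoolData = yes
+  # other required Job directives (Client, FileSet, Storage, Pool, ...) omitted
+}
+
+# bacula-sd.conf (Storage daemon): limit and locate the spool files
+Device {
+  Name = "LTO-Drive"
+  Spool Directory = /var/bacula/spool
+  # about 100 GB, given as a plain byte count
+  Maximum Spool Size = 100000000000
+  # other required Device directives omitted
+}
+\end{verbatim}
+\normalsize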
+
+\begin{itemize}
+\item To turn data spooling on/off at the Job level in the Job resource in
+  the Director's conf file (default {\bf no}).
+
+{\bf SpoolData = yes|no}
+
+\item To override the Job specification in a Schedule Run directive in the
+  Director's conf file.
+
+{\bf SpoolData = yes|no}
+
+\item To limit the maximum total size of the spooled data for a particular
+  device. Specified in the Device resource of the Storage daemon's conf file
+  (default unlimited).
+
+{\bf Maximum Spool Size = size}
+  Where size is the maximum spool size for all jobs specified in bytes.
+
+\item To limit the maximum total size of the spooled data for a particular
+  device for a single job. Specified in the Device Resource of the Storage
+  daemon's conf file (default unlimited).
+
+{\bf Maximum Job Spool Size = size}
+  Where size is the maximum spool file size for a single job specified in
+  bytes.
+
+\item To specify the spool directory for a particular device. Specified in
+  the Device Resource of the Storage daemon's conf file (default, the working
+  directory).
+
+{\bf Spool Directory = directory}
+\end{itemize}
+
+\label{warning}
+
+% TODO: fix this section name
+\section{!!! MAJOR WARNING !!!}
+\index[general]{WARNING! MAJOR }
+\index[general]{ MAJOR WARNING }
+
+Please be very careful to exclude the spool directory from any backup;
+otherwise, your job will write enormous amounts of data to the Volume, and
+most probably terminate in error. This is because in attempting to back up
+the spool file, the backup data will be written a second time to the spool
+file, and so on ad infinitum.
+
+We also recommend that you always specify the maximum spool size so that
+your disk doesn't completely fill up. In principle, data spooling will
+properly detect a full disk and despool the data, allowing the job to
+continue. However, attribute spooling is not so kind to the user. If the
+disk on which attributes are being spooled fills, the job will be canceled.
+In addition, if your working directory is on the same partition as the
+spool directory, then Bacula jobs will fail, possibly in bizarre ways, when
+the spool fills.
+
+\label{points}
+\section{Other Points}
+\index[general]{Points!Other }
+\index[general]{Other Points }
+
+\begin{itemize}
+\item When data spooling is enabled, Bacula automatically turns on attribute
+  spooling. In other words, it also spools the catalog entries to disk. This is
+  done so that in case the job fails, there will be no catalog entries
+  pointing to non-existent tape backups.
+\item Attribute despooling occurs near the end of a job. The Storage daemon
+  accumulates file attributes during the backup and sends them to the
+  Director at the end of the job. The Director then inserts the file
+  attributes into the catalog. During this insertion, the tape drive may
+  be inactive. When the file attribute insertion is completed, the job
+  terminates.
+\item Attribute spool files are always placed in the working directory of
+  the Storage daemon.
+\item When Bacula begins despooling data spooled to disk, it takes exclusive
+  use of the tape. This has the major advantage that, when running multiple
+  simultaneous jobs, the blocks of the several jobs will not be
+  intermingled.
+\item It probably does not make a lot of sense to enable data spooling if you
+  are writing to disk files.
+\item It is probably best to provide as large a spool file as possible to
+  avoid repeatedly spooling/despooling. Also, while a job is despooling to
+  tape, the File daemon must wait (i.e.
spooling stops for the job while it is + despooling). +\item If you are running multiple simultaneous jobs, Bacula will continue + spooling other jobs while one is despooling to tape, provided there is + sufficient spool file space. +\end{itemize} diff --git a/docs/manuals/de/concepts/state.tex b/docs/manuals/de/concepts/state.tex new file mode 100644 index 00000000..5f3ae974 --- /dev/null +++ b/docs/manuals/de/concepts/state.tex @@ -0,0 +1,243 @@ +%% +%% + +\chapter{Baculas Stand} +\label{_ChapterStart2} +\index[general]{Baculas momentaner Stand } + +(was gegenw\"{a}rtig implementiert und funktionsf\"{a}hig ist und was nicht) + +\section{Was implementiert ist} +\index[general]{implementiert!Was ist } +\index[general]{Was implementiert ist } + +\begin{itemize} +\item Sicherung/Wiederherstellung im Netzwerkes unter der Regie eines +zentralen \textbf{Director}-Prozess. + +\item automatische Ausf\"{u}hrung von + \ilink{Job}{JobDef}s nach einem festgelegten Zeitplan. + +\item Terminplanung f\"{u}r mehrere Jobs zur gleichen Zeit. + +\item die M\"{o}glichkeit einen oder mehrere Jobs zur gleichen Zeit auszuf\"{u}hren. + +\item zeitliche Staffelung der Jobs entsprechend ihrer Priorit\"{a}t. + +\item die Wiederherstellung einer oder mehrerer Dateien, die interaktiv +aus der letzten Sicherung oder einer Sicherung vor einem festgelegten +Zeitpunkt ausgew\"{a}hlt werden k\"{o}nnen. + +\item die Wiederherstellung aller Dateien eines Systems auf einer +leeren Festplatte. Dieser Vorgang kann bei Linux- und Solaris-Systemen (mit +Einschr\"{a}nkungen) gr\"{o}{\ss}tenteils automatisch ablaufen. N\"{a}heres hierzu im Kapitel +\ilink{``Disaster Recovery Using Bacula''}{_ChapterStart38}. Benutzer +berichten, dass dies auch mit Win2K/XP-Systemen funktioniert. + +\item die Ermittlung und Wiederherstellung von Dateien mittels eigenst\"{a}ndiger +Hilfsprogramme wie {\bf bls} und {\bf bextract}. Unter anderem ist es damit +m\"{o}glich, Dateien wiederherzustellen, wenn Bacula und/oder die +\textbf{Catalog}-Datenbank nicht verf\"{u}gbar ist/sind. Beachten Sie aber, dass wir +hierf\"{u}r den ``restore''-Befehl an der \textbf{Console} empfehlen und diese +Hilfsprogramme nur f\"{u}r den Notfall vorgesehen sind. + +\item die M\"{o}glichkeit, die \textbf{Catalog}-Datenbank durch Auslesen +der \textbf{Volumes} mit dem Hilfsprogramm {\bf bscan} wieder herzustellen. + +\item eine \ilink{Konsolen}{UADef}-Schnittstelle zum \textbf{Director}, \"{u}ber die +dieser vollkommen gesteuert werden kann. Die \textbf{Console} ist als +Shell-Programm, GNOME-GUI und wxWidgets-GUI verf\"{u}gbar. Beachten Sie bitte, dass +das GNOME-GUI gegen\"{u}ber dem Shell-Programm zur Zeit nur sehr wenige zus\"{a}tzliche +Funktionen aufweist. + +\item die Verifikation der Dateien, die zuvor in das +\textbf{Catalog}-Verzeichnis aufgenommen wurden, erlaubt eine Funktionalit\"{a}t +wie sie das Programm ``Tripwire'' hat (Intrusion Detection). + +\item die Authentifizierung der Komponenten (D\"{a}monen) untereinander +durch CRAM-MD5 Passw\"{o}rter. + +\item eine konfigurierbare \ilink{TLS (ssl)-Verschl\"{u}sselung }{_ChapterStart61} +zwischen den einzelnen Komponenten. + +\item leicht verst\"{a}ndliche und erweiterbare +\ilink{Konfigurationsdateien}{_ChapterStart40} f\"{u}r jeden einzelnen +D\"{a}monprozess. + +\item eine \textbf{Catalog}-Datenbank zur Aufzeichnung der \textbf{Volumes}, +\textbf{Pools}, \textbf{Jobs} und der Informationen \"{u}ber die gesicherten +Dateien. 
+ +\item Unterst\"{u}tzung von \textbf{SQLite}, \textbf{PostgreSQL} und +\textbf{MySQL} \textbf{Catalog}-Datenbanksystemen. + +\item vom Benutzer erweiterbare Datenbankabfragen an \textbf{SQLite}-, +\textbf{PostgreSQL} und \textbf{MySQL}-Datenbanksysteme. + +\item gekennzeichnete \textbf{Volumes}, die ein versehentliches +Überschreiben (zumindest durch Bacula) verhindern. + +\item eine beliebige Anzahl verschiedener \textbf{Jobs} und +\textbf{Clients} kann auf ein einzelnes \textbf{Volume} gesichert werden. Dies +bedeutet, dass von Linux-, Unix-, Sun- und Windows-Rechnern auf das gleiche +\textbf{Volume} gesichert werden kann. Das gleiche gilt f\"{u}r die +Wiederherstellung. + +\item eine Sicherung kann sich \"{u}ber mehrere \textbf{Volumes} erstrecken. Sobald ein +\textbf{Volume} voll ist, fordert {\bf Bacula} automatisch das n\"{a}chste +\textbf{Volume} an und setzt die Sicherung fort. + +\item die Verwaltung von \ilink{\textbf{Pools} +und \textbf{Volumes}}{PoolResource} erlaubt einen anpassungsf\"{a}higen Umgang mit +\textbf{Volumes} (z.B. Gruppen von \textbf{Volumes} f\"{u}r die monatliche, +w\"{o}chentliche, t\"{a}gliche Sicherung, Gruppen von \textbf{Volumes} f\"{u}r bestimmte +\textbf{Clients}...). + +\item das Datenformat der \textbf{Volumes} ist systemunabh\"{a}ngig. Bei Bedarf +k\"{o}nnen die Daten von Linux-, Solaris- und Windows-Clients in +dasselbe \textbf{Volumen} gespeichert werden. + +\item ein konfigurierbares \ilink{Messages}-Handling. +Dazu geh\"{o}rt der Versand von Botschaften aller D\"{a}mon-Prozesse an den \textbf{Director} +und die automatische Benachrichtigung des Benutzers \"{u}ber das Mailsystem. + +\item Implementierung der Prozesse als Multithread-Programme. + +\item Programmtechnisch keine Begrenzung der L\"{a}nge der Dateinamen oder +der Botschaften. + +\item GZIP-Komprimierung f\"{u}r jede einzelne Datei, die schon der Client +erledigt, sofern dies vor einer Übertragung im Netzwerk angefordert wird. + +\item bei Bedarf die Berechnung von MD5 oder SHA1 Signaturen der +Dateidaten. + +\item POSIX ACLs werden - wenn aktiviert - unter den meisten Betriebssystemen gesichert und wiederhergestellt. + +\item die Unterst\"{u}tzung von Autochangern \"{u}ber ein einfache Shell-Schnittstelle. +Damit ist es m\"{o}glich, praktisch mit jedem Autoloader-Programm zu kommunizieren. +Ein Skript f\"{u}r {\bf mtx} ist bereitgestellt. + +\item unterst\"{u}tzt Autochanger-Barcodes -- entsprechend der Barcodes +wird das Band gekennzeichnet. + +\item automatische Unterst\"{u}tzung mehrerer Autochanger-Magazine. Hierbei wird entweder der Barcode oder das Band gelesen. + +\item Unterst\"{u}tzung von Autochangern mit mehreren Laufwerken + +\item Sicherung/Wiederherstellung als raw-Backup. Hierbei mu{\ss} die Wiederherstellung auf den gleichen Datentr\"{a}ger erfolgen. + +\item jeder Datenblock (etwa 64KByte) der \textbf{Volumes} enth\"{a}lt die +Pr\"{u}fsumme der Daten. + +\item Zugangskontrolllisten f\"{u}r \textbf{Consolen}, die dem Benutzer einen Zugang nur zu den eigenen Daten erlauben. + +\item Zwischenspeicherung der zu sichernden Daten auf der Festplatte und +fortlaufende Beschreibung des Bandes mit den zwischengespeicherten Daten +verhindert den ``Schoe-Shine-Effekt'' bei einer inkrementiellen oder +differentiellen Sicherung. + +\item Sicherung/Wiederherstellung von Dateien, die gr\"{o}{\ss}er sind als 2GB. + +\item Unterst\"{u}tzung von 64Bit-Systemen wie z.B. AMD64. 
+ +\item es ist m\"{o}glich, die Kommunikation der D\"{a}monen untereinander durch +STunnel zu verschl\"{u}sseln. + +\item Unterst\"{u}tzung von ANSI- und IBM Band-Labels. + +\item Unterst\"{u}tzung von Unicode-Dateinamen (z.B. Chinesisch) auf Win32-Rechnern mit der Version 1.37.28 und h\"{o}her. + +\item konsistente Sicherung von ge\"{o}ffneten Dateien von Win32-Systemen (WinXP, Win2003, nicht Win2000) durch Verwendung von Volume Shadow Copy (VSS). + +\end{itemize} + +\section{Die Vorteile von Bacula gegen\"{u}ber anderen Sicherungsprogrammen} +\index[general]{Die Vorteile von Bacula gegen\"{u}ber anderen Sicherungsprogrammen} +\index[general]{Sicherungsprogrammen!Die Vorteile von Bacula gegen\"{u}ber anderen} + +\begin{itemize} +\item da f\"{u}r jeden Rechner ein eigener Client existiert, k\"{o}nnen die Daten von Betriebssystemen aller Art gesichert und wiederhergestellt werden, wobei immer gew\"{a}hrleistet ist, +dass ihre Dateiattribute korrekt gesichert und wiederhergestellt werden. + +\item Man kann auch Clients sichern ohne eine Client-Software zu benutzen und +verwendet hierzu NFS oder Samba. Wir empfehlen jedoch, sofern m\"{o}glich, auf jedem +Rechner, von dem Daten gesichert werden sollen, einen eigenen File-D\"{a}mon laufen zu +haben. + +\item Bacula kann mit Sicherungen umgehen, die auf mehrere Volumes verteilt +sind. + +\item eine umfassende SQL-Datenbank aller gesicherter Dateien erm\"{o}glicht den Überblick \"{u}ber alle gespeicherte Dateien in jedem einzelnen Volume. + +\item automatische Bereinigung der Datenbank (die Entfernung alter Aufzeichnungen) und dadurch eine Vereinfachung der Datenbankadministration. + +\item durch die Verwendung beliebiger SQL-Datenbanksysteme ist Bacula sehr anpassungsf\"{a}hig. + +\item durch den modularen, dabei aber einheitlichen Entwurf ist Bacula in hohem Ma{\ss}e skalierbar. + +\item da Bacula D\"{a}monen auf den Client-Rechnern benutzt, ist es m\"{o}glich, dort laufende Datenbank- oder sonstige Anwendungen mit systemeigenen Befehlen zu beenden und nach einer Sicherung die entsprechenden Anwendungen wieder zu starten. Dies alles kann aus einem einzigen Bacula-Job heraus geschehen. + +\item Bacula hat ein eingebautes Steuerungsprogramm f\"{u}r die Sicherungsjobs. + +\item Das Format der \textbf{Volumes} ist dokumentiert und es gibt einfache C-Programme mit denen sie gelesen und beschrieben werden k\"{o}nnen + +\item Bacula benutzt eindeutige (bei der IANA registrierte) TCP/IP-Ports -- also weder RPCs noch Shared Memory. + +\item Baculas Installation und Konfiguration ist gegen\"{u}ber anderen vergleichbaren Produkten relativ einfach. + +\item laut einem unserer Benutzer ist Bacula genau so schnell wie die wichtigen gro{\ss}en kommerziellen Programme. + +\item laut einem anderen Benutzer ist Bacula vier mal so schnell wie eine andere kommerzielle Anwendung. Das vielleicht deswegen, weil diese Anwendung ihre Verzeichnisinformationen in vielen einzelnen Dateien anstatt in einer SQL-Datenbank speichert, wie Bacula es tut. + +\item neben der grafischen Benutzeroberfl\"{a}che zur Verwaltung hat Bacula eine umfassende Shell-Schnittstelle f\"{u}r die Wartungsaufgaben, wobei der Administrator Werkzeuge wie z.B. ``ssh'' verwenden kann, um jeden Teil von Bacula von \"{u}berall (sogar von Zuhause) zu administrieren. 
+ +\item Bacula hat eine Rettungs-CD f\"{u}r Linux-Systeme mit den folgenden Eigenschaften: + \begin{itemize} + \item Sie kompilieren sie von Grund auf auf ihrem eigenen System mit einem einzigen einfachen Befehl: + ``make'' (...OK, Sie brauchen dann noch ``make burn''...). + \item die Rettungs-CD verwendet Ihren Kernel + \item sie schreibt Skripte entsprechend der Parameter Ihrer Festplatte mit denen Sie diese automatisch + repartitionieren und formatieren k\"{o}nnen, um den Ausgangszustand wieder herzustellen. + + \item sie hat ein Skript, das Ihr Netzwerk wieder starten wird (mit der korrekten IP-Adresse) + + \item sie hat ein Skript, mit dem Ihre Festplatten automatisch gemountet werden. + + \item eine vollwertiger Bacula-FD ist statisch eingebunden + + \item sie k\"{o}nnen der Rettungs-CD auf einfache Weise zus\"{a}tzliche Daten und Programme hinzuf\"{u}gen. + \end{itemize} + +\end{itemize} + +\section{Einschr\"{a}nkungen der aktuellen Implementierung} +\index[general]{Einschr\"{a}nkungen der aktuellen Implementierung } +\index[general]{aktuelle Implementierung! Einschr\"{a}nkungen der} + +\begin{itemize} +\item Pfade und Dateinamen mit mehr als 260 Zeichen werden auf Win32-Systemen nicht unterst\"{u}tzt. Diese werden zwar gesichert, k\"{o}nnen aber nicht wiederhergestellt werden. Durch Verwendung der Direktive {\bf Portable=yes} in Ihrem FileSet k\"{o}nnen Dateien mit langen Namen auf Unix- bzw. Linux-Systemen wiederhergestellt werden. +Lange Dateinamen f\"{u}r Win32-Systeme werden in einer sp\"{a}teren Version implementiert sein. + +\item Sollten Sie mehr als 4 Milliarden Dateieintr\"{a}ge in Ihrer Datenbank gespeichert haben, wird der FileID der Datenbank vermutlich \"{u}berlaufen. Dies w\"{a}re eine ziemlich gro{\ss}e Datenbank, aber immerhin ist sie denkbar. Irgendwann einmal wird das Feld f\"{u}r den FileID von Bacula von 32 auf 64 Bit erweitert werden und das Problem ist gel\"{o}st. In der Zwischenzeit ist die Verwendung mehrerer Datenbanken eine gute L\"{o}sung. + +\item Dateien, die nach einer Vollsicherung gel\"{o}scht wurden, werden bei einer Wiederherstellung eingeschlossen. + +\item Datei-System-Module fehlen(dies w\"{a}ren konfigurierbare Routinen, um spezielle Dateien zu sichern/wiederherzustellen). + +\item Verschl\"{u}sselung des Dateninhalts der \textbf{Volumes}. + +\item Bacula kann die Dateien eines einzelnen Jobs nicht von zwei oder mehreren Speicherger\"{a}ten oder verschiedenen Speichermedien wiederherstellen. Daher wird eine Wiederherstellung einige Handarbeit erfordern, wenn sie auf mehr als ein Sicherungsger\"{a}t oder verschiedene Medientypen speichern. + + \end{itemize} + +\section{Grenzen und Beschr\"{a}nkungen des Software Design} +\index[general]{Restrictions!Design Limitations or } +\index[general]{Design Limitations or Restrictions } + +\begin{itemize} +\item Namen (\textbf{Resource}-Namen, \textbf{Volume}-Names und \"{a}hnliche) in Baculas Konfigurationsdateien sind auf eine bestimmte L\"{a}nge beschr\"{a}nkt . Momentan liegt die Grenze bei 127 Zeichen. Beachten Sie bitte, dass diese Einschr\"{a}nkungen nicht die Dateinamen betrifft, die beliebig lang sein k\"{o}nnen. + +\item Durch die Nicht-Unicode Windows API, die wir auf Win32-Maschinen verwenden, sind wir bei Dateinamen auf 260 Zeichen beschr\"{a}nkt. Wir planen dies in einer zuk\"{u}nftigen Version zu korrigieren, indem wir die Unicode-API verwenden. 
+ +\end{itemize} diff --git a/docs/manuals/de/concepts/strategies.tex b/docs/manuals/de/concepts/strategies.tex new file mode 100644 index 00000000..b0bcfebc --- /dev/null +++ b/docs/manuals/de/concepts/strategies.tex @@ -0,0 +1,439 @@ +%% +%% + +\chapter{Backup Strategies} +\label{StrategiesChapter} +\index[general]{Strategies!Backup } +\index[general]{Backup Strategies } + +Although Recycling and Backing Up to Disk Volume have been discussed in +previous chapters, this chapter is meant to give you an overall view of +possible backup strategies and to explain their advantages and disadvantages. +\label{Simple} + +\section{Simple One Tape Backup} +\index[general]{Backup!Simple One Tape } +\index[general]{Simple One Tape Backup } + +Probably the simplest strategy is to back everything up to a single tape and +insert a new (or recycled) tape when it fills and Bacula requests a new one. + +\subsection{Advantages} +\index[general]{Advantages } + +\begin{itemize} +\item The operator intervenes only when a tape change is needed. (once a + month at my site). +\item There is little chance of operator error because the tape is not + changed daily. +\item A minimum number of tapes will be needed for a full restore. Typically + the best case will be one tape and worst two. +\item You can easily arrange for the Full backup to occur a different night + of the month for each system, thus load balancing and shortening the backup + time. +\end{itemize} + +\subsection{Disadvantages} +\index[general]{Disadvantages } + +\begin{itemize} +\item If your site burns down, you will lose your current backups, and in my + case about a month of data. +\item After a tape fills and you have put in a blank tape, the backup will + continue, and this will generally happen during working hours. + \end{itemize} + +\subsection{Practical Details} +\index[general]{Details!Practical } +\index[general]{Practical Details } + +This system is very simple. When the tape fills and Bacula requests a new +tape, you {\bf unmount} the tape from the Console program, insert a new tape +and {\bf label} it. In most cases after the label, Bacula will automatically +mount the tape and resume the backup. Otherwise, you simply {\bf mount} the +tape. + +Using this strategy, one typically does a Full backup once a week followed by +daily Incremental backups. To minimize the amount of data written to the tape, +one can do a Full backup once a month on the first Sunday of the +month, a Differential backup on the 2nd-5th Sunday of the month, and +incremental backups the rest of the week. +\label{Manual} + +\section{Manually Changing Tapes} +\index[general]{Tapes!Manually Changing } +\index[general]{Manually Changing Tapes } + +If you use the strategy presented above, Bacula will ask you to change the +tape, and you will {\bf unmount} it and then remount it when you have inserted +the new tape. + +If you do not wish to interact with Bacula to change each tape, there are +several ways to get Bacula to release the tape: + +\begin{itemize} +\item In your Storage daemon's Device resource, set + {\bf AlwaysOpen = no} + In this case, Bacula will release the tape after every job. If you run + several jobs, the tape will be rewound and repositioned to the end at the + beginning of every job. This is not very efficient, but does let you change + the tape whenever you want. +\item Use a {\bf RunAfterJob} statement to run a script after your last job. + This could also be an {\bf Admin} job that runs after all your backup jobs. 
+ The script could be something like: + +\footnotesize +\begin{verbatim} + #!/bin/sh + /full-path/bconsole -c /full-path/bconsole.conf <----| Stunnel 1 |-----> Port 9102 + |===========| + stunnel-fd2.conf + |===========| + Port 9103 >----| Stunnel 2 |-----> server:29103 + |===========| + Director (server): + stunnel-dir.conf + |===========| + Port 29102 >----| Stunnel 3 |-----> client:29102 + |===========| + stunnel-sd.conf + |===========| + Port 29103 >----| Stunnel 4 |-----> 9103 + |===========| +\end{verbatim} +\normalsize + +\section{Certificates} +\index[general]{Certificates } + +In order for stunnel to function as a server, which it does in our diagram for +Stunnel 1 and Stunnel 4, you must have a certificate and the key. It is +possible to keep the two in separate files, but normally, you keep them in one +single .pem file. You may create this certificate yourself in which case, it +will be self-signed, or you may have it signed by a CA. + +If you want your clients to verify that the server is in fact valid (Stunnel 2 +and Stunnel 3), you will need to have the server certificates signed by a CA +(Certificate Authority), and you will need to have the CA's public certificate +(contains the CA's public key). + +Having a CA signed certificate is {\bf highly} recommended if you are using +your client across the Internet, otherwise you are exposed to the man in the +middle attack and hence loss of your data. + +See below for how to create a self-signed certificate. + +\section{Securing the Data Channel} +\index[general]{Channel!Securing the Data } +\index[general]{Securing the Data Channel } + +To simplify things a bit, let's for the moment consider only the data channel. +That is the connection between the File daemon and the Storage daemon, which +takes place on port 9103. In fact, in a minimalist solution, this is the only +connection that needs to be encrypted, because it is the one that transports your +data. The connection between the Director and the File daemon is simply a +control channel used to start the job and get the job status. + +Normally the File daemon will contact the Storage daemon on port 9103 +(supplied by the Director), so we need an stunnel that listens on port 9103 on +the File daemon's machine, encrypts the data and sends it to the Storage +daemon. This is depicted by Stunnel 2 above. Note that this stunnel is +listening on port 9103 and sending to server:29103. We use port 29103 on the +server because if we would send the data to port 9103, it would go directly to the +Storage daemon, which doesn't understand encrypted data. On the server +machine, we run Stunnel 4, which listens on port 29103, decrypts the data and +sends it to the Storage daemon, which is listening on port 9103. + +\section{Data Channel Configuration} +\index[general]{Modification of bacula-dir.conf for the Data Channel } +\index[general]{baculoa-dir.conf!Modification for the Data Channel } + +The Storage resource of the bacula-dir.conf normally looks something like the +following: + +\footnotesize +\begin{verbatim} +Storage { + Name = File + Address = server + SDPort = 9103 + Password = storage_password + Device = File + Media Type = File +} +\end{verbatim} +\normalsize + +Notice that this is running on the server machine, and it points the File +daemon back to server:9103, which is where our Storage daemon is listening. 
We +modify this to be: + +\footnotesize +\begin{verbatim} +Storage { + Name = File + Address = localhost + SDPort = 9103 + Password = storage_password + Device = File + Media Type = File +} +\end{verbatim} +\normalsize + +This causes the File daemon to send the data to the stunnel running on +localhost (the client machine). We could have used client as the address as +well. + +\section{Stunnel Configuration for the Data Channel} +\index[general]{Stunnel Configuration for the Data Channel } + +In the diagram above, we see above Stunnel 2 that we use stunnel-fd2.conf on the +client. A pretty much minimal config file would look like the following: + +\footnotesize +\begin{verbatim} +client = yes +[29103] +accept = localhost:9103 +connect = server:29103 +\end{verbatim} +\normalsize + +The above config file does encrypt the data but it does not require a +certificate, so it is subject to the man in the middle attack. The file I +actually used, stunnel-fd2.conf, looked like this: + +\footnotesize +\begin{verbatim} +# +# Stunnel conf for Bacula client -> SD +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is not mandatory here. If verify=2, a +# cert signed by a CA must be specified, and +# either CAfile or CApath must point to the CA's +# cert +# +cert = /home/kern/stunnel/stunnel.pem +CAfile = /home/kern/ssl/cacert.pem +verify = 2 +client = yes +# debug = 7 +# foreground = yes +[29103] +accept = localhost:9103 +connect = server:29103 +\end{verbatim} +\normalsize + +You will notice that I specified a pid file location because I ran stunnel +under my own userid so I could not use the default, which requires root +permission. I also specified a certificate that I have as well as verify level +2 so that the certificate is required and verified, and I must supply the +location of the CA (Certificate Authority) certificate so that the stunnel +certificate can be verified. Finally, you will see that there are two lines +commented out, which when enabled, produce a lot of nice debug info in the +command window. + +If you do not have a signed certificate (stunnel.pem), you need to delete the +cert, CAfile, and verify lines. + +Note that the stunnel.pem, is actually a private key and a certificate in a +single file. These two can be kept and specified individually, but keeping +them in one file is more convenient. + +The config file, stunnel-sd.conf, needed for Stunnel 4 on the server machine +is: + +\footnotesize +\begin{verbatim} +# +# Bacula stunnel conf for Storage daemon +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is mandatory here, it may be self signed +# If it is self signed, the client may not use +# verify +# +cert = /home/kern/stunnel/stunnel.pem +client = no +# debug = 7 +# foreground = yes +[29103] +accept = 29103 +connect = 9103 +\end{verbatim} +\normalsize + +\section{Starting and Testing the Data Encryption} +\index[general]{Starting and Testing the Data Encryption } +\index[general]{Encryption!Starting and Testing the Data } + +It will most likely be the simplest to implement the Data Channel encryption +in the following order: + +\begin{itemize} +\item Setup and run Bacula backing up some data on your client machine + without encryption. +\item Stop Bacula. +\item Modify the Storage resource in the Director's conf file. 
+\item Start Bacula +\item Start stunnel on the server with: + + \footnotesize +\begin{verbatim} + stunnel stunnel-sd.conf + +\end{verbatim} +\normalsize + +\item Start stunnel on the client with: + + \footnotesize +\begin{verbatim} + stunnel stunnel-fd2.conf + +\end{verbatim} +\normalsize + +\item Run a job. +\item If it doesn't work, turn debug on in both stunnel conf files, restart + the stunnels, rerun the job, repeat until it works. + \end{itemize} + +\section{Encrypting the Control Channel} +\index[general]{Channel!Encrypting the Control } +\index[general]{Encrypting the Control Channel } + +The Job control channel is between the Director and the File daemon, and as +mentioned above, it is not really necessary to encrypt, but it is good +practice to encrypt it as well. The two stunnels that are used in this case +will be Stunnel 1 and Stunnel 3 in the diagram above. Stunnel 3 on the server +might normally listen on port 9102, but if you have a local File daemon, this +will not work, so we make it listen on port 29102. It then sends the data to +client:29102. Again we use port 29102 so that the stunnel on the client +machine can decrypt the data before passing it on to port 9102 where the File +daemon is listening. + +\section{Control Channel Configuration} +\index[general]{Control Channel Configuration } + +We need to modify the standard Client resource, which would normally look +something like: + +\footnotesize +\begin{verbatim} +Client { + Name = client-fd + Address = client + FDPort = 9102 + Catalog = BackupDB + Password = "xxx" +} +\end{verbatim} +\normalsize + +to be: + +\footnotesize +\begin{verbatim} +Client { + Name = client-fd + Address = localhost + FDPort = 29102 + Catalog = BackupDB + Password = "xxx" +} +\end{verbatim} +\normalsize + +This will cause the Director to send the control information to +localhost:29102 instead of directly to the client. + +\section{Stunnel Configuration for the Control Channel} +\index[general]{Config Files for stunnel to Encrypt the Control Channel } + +The stunnel config file, stunnel-dir.conf, for the Director's machine would +look like the following: + +\footnotesize +\begin{verbatim} +# +# Bacula stunnel conf for the Directory to contact a client +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is not mandatory here. If verify=2, a +# cert signed by a CA must be specified, and +# either CAfile or CApath must point to the CA's +# cert +# +cert = /home/kern/stunnel/stunnel.pem +CAfile = /home/kern/ssl/cacert.pem +verify = 2 +client = yes +# debug = 7 +# foreground = yes +[29102] +accept = localhost:29102 +connect = client:29102 +\end{verbatim} +\normalsize + +and the config file, stunnel-fd1.conf, needed to run stunnel on the Client +would be: + +\footnotesize +\begin{verbatim} +# +# Bacula stunnel conf for the Directory to contact a client +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is not mandatory here. 
If verify=2, a +# cert signed by a CA must be specified, and +# either CAfile or CApath must point to the CA's +# cert +# +cert = /home/kern/stunnel/stunnel.pem +CAfile = /home/kern/ssl/cacert.pem +verify = 2 +client = yes +# debug = 7 +# foreground = yes +[29102] +accept = localhost:29102 +connect = client:29102 +\end{verbatim} +\normalsize + +\section{Starting and Testing the Control Channel} +\index[general]{Starting and Testing the Control Channel } +\index[general]{Channel!Starting and Testing the Control } + +It will most likely be the simplest to implement the Control Channel +encryption in the following order: + +\begin{itemize} +\item Stop Bacula. +\item Modify the Client resource in the Director's conf file. +\item Start Bacula +\item Start stunnel on the server with: + + \footnotesize +\begin{verbatim} + stunnel stunnel-dir.conf + +\end{verbatim} +\normalsize + +\item Start stunnel on the client with: + + \footnotesize +\begin{verbatim} + stunnel stunnel-fd1.conf + +\end{verbatim} +\normalsize + +\item Run a job. +\item If it doesn't work, turn debug on in both stunnel conf files, restart + the stunnels, rerun the job, repeat until it works. + \end{itemize} + +\section{Using stunnel to Encrypt to a Second Client} +\index[general]{Using stunnel to Encrypt to a Second Client } +\index[general]{Client!Using stunnel to Encrypt to a Second } + +On the client machine, you can just duplicate the setup that you have on the +first client file for file and it should work fine. + +In the bacula-dir.conf file, you will want to create a second client pretty +much identical to how you did for the first one, but the port number must be +unique. We previously used: + +\footnotesize +\begin{verbatim} +Client { + Name = client-fd + Address = localhost + FDPort = 29102 + Catalog = BackupDB + Password = "xxx" +} +\end{verbatim} +\normalsize + +so for the second client, we will, of course, have a different name, and we +will also need a different port. Remember that we used port 29103 for the +Storage daemon, so for the second client, we can use port 29104, and the +Client resource would look like: + +\footnotesize +\begin{verbatim} +Client { + Name = client2-fd + Address = localhost + FDPort = 29104 + Catalog = BackupDB + Password = "yyy" +} +\end{verbatim} +\normalsize + +Now, fortunately, we do not need a third stunnel to on the Director's machine, +we can just add the new port to the config file, stunnel-dir.conf, to make: + +\footnotesize +\begin{verbatim} +# +# Bacula stunnel conf for the Directory to contact a client +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is not mandatory here. If verify=2, a +# cert signed by a CA must be specified, and +# either CAfile or CApath must point to the CA's +# cert +# +cert = /home/kern/stunnel/stunnel.pem +CAfile = /home/kern/ssl/cacert.pem +verify = 2 +client = yes +# debug = 7 +# foreground = yes +[29102] +accept = localhost:29102 +connect = client:29102 +[29104] +accept = localhost:29102 +connect = client2:29102 +\end{verbatim} +\normalsize + +There are no changes necessary to the Storage daemon or the other stunnel so +that this new client can talk to our Storage daemon. + +\section{Creating a Self-signed Certificate} +\index[general]{Creating a Self-signed Certificate } +\index[general]{Certificate!Creating a Self-signed } + +You may create a self-signed certificate for use with stunnel that will permit +you to make it function, but will not allow certificate validation. 
The .pem +file containing both the certificate and the key can be made with the +following, which I put in a file named {\bf makepem}: + +\footnotesize +\begin{verbatim} +#!/bin/sh +# +# Simple shell script to make a .pem file that can be used +# with stunnel and Bacula +# +OPENSSL=openssl + umask 77 + PEM1="/bin/mktemp openssl.XXXXXX" + PEM2="/bin/mktemp openssl.XXXXXX" + ${OPENSSL} req -newkey rsa:1024 -keyout $PEM1 -nodes \ + -x509 -days 365 -out $PEM2 + cat $PEM1 > stunnel.pem + echo "" >>stunnel.pem + cat $PEM2 >>stunnel.pem + rm $PEM1 $PEM2 +\end{verbatim} +\normalsize + +The above script will ask you a number of questions. You may simply answer +each of them by entering a return, or if you wish you may enter your own data. + + +\section{Getting a CA Signed Certificate} +\index[general]{Certificate!Getting a CA Signed } +\index[general]{Getting a CA Signed Certificate } + +The process of getting a certificate that is signed by a CA is quite a bit +more complicated. You can purchase one from quite a number of PKI vendors, but +that is not at all necessary for use with Bacula. + +To get a CA signed +certificate, you will either need to find a friend that has setup his own CA +or to become a CA yourself, and thus you can sign all your own certificates. +The book OpenSSL by John Viega, Matt Mesier \& Pravir Chandra from O'Reilly +explains how to do it, or you can read the documentation provided in the +Open-source PKI Book project at Source Forge: +\elink{ +http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm} +{http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm}. +Note, this link may change. + +\section{Using ssh to Secure the Communications} +\index[general]{Communications!Using ssh to Secure the } +\index[general]{Using ssh to Secure the Communications } + +Please see the script {\bf ssh-tunnel.sh} in the {\bf examples} directory. It +was contributed by Stephan Holl. diff --git a/docs/manuals/de/concepts/supportedchangers.tex b/docs/manuals/de/concepts/supportedchangers.tex new file mode 100644 index 00000000..ebf876bc --- /dev/null +++ b/docs/manuals/de/concepts/supportedchangers.tex @@ -0,0 +1,76 @@ +%% +%% + +\chapter{Supported Autochangers} +\label{Models} +\index[general]{Supported Autochanger Models} +\index[general]{Autochangers!Supported} + +I hesitate to call these "supported" autochangers because the only +autochangers that I have in my possession and am able to test are the HP +SureStore DAT40X6 and the Overland PowerLoader LTO-2. All the other +autochangers have been reported to work by Bacula users. Note, in the +Capacity/Slot column below, I quote the Compressed capacity per tape (or +Slot). + +Since on most systems (other than FreeBSD), Bacula uses {\bf mtx} +through the {\bf mtx-changer} script, in principle, if {\bf mtx} +will operate your changer correctly, then it is just a question +of adapting the {\bf mtx-changer} script (or selecting one +already adapted) for proper interfacing. You can find a list of +autochangers supported by {\bf mtx} at the following link: +\elink{http://mtx.opensource-sw.net/compatibility.php} +{\url{http://mtx.opensource-sw.net/compatibility.php}}. +The home page for the {\bf mtx} project can be found at: +\elink{http://mtx.opensource-sw.net/}{\url{http://mtx.opensource-sw.net/}}. 
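+If you want to confirm that {\bf mtx} can drive your autochanger before
+adapting the {\bf mtx-changer} script, a quick manual test might look like
+the following; the control device {\bf /dev/sg0} is only an example and
+will differ on your system:
+
+\footnotesize
+\begin{verbatim}
+mtx -f /dev/sg0 status      # list slots, drives and barcodes
+mtx -f /dev/sg0 load 1 0    # load the tape from slot 1 into drive 0
+mtx -f /dev/sg0 unload 1 0  # return the tape from drive 0 to slot 1
+\end{verbatim}
+\normalsize
+
+If these commands behave as expected, adapting {\bf mtx-changer} is
+normally just a matter of matching the device names and slot numbering.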
+ + +\addcontentsline{lot}{table}{Autochangers Known to Work with Bacula} +\begin{longtable}{|p{0.6in}|p{0.8in}|p{1.9in}|p{0.8in}|p{0.5in}|p{0.75in}|} + \hline +\multicolumn{1}{|c| }{\bf OS } & \multicolumn{1}{c| }{\bf Man. } & +\multicolumn{1}{c| }{\bf Media } & \multicolumn{1}{c| }{\bf Model } & +\multicolumn{1}{c| }{\bf Slots } & \multicolumn{1}{c| }{\bf Cap/Slot } \\ + \hline {Linux } & {Adic } & {DDS-3} & {Adic 1200G } & {12} & {-} \\ + \hline {Linux } & {Adic } & {DLT} & {FastStore 4000 } & {7} & {20GB} \\ + \hline {Linux } & {Adic } & {LTO-1/2, SDLT 320 } & {Adic Scalar 24 } & {24} & {100GB } \\ + \hline {Linux } & {Adic } & {LTO-2 } & {Adic FastStor 2, Sun Storedge L8 } & {8} & {200GB } \\ + \hline {Linux } & {BDT } & {AIT } & {BDT ThinStor } & {?} & {200GB } \\ + \hline {- } & {CA-VM } & {?? } & {Tape } & {??} & {?? } \\ + \hline {Linux } & {Dell} & {DLT VI,LTO-2,LTO3} & {PowerVault 122T/132T/136T } & {-} & {100GB } \\ + \hline {Linux } & {Dell} & {LTO-2} & {PowerVault 124T } & {-} & {200GB } \\ + \hline {- } & {DFSMS } & {?? } & {VM RMM} & {-} & {?? } \\ + \hline {Linux } & {Exabyte } & {VXA2 } & {VXA PacketLoader 1x10 2U } & {10} & {80/160GB } \\ + \hline {- } & {Exabyte } & {LTO } & {Magnum 1x7 LTO Tape Auotloader } & {7} & {200/400GB } \\ + \hline {Linux } & {Exabyte } & {AIT-2 } & {215A } & {15 (2 drives)} & {50GB } \\ + \hline {Linux } & {HP } & {DDS-4 } & {SureStore DAT-40X6 } & {6 } & {40GB } \\ + \hline {Linux } & {HP } & {Ultrium-2/LTO } & {MSL 6000/ 60030/ 5052 } & {28 } & {200/400GB } \\ + \hline {- } & {HP } & {DLT } & {A4853 DLT } & {30} & {40/70GB } \\ + \hline {Linux } & {HP (Compaq) } & {DLT VI } & {Compaq TL-895 } & {96+4 import export} & {35/70GB } \\ + \hline {z/VM } & {IBM } & {?? } & {IBM Tape Manager } & {-} & {?? } \\ + \hline {z/VM } & {IBM } & {?? } & {native tape } & {-} & {?? } \\ + \hline {Linux } & {IBM } & {LTO } & {IBM 3581 Ultrium Tape Loader } & {7} & {200/400GB } \\ + \hline {FreeBSD 5.4} & {IBM } & {DLT} & {IBM 3502-R14 -- rebranded ATL L-500} & {14} & {35/70GB } \\ + \hline {Linux} & {IBM } & {???} & {IBM TotalStorage 3582L23} & {??} & {?? } \\ + \hline {Debian} & {Overland } & {LTO } & {Overland LoaderXpress LTO/DLT8000 } & {10-19} & {40-100GB } \\ + \hline {Fedora} & {Overland } & {LTO } & {Overland PowerLoader LTO-2 } & {10-19} & {200/400GB } \\ + \hline {FreeBSD 5.4-Stable} & {Overland} & {LTO-2} & {Overland Powerloader tape} & {17} & {100GB } \\ + \hline {- } & {Overland} & {LTO } & {Overland Neo2000 LTO } & {26-30} & {100GB } \\ + \hline {Linux} & {Quantum } & {DLT-S4} & {Superloader 3} & {16} & {800/1600GB } \\ + \hline {Linux} & {Quantum } & {LTO-2} & {Superloader 3} & {16} & {200/400GB } \\ + \hline {Linux} & {Quantum } & {LTO-3 } & {PX502 } & {??} & {?? } \\ + \hline {FreeBSD 4.9 } & {QUALSTAR TLS-4210 (Qualstar) } & {AIT1: 36GB, AIT2: 50GB all +uncomp } & {QUALSTAR TLS-4210 } & {12} & {AIT1: 36GB, AIT2: 50GB all uncomp }\\ + \hline {Linux } & {Skydata } & {DLT } & {ATL-L200 } & {8} & {40/80 } \\ + \hline {- } & {Sony } & {DDS-4 } & {TSL-11000 } & {8} & {40GB } \\ + \hline {Linux } & {Sony } & {AIT-2} & {LIB-304(SDX-500C) } & {?} & {200GB } \\ + \hline {Linux } & {Sony } & {AIT-3} & {LIB-D81) } & {?} & {200GB } \\ + \hline {FreeBSD 4.9-STABLE } & {Sony } & {AIT-1 } & {TSL-SA300C } & {4} & {45/70GB }\\ + \hline {- } & {Storagetek } & {DLT } & {Timberwolf DLT } & {6} & {40/70 } \\ + \hline {- } & {Storagetek } & {?? } & {ACSLS } & {??} & {?? 
} \\ + \hline {Solaris } & {Sun } & {4mm DLT } & {Sun Desktop Archive Python 29279 } & {4} & {20GB } \\ + \hline {Linux } & {Tandberg } & {DLT VI } & {VS 640 } & {8?} & {35/70GB } \\ + \hline {Linux 2.6.x } & {Tandberg Data } & {SLR100 } & {SLR100 Autoloader } & {8} & {50/100GB }\\ +\hline + +\end{longtable} diff --git a/docs/manuals/de/concepts/supporteddrives.tex b/docs/manuals/de/concepts/supporteddrives.tex new file mode 100644 index 00000000..2793d3f7 --- /dev/null +++ b/docs/manuals/de/concepts/supporteddrives.tex @@ -0,0 +1,106 @@ +%% +%% + +\chapter{Unterst\"{u}tzte Bandlaufwerke} +\label{SupportedDrives} +\index[general]{Bandlaufwerke!unterst\"{u}tzte} +\index[general]{Unterst\"{u}tzte Bandlaufwerke } + +Auch wenn Ihr Bandlaufwerk in der untenstehenden Liste eingetragen ist, lesen Sie bitte im Kapitel \ilink{Test der Bandlaufwerke}{btape1} in diesem Handbuch wie Sie sich vergewissern k\"{o}nnen, dass Ihr Bandlaufwerk mit Bacula zusammen funktionieren wird. + +Wenn Ihr Laufwerk im festen Block-Modus arbeitet, k\"{o}nnte es zun\"{a}chst so aussehen, als ob es funkioniert, bis sie dann eine Wiederherstellung machen und Bacula versucht, das Band zu positionieren. Sie k\"{o}nnen nur sicher sein, wenn sie die oben vorgeschlagenen Verfahren befolgen und testen. + +Weil wir so wenige R\"{u}ckmeldungen haben, ist es sehr schwierig, eine Liste der unterst\"{u}tzten Laufwerke oder zumindest jener zu liefern, mit denen Bacula funktioniert (wenn sie also Bacula mit einem anderen Laufwerk benutzen, melden Sie es bitte). Laut unseren Benutzern arbeiten die folgenden Laufwerke unter Bacula. Ein Strich in einer Spalte bedeutet ``unbekannt''. + +\addcontentsline{lot}{table}{Supported Tape Drives} +%war: zwei mal 2.5 in +\begin{longtable}{|p{1.0in}|l|l|p{1.5in}|l|} + \hline +\multicolumn{1}{|c| }{\bf BS } & \multicolumn{1}{c| }{\bf Herst.} & +\multicolumn{1}{c| }{\bf Media } & \multicolumn{1}{c| }{\bf Modell } & +\multicolumn{1}{c| }{\bf Kapazit\"{a}t } \\ + \hline {- } & {ADIC } & {DLT } & {Adic Scalar 100 DLT } & {100GB } \\ + \hline {- } & {ADIC } & {DLT } & {Adic Fastor 22 DLT } & {- } \\ + \hline {- } & {- } & {DDS } & {Compaq DDS 2,3,4 } & {- } \\ + \hline {- } & {Exabyte } & {- } & {Exabyte LWe, \lt 10 Jahre alt } & {- } \\ + \hline {- } & {Exabyte } & {- } & {Exabyte VXA LWe } & {- } \\ + \hline {- } & {HP } & {Travan 4 } & {Colorado T4000S } & {- } \\ + \hline {- } & {HP } & {DLT } & {HP DLT LWe } & {- } \\ + \hline {- } & {HP } & {LTO } & {HP LTO Ultrium LWe } & {- } \\ + \hline {- } & {IBM} & {??} & {3480, 3480XL, 3490, 3490E, 3580 and 3590 LWe} & {- } \\ + \hline {FreeBSD 4.10 RELEASE } & {HP } & {DAT } & {HP StorageWorks DAT72i } & {- } \\ + \hline {FreeBSD 5.4-RELEASE-p1 amd64 } & {Certance} & {LTO } & {AdicCertance CL400 LTO Ultrium 2 } & {200GB } \\ + \hline {- } & {Overland } & {LTO } & {LoaderXpress LTO } & {- } \\ + \hline {SuSE 8.1 Pro} & {Compaq} & {AIT } & {Compaq AIT 35 LVD } & {35/70GB } \\ + \hline {- } & {Overland } & {- } & {Neo2000 } & {- } \\ + \hline {- } & {OnStream } & {- } & {OnStream LWe (siehe unten) } & {- } \\ + \hline {- } & {Quantum } & {DLT } & {DLT-8000 } & {40/80GB } \\ + \hline {Linux } & {Seagate } & {DDS-4 } & {Scorpio 40 } & {20/40GB } \\ + \hline {FreeBSD 4.9 STABLE } & {Seagate } & {DDS-4 } & {STA2401LW } & {20/40GB } \\ + \hline {FreeBSD 5.2.1, Pthreads gepatcht } & {Seagate } & {AIT-1 } & {STA1701W} & {35/70GB } \\ + \hline {Linux } & {Sony } & {DDS-2,3,4 } & {- } & {4-40GB } \\ + \hline {Linux } & {Tandberg } & {- } & {Tandbert MLR3 } & {- } \\ + 
\hline {FreeBSD } & {Tandberg } & {- } & {Tandberg SLR6 } & {- } \\ + \hline {FreeBSD 4.11-Release} & {Quantum } & {SDLT } & {SDLT320 } & {160/320GB } \\ + \hline {Solaris } & {Tandberg } & {- } & {Tandberg SLR75 } & {- } \\ + \hline + +\end{longtable} + +Es gibt eine Liste mit \ilink{unterst\"{u}tzten Autochanger}{Models} im Kapitel ``Unterst\"{u}tzte Autochanger'' in diesem Dokument, in dem noch weitere Laufwerke aufgef\"{u}hrt sind, die mit Bacula funktionieren. + +\section{Nicht unterst\"{u}tzte Bandlaufwerke} +\label{UnSupportedDrives} +\index[general]{Nicht unterst\"{u}tzte Bandlaufwerke } +\index[general]{Bandlaufwerke!nicht unterst\"{u}tzte } + +Bisher funktionierten OnStream IDE-SCSI Bandlaufwerke nicht unter Bacula. Seit der Bacula-Version 1.33 und der Version 0.9.14 des osst-Kerneldrivers funktionieren sie nun. Da sie eine feste Blockgr\"{o}{\ss}e einstellen m\"{u}ssen, beachten sie bitte das Kapitel zum Testen. + +Von QIC-B\"{a}ndern wei{\ss} man, dass sie einige Besonderheiten haben (feste Blockgr\"{o}{\ss}e, eher ein EOF als zwei zur Markierung des Bandendes). Sie m\"{u}ssen diese daher sehr sorgf\"{a}ltig konfigurieren, wenn sie korrekt mit Bacula arbeiten sollen. + +\section{Warnung f\"{u}r FreeBSD-Benutzer!!!} +\index[general]{Warnung f\"{u}r FreeBSD-Benutzer!!! } +\index[general]{FreeBSD-Benutzer!Warnung f\"{u}r} + +Solange die Pthreads-Bibliothek der meisten FreeBSD-Systeme nicht gepatcht ist, werden Sie Daten verlieren, wenn Sie mit Bacula B\"{a}nder vollschreiben. Die ungepatchte Pthreads-Bibliothek ist nicht in der Lage, Bacula eine Warnung zur\"{u}ckzugeben, wenn das Bandende naht. Beachten Sie bitte das Kapitel zum Test der B\"{a}nder in diesem Handbuch mit \textbf{wichtigen} Informationen, wie man das Bandlaufwerk so konfiguriert, dass es zu Bacula kompatibel ist. + + +\section{Unterst\"{u}tzte Autochanger} +\index[general]{Autochanger!unterst\"{u}tzte } +\index[general]{Unterst\"{u}tzte Autochanger } + +Informationen zu den unterst\"{u}tzten Autochangern stehen im Abschnitt +\ilink{Autochangers Known to Work with Bacula}{Models} +im Kapitel ``Unterst\"{u}tzte Autochanger'' dieses Handbuches. + +\section{Band-Spezifikationen} +\index[general]{Spezifikationen!Band-} +\index[general]{Band-Spezifikationen} +Wir k\"{o}nnen Ihnen wirklich nicht sagen welche B\"{a}nder zusammen mit Bacula funktionieren werden. Wenn Sie ein Laufwerk kaufen wollen, sollten Sie versuchen, DDS-Laufwerke zu vermeiden. Deren Technologie ist relativ alt und die Laufwerke ben\"{o}tigen regelm\"{a}{\ss}ige Reinigung. DLT-Laufwerke sind im allgemeinen viel besser (neuere Technologie) und ben\"{o}tigen keine regelm\"{a}{\ss}ige Reinigung. + +Unten ist eine Tabelle mit den Spezifikationen von DLT- und LTO-B\"{a}ndern, die Ihnen einen Eindruck der Geschwindigkeit und Kapazit\"{a}t aktueller B\"{a}nder geben soll. Die aufgef\"{u}hrte Kapazit\"{a}t ist die reine Bandkapazit\"{a}t ohne Kompression. Alle modernen Laufwerke arbeiten mit Hardware-Kompression und die Hersteller geben oft eine Kompressionsrate von 2:1 an. Die tats\"{a}chliche Kompressionsrate h\"{a}ngt haupts\"{a}chlich von den zu sichernden Daten ab, aber ich finde 1,5:1 ist ein viel vern\"{u}nftigerer Wert (multiplizieren Sie die Werte der Tabelle mit 1,5 und Sie werden ein grobes Mittel dessen erhalten, was Sie m\"{o}glicherweise sehen werden). Die Transferraten sind auf den n\"{a}chsten GB/hr-Wert gerundet. Die Werte wurden von verschiedenen Herstellern zur Verf\"{u}gung gestellt. 
+In der Spalte ``Medien Typ'' stehen die Benennungen der Hersteller. Es ist nicht notwendig, diese Namen in den Konfigurationsdateien von Bacula zu benutzen. Allerdings k\"{o}nnen Sie das tun. + + + \begin{tabular}{|c|c|c|c} + Medien Typ & Laufwerks-Type & Medien Kapazit\"{a}t & Transferrate \\ \hline + DDS-1 & DAT & 2 GB & ?? GB/hr \\ \hline + DDS-2 & DAT & 4 GB & ?? GB/hr \\ \hline + DDS-3 & DAT & 12 GB & 5.4 GB/hr \\ \hline + Travan 40 & Travan & 20 GB & ?? GB/hr \\ \hline + DDS-4 & DAT & 20 GB & 11 GB/hr \\ \hline + VXA-1 & Exabyte & 33 GB & 11 GB/hr \\ \hline + DAT-72 & DAT & 36 GB & 13 GB/hr \\ \hline + DLT IV & DLT8000 & 40 GB & 22 GB/hr \\ \hline + VXA-2 & Exabyte & 80 GB & 22 GB/hr \\ \hline + Half-high Ultrum 1 & LTO 1 & 100 GB & 27 GB/hr \\ \hline + Ultrium 1 & LTO 1 & 100 GB & 54 GB/hr \\ \hline + Super DLT 1 & SDLT 220 & 110 GB & 40 GB/hr \\ \hline + VXA-3 & Exabyte & 160 GB & 43 GB/hr \\ \hline + Super DLT I & SDLT 320 & 160 GB & 58 GB/hr \\ \hline + Ultrium 2 & LTO 2 & 200 GB & 108 GB/hr \\ \hline + Super DLT II & SDLT 600 & 300 GB & 127 GB/hr \\ \hline + VXA-4 & Exabyte & 320 GB & 86 GB/hr \\ \hline + Ultrium 3 & LTO 3 & 400 GB & 216 GB/hr \\ \hline + \end{tabular} + diff --git a/docs/manuals/de/concepts/supportedoses.tex b/docs/manuals/de/concepts/supportedoses.tex new file mode 100644 index 00000000..78b7a498 --- /dev/null +++ b/docs/manuals/de/concepts/supportedoses.tex @@ -0,0 +1,34 @@ +%% +%% + +\chapter{Unterst\"{u}tzte Betriebssysteme} +\label{SupportedOSes} +\index[general]{Betriebssysteme!Unterst\"{u}tzte } +\index[general]{Unterst\"{u}tzte Betriebssysteme } + +\begin{itemize} +\item Linux-Systeme (kompiliert und getestet unter ``RedHat Enterprise Linux 3.0''). +\item Wenn Sie ein neueres ``RedHat'' Linux-System mit Kernel 2.4.x haben und im System ein Verzeichnis {\bf /lib/tls} angelegt ist (normalerweise voreingestellt), wird Bacula {\bf NICHT} starten. Dies liegt an der neuen Pthreads-Bibliothek, die fehlerhaft ist. Um Bacula zum laufen zu bringen, muss dieses Verzeichnis entfernt oder umbenannt und der Computer neu gestartet werden (eine der seltenen Gelegenheiten, bei denen man Linux neu booten muss). Sollte es nicht m\"{o}glich sein, /lib/tls zu entfernen oder umzubenennen, setzt man stattdessen die Umgebungsvariable ``LD\_ASSUME\_KERNEL=2.4.19'' bevor man Bacula startet. Hierbei muss der Rechner nicht neu gestartet werden und alle anderen Programme werden /lib/tls weiterhin benutzen. + +Aus R\"{u}ckmeldungen unsere Benutzer wissen wir, dass das Problem auch mit Kerneln der Version 2.6 besteht. Hier w\"{u}rden wir eher dazu raten die Umgebungsvariable neu zu setzen (LD\_ASSUME\_KERNEL=2.4.19), als das Verzeichnis /lib/tls zu entfernen. + +\item die meisten Linux-Distributionen (Gentoo, SuSE, Mandriva, Debian...). + +\item verschiedene Solaris-Versionen. + +\item FreeBSD (zur Unterst\"{u}tzung der Bandlaufwerke in Version 1.30 lesen Sie bitte die \textbf{wichtige} Hinweise im Abschnitt \ilink{Band-Modi unter FreeBSD}{FreeBSDTapes} des Kapitels zum Test der Bandlaufwerke in diesem Handbuch.) + +\item Windows (Win98/Me, WinNT/2K/XP) Client-Programm (File-D\"{a}mon). + +\item MacOS X/Darwin (N\"{a}heres zum Bezug der Pakete unter \elink{ http://fink.sourceforge.net/}{http://fink.sourceforge.net/}) + +\item OpenBSD Client (File-D\"{a}mon). +\item Irix Client (File-D\"{a}mon). +\item Tru64 + +\item es hei{\ss}t, Bacula funktioniere auch auf anderen Systemen (AIX, BSDI, HPUX...) doch haben wir mit diesen Systemen keine eigenen Erfahrungen. 
+ +\item RedHat 7.2 AS2, AS3, AS4, Fedora Core 2, SuSE SLES 7,8,9 und Debian Woody und Sarge Linux auf S/390 und Linux auf zSeries. + +\item Lesen Sie zur Portierung im ``Bacula Developer's Guide'' Informationen, wie man Bacula auf andere Systeme \"{u}bertr\"{a}gt. + \end{itemize} diff --git a/docs/manuals/de/concepts/thanks.tex b/docs/manuals/de/concepts/thanks.tex new file mode 100644 index 00000000..aa324925 --- /dev/null +++ b/docs/manuals/de/concepts/thanks.tex @@ -0,0 +1,102 @@ +%% +%% + +\chapter{Thanks} +\label{ThanksChapter} +\index[general]{Thanks } +I thank everyone who has helped this project. Unfortunately, I cannot +thank everyone (bad memory). However, the AUTHORS file in the main source +code directory should include the names of all persons who have contributed +to the Bacula project. Just the same, I would like to include thanks below +to special contributors as well as to the major contributors to the current +release. + +Thanks to Richard Stallman for starting the Free Software movement and for +bringing us gcc and all the other GNU tools as well as the GPL license. + +Thanks to Linus Torvalds for bringing us Linux. + +Thanks to all the Free Software programmers. Without being able to peek at +your code, and in some cases, take parts of it, this project would have been +much more difficult. + +Thanks to John Walker for suggesting this project, giving it a name, +contributing software he has written, and for his programming efforts on +Bacula as well as having acted as a constant sounding board and source of +ideas. + +Thanks to the apcupsd project where I started my Free Software efforts, and +from which I was able to borrow some ideas and code that I had written. + +Special thanks to D. Scott Barninger for writing the bacula RPM spec file, +building all the RPM files and loading them onto Source Forge. This has been a +tremendous help. + +Many thanks to Karl Cunningham for converting the manual from html format to +LaTeX. It was a major effort flawlessly done that will benefit the Bacula +users for many years to come. Thanks Karl. + +Thanks to Dan Langille for the {\bf incredible} amount of testing he did on +FreeBSD. His perseverance is truly remarkable. Thanks also for the many +contributions he has made to improve Bacula (pthreads patch for FreeBSD, +improved start/stop script and addition of Bacula userid and group, stunnel, +...), his continuing support of Bacula users. He also wrote the PostgreSQL +driver for Bacula and has been a big help in correcting the SQL. + +Thanks to multiple other Bacula Packagers who make and release packages for +different platforms for Bacula. + +Thanks to Christopher Hull for developing the native Win32 Bacula emulation +code and for contributing it to the Bacula project. + +Thanks to Robert Nelson for bringing our Win32 implementation up to par +with all the same features that exist in the Unix/Linux versions. In +addition, he has ported the Director and Storage daemon to Win32! + +Thanks to Thorsten Engel for his excellent knowledge of Win32 systems, and +for making the Win32 File daemon Unicode compatible, as well as making +the Win32 File daemon interface to Microsoft's Volume Shadow Copy (VSS). +These two are big pluses for Bacula! + +Thanks to Landon Fuller for writing both the communications and the +data encryption code for Bacula. + +Thanks to Arno Lehmann for his excellent and infatigable help and advice +to users. 
+ +Thanks to all the Bacula users, especially those of you who have contributed +ideas, bug reports, patches, and new features. + +Bacula can be enabled with data encryption and/or communications +encryption. If this is the case, you will be including OpenSSL code that +that contains cryptographic software written by Eric Young +(eay@cryptsoft.com) and also software written by Tim Hudson +(tjh@cryptsoft.com). + +The Bat (Bacula Administration Tool) graphs are based in part on the work +of the Qwt project (http://qwt.sf.net). + +The original variable expansion code used in the LabelFormat comes from the +Open Source Software Project (www.ossp.org). It has been adapted and extended +for use in Bacula. This code is now deprecated. + +There have been numerous people over the years who have contributed ideas, +code, and help to the Bacula project. The file AUTHORS in the main source +release file contains a list of contributors. For all those who I have +left out, please send me a reminder, and in any case, thanks for your +contribution. + +Thanks to the Free Software Foundation Europe e.V. for assuming the +responsibilities of protecting the Bacula copyright. + +% TODO: remove this from the book? +\section*{Copyrights and Trademarks} +\index[general]{Trademarks!Copyrights and } +\index[general]{Copyrights and Trademarks } + +Certain words and/or products are Copyrighted or Trademarked such as Windows +(by Microsoft). Since they are numerous, and we are not necessarily aware of +the details of each, we don't try to list them here. However, we acknowledge +all such Copyrights and Trademarks, and if any copyright or trademark holder +wishes a specific acknowledgment, notify us, and we will be happy to add it +where appropriate. diff --git a/docs/manuals/de/concepts/tls.tex b/docs/manuals/de/concepts/tls.tex new file mode 100644 index 00000000..6c90e110 --- /dev/null +++ b/docs/manuals/de/concepts/tls.tex @@ -0,0 +1,315 @@ + +\chapter{Bacula TLS -- Communications Encryption} +\label{CommEncryption} +\index[general]{TLS -- Communications Encryption} +\index[general]{Communications Encryption} +\index[general]{Encryption!Communications} +\index[general]{Encryption!Transport} +\index[general]{Transport Encryption} +\index[general]{TLS} + +Bacula TLS (Transport Layer Security) is built-in network +encryption code to provide secure network transport similar to +that offered by {\bf stunnel} or {\bf ssh}. The data written to +Volumes by the Storage daemon is not encrypted by this code. +For data encryption, please see the \ilink{Data Encryption +Chapter}{DataEncryption} of this manual. + +The Bacula encryption implementations were written by Landon Fuller. + +Supported features of this code include: +\begin{itemize} +\item Client/Server TLS Requirement Negotiation +\item TLSv1 Connections with Server and Client Certificate +Validation +\item Forward Secrecy Support via Diffie-Hellman Ephemeral Keying +\end{itemize} + +This document will refer to both "server" and "client" contexts. These +terms refer to the accepting and initiating peer, respectively. + +Diffie-Hellman anonymous ciphers are not supported by this code. The +use of DH anonymous ciphers increases the code complexity and places +explicit trust upon the two-way CRAM-MD5 implementation. CRAM-MD5 is +subject to known plaintext attacks, and it should be considered +considerably less secure than PKI certificate-based authentication. 
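+
+Note that the TLS code described here is only compiled into the daemons when
+Bacula has been built against OpenSSL. As a rough sketch only (every option
+other than \verb?--?with-openssl is illustrative and depends on your site),
+the build might look like:
+
+\footnotesize
+\begin{verbatim}
+  ./configure --with-openssl  [your other configure options]
+  make
+  make install
+\end{verbatim}
+\normalsize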
+ +Appropriate autoconf macros have been added to detect and use OpenSSL +if enabled on the {\bf ./configure} line with {\bf \verb?--?with-openssl} + +\section{TLS Configuration Directives} +Additional configuration directives have been added to all the daemons +(Director, File daemon, and Storage daemon) as well as the various +different Console programs. +These new directives are defined as follows: + +\begin{description} +\item [TLS Enable = \lt{}yes|no\gt{}] +Enable TLS support. If TLS is not enabled, none of the other TLS directives +have any effect. In other words, even if you set {\bf TLS Require = yes} +you need to have TLS enabled or TLS will not be used. + +\item [TLS Require = \lt{}yes|no\gt{}] +Require TLS connections. This directive is ignored unless {\bf TLS Enable} +is set to {\bf yes}. If TLS is not required, and TLS is enabled, then +Bacula will connect with other daemons either with or without TLS depending +on what the other daemon requests. If TLS is enabled and TLS is required, +then Bacula will refuse any connection that does not use TLS. + +\item [TLS Certificate = \lt{}Filename\gt{}] +The full path and filename of a PEM encoded TLS certificate. It can be +used as either a client or server certificate. PEM stands for Privacy +Enhanced Mail, but in this context refers to how the certificates are +encoded. It is used because PEM files are base64 encoded and hence ASCII +text based rather than binary. They may also contain encrypted +information. + +\item [TLS Key = \lt{}Filename\gt{}] +The full path and filename of a PEM encoded TLS private key. It must +correspond to the TLS certificate. + +\item [TLS Verify Peer = \lt{}yes|no\gt{}] +Verify peer certificate. Instructs server to request and verify the +client's x509 certificate. Any client certificate signed by a known-CA +will be accepted unless the TLS Allowed CN configuration directive is used, +in which case the client certificate must correspond to the Allowed +Common Name specified. This directive is valid only for a server +and not in a client context. + +\item [TLS Allowed CN = \lt{}string list\gt{}] +Common name attribute of allowed peer certificates. If this directive is +specified, all server certificates will be verified against this list. This +can be used to ensure that only the CA-approved Director may connect. +This directive may be specified more than once. + +\item [TLS CA Certificate File = \lt{}Filename\gt{}] +The full path and filename specifying a +PEM encoded TLS CA certificate(s). Multiple certificates are +permitted in the file. One of \emph{TLS CA Certificate File} or \emph{TLS +CA Certificate Dir} are required in a server context if \emph{TLS +Verify Peer} (see above) is also specified, and are always required in a client +context. + +\item [TLS CA Certificate Dir = \lt{}Directory\gt{}] +Full path to TLS CA certificate directory. In the current implementation, +certificates must be stored PEM encoded with OpenSSL-compatible hashes, +which is the subject name's hash and an extension of {bf .0}. +One of \emph{TLS CA Certificate File} or \emph{TLS CA Certificate Dir} are +required in a server context if \emph{TLS Verify Peer} is also specified, +and are always required in a client context. + +\item [TLS DH File = \lt{}Directory\gt{}] +Path to PEM encoded Diffie-Hellman parameter file. If this directive is +specified, DH key exchange will be used for the ephemeral keying, allowing +for forward secrecy of communications. 
DH key exchange adds an additional +level of security because the key used for encryption/decryption by the +server and the client is computed on each end and thus is never passed over +the network if Diffie-Hellman key exchange is used. Even if DH key +exchange is not used, the encryption/decryption key is always passed +encrypted. This directive is only valid within a server context. + +To generate the parameter file, you +may use openssl: + +\begin{verbatim} + openssl dhparam -out dh1024.pem -5 1024 +\end{verbatim} + +\end{description} + +\section{Creating a Self-signed Certificate} +\index[general]{Creating a Self-signed Certificate } +\index[general]{Certificate!Creating a Self-signed } + +You may create a self-signed certificate for use with the Bacula TLS that +will permit you to make it function, but will not allow certificate +validation. The .pem file containing both the certificate and the key +valid for ten years can be made with the following: + +\footnotesize +\begin{verbatim} + openssl req -new -x509 -nodes -out bacula.pem -keyout bacula.pem -days 3650 +\end{verbatim} +\normalsize + +The above script will ask you a number of questions. You may simply answer +each of them by entering a return, or if you wish you may enter your own data. + +Note, however, that self-signed certificates will only work for the +outgoing end of connections. For example, in the case of the Director +making a connection to a File Daemon, the File Daemon may be configured to +allow self-signed certificates, but the certificate used by the +Director must be signed by a certificate that is explicitly trusted on the +File Daemon end. + +This is necessary to prevent ``man in the middle'' attacks from tools such +as \elink{ettercap}{http://ettercap.sourceforge.net/}. Essentially, if the +Director does not verify that it is talking to a trusted remote endpoint, +it can be tricked into talking to a malicious 3rd party who is relaying and +capturing all traffic by presenting its own certificates to the Director +and File Daemons. The only way to prevent this is by using trusted +certificates, so that the man in the middle is incapable of spoofing the +connection using his own. + +To get a trusted certificate (CA or Certificate Authority signed +certificate), you will either need to purchase certificates signed by a +commercial CA or find a friend that has setup his own CA or become a CA +yourself, and thus you can sign all your own certificates. The book +OpenSSL by John Viega, Matt Mesier \& Pravir Chandra from O'Reilly explains +how to do it, or you can read the documentation provided in the Open-source +PKI Book project at Source Forge: \elink{ +http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm} +{http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm}. +Note, this link may change. + +The program TinyCA has a very nice Graphical User Interface +that allows you to easily setup and maintain your own CA. +TinyCA can be found at +\elink{http://tinyca.sm-zone.net/}{http://tinyca.sm-zone.net/}. + + +\section{Getting a CA Signed Certificate} +\index[general]{Certificate!Getting a CA Signed } +\index[general]{Getting a CA Signed Certificate } + +The process of getting a certificate that is signed by a CA is quite a bit +more complicated. You can purchase one from quite a number of PKI vendors, but +that is not at all necessary for use with Bacula. 
To get a CA signed +certificate, you will either need to find a friend that has setup his own CA +or to become a CA yourself, and thus you can sign all your own certificates. +The book OpenSSL by John Viega, Matt Mesier \& Pravir Chandra from O'Reilly +explains how to do it, or you can read the documentation provided in the +Open-source PKI Book project at Source Forge: +\elink{ +http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm} +{http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm}. +Note, this link may change. + +\section{Example TLS Configuration Files} +\index[general]{Example!TLS Configuration Files} +\index[general]{TLS Configuration Files} + +Landon has supplied us with the TLS portions of his configuration +files, which should help you setting up your own. Note, this example +shows the directives necessary for a Director to Storage daemon session. +The technique is the same between the Director and the Client and +for bconsole to the Director. + +{\bf bacula-dir.conf} +\footnotesize +\begin{verbatim} + Director { # define myself + Name = backup1-dir + ... + TLS Enable = yes + TLS Require = yes + TLS Verify Peer = yes + TLS Allowed CN = "bacula@backup1.example.com" + TLS Allowed CN = "administrator@example.com" + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a server certificate, used for incoming + # console connections. + TLS Certificate = /usr/local/etc/ssl/backup1/cert.pem + TLS Key = /usr/local/etc/ssl/backup1/key.pem + } + + Storage { + Name = File + Address = backup1.example.com + ... + TLS Require = yes + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a client certificate, used by the director to + # connect to the storage daemon + TLS Certificate = /usr/local/etc/ssl/bacula@backup1/cert.pem + TLS Key = /usr/local/etc/ssl/bacula@backup1/key.pem + } + + Client { + Name = backup1-fd + Address = server1.example.com + ... + + TLS Enable = yes + TLS Require = yes + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + } + +\end{verbatim} +\normalsize + +{\bf bacula-fd.conf} +\footnotesize +\begin{verbatim} + Director { + Name = backup1-dir + ... + TLS Enable = yes + TLS Require = yes + TLS Verify Peer = yes + # Allow only the Director to connect + TLS Allowed CN = "bacula@backup1.example.com" + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a server certificate. It is used by connecting + # directors to verify the authenticity of this file daemon + TLS Certificate = /usr/local/etc/ssl/server1/cert.pem + TLS Key = /usr/local/etc/ssl/server1/key.pem + } + + FileDaemon { + Name = backup1-fd + ... + # you need these TLS entries so the SD and FD can + # communicate + TLS Enable = yes + TLS Require = yes + + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + TLS Certificate = /usr/local/etc/ssl/server1/cert.pem + TLS Key = /usr/local/etc/ssl/server1/key.pem +} +\end{verbatim} +\normalsize + +{\bf bacula-sd.conf} +\footnotesize +\begin{verbatim} + Storage { # definition of myself + Name = backup1-sd + ... + # These TLS configuration options are used for incoming + # file daemon connections. Director TLS settings are handled + # below. + TLS Enable = yes + TLS Require = yes + # Peer certificate is not required/requested -- peer validity + # is verified by the storage connection cookie provided to the + # File Daemon by the director. + TLS Verify Peer = no + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a server certificate. 
It is used by connecting + # file daemons to verify the authenticity of this storage daemon + TLS Certificate = /usr/local/etc/ssl/backup1/cert.pem + TLS Key = /usr/local/etc/ssl/backup1/key.pem + } + + # + # List Directors who are permitted to contact Storage daemon + # + Director { + Name = backup1-dir + ... + TLS Enable = yes + TLS Require = yes + # Require the connecting director to provide a certificate + # with the matching CN. + TLS Verify Peer = yes + TLS Allowed CN = "bacula@backup1.example.com" + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a server certificate. It is used by the connecting + # director to verify the authenticity of this storage daemon + TLS Certificate = /usr/local/etc/ssl/backup1/cert.pem + TLS Key = /usr/local/etc/ssl/backup1/key.pem + } +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/concepts/translate_images.pl b/docs/manuals/de/concepts/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/de/concepts/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. +# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. + $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. + my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. 
+# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. 
+ if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/de/concepts/tutorial.tex b/docs/manuals/de/concepts/tutorial.tex new file mode 100644 index 00000000..7062a978 --- /dev/null +++ b/docs/manuals/de/concepts/tutorial.tex @@ -0,0 +1,1373 @@ +%% +%% + +\chapter{A Brief Tutorial} +\label{TutorialChapter} +\index[general]{Brief Tutorial } +\index[general]{Tutorial!Brief } + +This chapter will guide you through running Bacula. To do so, we assume you +have installed Bacula, possibly in a single file as shown in the previous +chapter, in which case, you can run Bacula as non-root for these tests. +However, we assume that you have not changed the .conf files. If you have +modified the .conf files, please go back and uninstall Bacula, then reinstall +it, but do not make any changes. The examples in this chapter use the default +configuration files, and will write the volumes to disk in your {\bf /tmp} +directory, in addition, the data backed up will be the source directory where +you built Bacula. As a consequence, you can run all the Bacula daemons for +these tests as non-root. Please note, in production, your File daemon(s) must +run as root. See the Security chapter for more information on this subject. + +% TODO: use crossreferences above +% TODO: add a section here + +The general flow of running Bacula is: + +\begin{enumerate} +\item cd \lt{}install-directory\gt{} +\item Start the Database (if using MySQL or PostgreSQL) +\item Start the Daemons with {\bf ./bacula start} +\item Start the Console program to interact with the Director +\item Run a job +\item When the Volume fills, unmount the Volume, if it is a tape, label a new + one, and continue running. In this chapter, we will write only to disk files + so you won't need to worry about tapes for the moment. +\item Test recovering some files from the Volume just written to ensure the + backup is good and that you know how to recover. Better test before disaster + strikes +\item Add a second client. + \end{enumerate} + +Each of these steps is described in more detail below. + +\section{Before Running Bacula} +\index[general]{Bacula!Before Running } +\index[general]{Before Running Bacula } + +% TODO: some of this content is already covered once or twice critical +% TODO: or quickstart. Consolidate! + +Before running Bacula for the first time in production, we recommend that you +run the {\bf test} command in the {\bf btape} program as described in the +\ilink{Utility Program Chapter}{btape} of this manual. This will +help ensure that Bacula functions correctly with your tape drive. If you have +a modern HP, Sony, or Quantum DDS or DLT tape drive running on Linux or +Solaris, you can probably skip this test as Bacula is well tested with these +drives and systems. For all other cases, you are {\bf strongly} encouraged to +run the test before continuing. {\bf btape} also has a {\bf fill} command that +attempts to duplicate what Bacula does when filling a tape and writing on the +next tape. You should consider trying this command as well, but be forewarned, +it can take hours (about four hours on my drive) to fill a large capacity tape. 
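+
+As an illustration only (the archive device name and the path to your
+Storage daemon configuration file will almost certainly differ on your
+system), the test is typically run as follows, entering {\bf test} or
+{\bf fill} at the btape command prompt:
+
+\footnotesize
+\begin{verbatim}
+  ./btape -c bin/bacula-sd.conf /dev/nst0
+  *test
+\end{verbatim}
+\normalsize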
+ +\section{Starting the Database} +\label{StartDB} +\index[general]{Starting the Database } +\index[general]{Database!Starting the } + +If you are using MySQL or PostgreSQL as the Bacula database, you should start +it before you attempt to run a job to avoid getting error messages from Bacula +when it starts. The scripts {\bf startmysql} and {\bf stopmysql} are what I +(Kern) use to start and stop my local MySQL. Note, if you are using SQLite, +you will not want to use {\bf startmysql} or {\bf stopmysql}. If you are +running this in production, you will probably want to find some way to +automatically start MySQL or PostgreSQL after each system reboot. + +If you are using SQLite (i.e. you specified the {\bf \verb:--:with-sqlite=xxx} option +on the {\bf ./configure} command, you need do nothing. SQLite is automatically +started by {\bf Bacula}. + +\section{Starting the Daemons} +\label{StartDaemon} +\index[general]{Starting the Daemons } +\index[general]{Daemons!Starting the } + +Assuming you have built from source or have installed the rpms, +to start the three daemons, from your installation directory, simply enter: + +./bacula start + +The {\bf bacula} script starts the Storage daemon, the File daemon, and the +Director daemon, which all normally run as daemons in the background. If you +are using the autostart feature of Bacula, your daemons will either be +automatically started on reboot, or you can control them individually with the +files {\bf bacula-dir}, {\bf bacula-fd}, and {\bf bacula-sd}, which are +usually located in {\bf /etc/init.d}, though the actual location is system +dependent. +Some distributions may do this differently. + +Note, on Windows, currently only the File daemon is ported, and it must be +started differently. Please see the +\ilink{Windows Version of Bacula}{Win32Chapter} Chapter of this +manual. + +The rpm packages configure the daemons to run as user=root and group=bacula. +The rpm installation also creates the group bacula if it does not exist on the +system. Any users that you add to the group bacula will have access to files +created by the daemons. To disable or alter this behavior edit the daemon +startup scripts: + +\begin{itemize} +\item /etc/bacula/bacula +\item /etc/init.d/bacula-dir +\item /etc/init.d/bacula-sd +\item /etc/init.d/bacula-fd + \end{itemize} + +and then restart as noted above. + +The +\ilink{installation chapter}{InstallChapter} of this manual +explains how you can install scripts that will automatically restart the +daemons when the system starts. + +\section{Using the Director to Query and Start Jobs} +\index[general]{Jobs!Querying or Starting Jobs} +\index[general]{Querying or starting Jobs} +% TODO: section name is too long; maybe use "Using the Console Program" ?? + +To communicate with the director and to query the state of Bacula or run jobs, +from the top level directory, simply enter: + +./bconsole + +Alternatively to running the command line console, if you have +Qt4 installed and used the {\bf \verb:--:enable-bat} on the configure command, +you may use the Bacula Administration Tool ({\bf bat}): + +./bat + +Which has a graphical interface, and many more features than bconsole. + +Two other possibilities are to run the GNOME console +{\bf bgnome-console} or the wxWidgets program {\bf bwx-console}. + +For simplicity, here we will describe only the {\bf ./bconsole} program. Most +of what is described here applies equally well to {\bf ./bat}, +{\bf ./bgnome-console}, and to {\bf bwx-console}. 
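+
+If your configuration files are not in the default location, the console can
+be pointed at its configuration file explicitly with the {\bf -c} option
+(described later in the Daemon Command Line Options section); the path shown
+here is purely illustrative:
+
+\footnotesize
+\begin{verbatim}
+  ./bconsole -c /etc/bacula/bconsole.conf
+\end{verbatim}
+\normalsize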
+ +The {\bf ./bconsole} runs the Bacula Console program, which connects to the +Director daemon. Since Bacula is a network program, you can run the Console +program anywhere on your network. Most frequently, however, one runs it on the +same machine as the Director. Normally, the Console program will print +something similar to the following: + +\footnotesize +\begin{verbatim} +[kern@polymatou bin]$ ./bconsole +Connecting to Director lpmatou:9101 +1000 OK: HeadMan Version: 2.1.8 (14 May 2007) +* +\end{verbatim} +\normalsize + +the asterisk is the console command prompt. + +Type {\bf help} to see a list of available commands: + +\footnotesize +\begin{verbatim} +*help + Command Description + ======= =========== + add add media to a pool + autodisplay autodisplay [on|off] -- console messages + automount automount [on|off] -- after label + cancel cancel [ | ] -- cancel a job + create create DB Pool from resource + delete delete [pool= | media volume=] + disable disable -- disable a job + enable enable -- enable a job + estimate performs FileSet estimate, listing gives full listing + exit exit = quit + gui gui [on|off] -- non-interactive gui mode + help print this command + list list [pools | jobs | jobtotals | media | +files ]; from catalog + label label a tape + llist full or long list like list command + memory print current memory usage + messages messages + mount mount + prune prune expired records from catalog + purge purge records from catalog + python python control commands + quit quit + query query catalog + restore restore files + relabel relabel a tape + release release + reload reload conf file + run run + status status [storage | client]= + setdebug sets debug level + setip sets new client address -- if authorized + show show (resource records) [jobs | pools | ... | all] + sqlquery use SQL to query catalog + time print current time + trace turn on/off trace to file + unmount unmount + umount umount for old-time Unix guys + update update Volume, Pool or slots + use use catalog xxx + var does variable expansion + version print Director version + wait wait until no jobs are running [ | | ] +* +\end{verbatim} +\normalsize + +Details of the console program's commands are explained in the +\ilink{Console Chapter}{_ConsoleChapter} of this manual. + +\section{Running a Job} +\label{Running} +\index[general]{Job!Running a } +\index[general]{Running a Job } + +At this point, we assume you have done the following: + +\begin{itemize} +\item Configured Bacula with {\bf ./configure \verb:--:your-options} +\item Built Bacula using {\bf make} +\item Installed Bacula using {\bf make install} +\item Have created your database with, for example, {\bf + ./create\_sqlite\_database} +\item Have created the Bacula database tables with, {\bf + ./make\_bacula\_tables} +\item Have possibly edited your {\bf bacula-dir.conf} file to personalize it + a bit. BE CAREFUL! if you change the Director's name or password, you will + need to make similar modifications in the other .conf files. For the moment + it is probably better to make no changes. +\item You have started Bacula with {\bf ./bacula start} +\item You have invoked the Console program with {\bf ./bconsole} +\end{itemize} + +Furthermore, we assume for the moment you are using the default configuration +files. 
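+
+Assuming an SQLite build run entirely from the installation directory, the
+checklist above boils down to a short command sequence (a sketch only; the
+script names depend on the database you configured):
+
+\footnotesize
+\begin{verbatim}
+  ./create_sqlite_database
+  ./make_bacula_tables
+  ./bacula start
+  ./bconsole
+\end{verbatim}
+\normalsize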
+ +At this point, enter the following command: + +\footnotesize +\begin{verbatim} +show filesets +\end{verbatim} +\normalsize + +and you should get something similar to: + +\footnotesize +\begin{verbatim} +FileSet: name=Full Set + O M + N + I /home/kern/bacula/regress/build + N + E /proc + E /tmp + E /.journal + E /.fsck + N +FileSet: name=Catalog + O M + N + I /home/kern/bacula/regress/working/bacula.sql + N +\end{verbatim} +\normalsize + +This is a pre-defined {\bf FileSet} that will backup the Bacula source +directory. The actual directory names printed should correspond to your system +configuration. For testing purposes, we have chosen a directory of moderate +size (about 40 Megabytes) and complexity without being too big. The FileSet +{\bf Catalog} is used for backing up Bacula's catalog and is not of interest +to us for the moment. The {\bf I} entries are the files or directories that +will be included in the backup and the {\bf E} are those that will be +excluded, and the {\bf O} entries are the options specified for +the FileSet. You can change what is backed up by editing {\bf bacula-dir.conf} +and changing the {\bf File =} line in the {\bf FileSet} resource. + +Now is the time to run your first backup job. We are going to backup your +Bacula source directory to a File Volume in your {\bf /tmp} directory just to +show you how easy it is. Now enter: + +\footnotesize +\begin{verbatim} +status dir +\end{verbatim} +\normalsize + +and you should get the following output: + +\footnotesize +\begin{verbatim} +rufus-dir Version: 1.30 (28 April 2003) +Daemon started 28-Apr-2003 14:03, 0 Jobs run. +Console connected at 28-Apr-2003 14:03 +No jobs are running. +Level Type Scheduled Name +================================================================= +Incremental Backup 29-Apr-2003 01:05 Client1 +Full Backup 29-Apr-2003 01:10 BackupCatalog +==== +\end{verbatim} +\normalsize + +where the times and the Director's name will be different according to your +setup. This shows that an Incremental job is scheduled to run for the Job {\bf +Client1} at 1:05am and that at 1:10, a {\bf BackupCatalog} is scheduled to +run. Note, you should probably change the name {\bf Client1} to be the name of +your machine, if not, when you add additional clients, it will be very +confusing. For my real machine, I use {\bf Rufus} rather than {\bf Client1} as +in this example. + +Now enter: + +\footnotesize +\begin{verbatim} +status client +\end{verbatim} +\normalsize + +and you should get something like: + +\footnotesize +\begin{verbatim} +The defined Client resources are: + 1: rufus-fd +Item 1 selected automatically. +Connecting to Client rufus-fd at rufus:8102 +rufus-fd Version: 1.30 (28 April 2003) +Daemon started 28-Apr-2003 14:03, 0 Jobs run. +Director connected at: 28-Apr-2003 14:14 +No jobs running. +==== +\end{verbatim} +\normalsize + +In this case, the client is named {\bf rufus-fd} your name will be different, +but the line beginning with {\bf rufus-fd Version ...} is printed by your File +daemon, so we are now sure it is up and running. + +Finally do the same for your Storage daemon with: + +\footnotesize +\begin{verbatim} +status storage +\end{verbatim} +\normalsize + +and you should get: + +\footnotesize +\begin{verbatim} +The defined Storage resources are: + 1: File +Item 1 selected automatically. +Connecting to Storage daemon File at rufus:8103 +rufus-sd Version: 1.30 (28 April 2003) +Daemon started 28-Apr-2003 14:03, 0 Jobs run. +Device /tmp is not open. +No jobs running. 
+==== +\end{verbatim} +\normalsize + +You will notice that the default Storage daemon device is named {\bf File} and +that it will use device {\bf /tmp}, which is not currently open. + +Now, let's actually run a job with: + +\footnotesize +\begin{verbatim} +run +\end{verbatim} +\normalsize + +you should get the following output: + +\footnotesize +\begin{verbatim} +Using default Catalog name=MyCatalog DB=bacula +A job name must be specified. +The defined Job resources are: + 1: Client1 + 2: BackupCatalog + 3: RestoreFiles +Select Job resource (1-3): +\end{verbatim} +\normalsize + +Here, Bacula has listed the three different Jobs that you can run, and you +should choose number {\bf 1} and type enter, at which point you will get: + +\footnotesize +\begin{verbatim} +Run Backup job +JobName: Client1 +FileSet: Full Set +Level: Incremental +Client: rufus-fd +Storage: File +Pool: Default +When: 2003-04-28 14:18:57 +OK to run? (yes/mod/no): +\end{verbatim} +\normalsize + +At this point, take some time to look carefully at what is printed and +understand it. It is asking you if it is OK to run a job named {\bf Client1} +with FileSet {\bf Full Set} (we listed above) as an Incremental job on your +Client (your client name will be different), and to use Storage {\bf File} and +Pool {\bf Default}, and finally, it wants to run it now (the current time +should be displayed by your console). + +Here we have the choice to run ({\bf yes}), to modify one or more of the above +parameters ({\bf mod}), or to not run the job ({\bf no}). Please enter {\bf +yes}, at which point you should immediately get the command prompt (an +asterisk). If you wait a few seconds, then enter the command {\bf messages} +you will get back something like: + +\footnotesize +\begin{verbatim} +28-Apr-2003 14:22 rufus-dir: Last FULL backup time not found. Doing + FULL backup. +28-Apr-2003 14:22 rufus-dir: Start Backup JobId 1, + Job=Client1.2003-04-28_14.22.33 +28-Apr-2003 14:22 rufus-sd: Job Client1.2003-04-28_14.22.33 waiting. + Cannot find any appendable volumes. +Please use the "label" command to create a new Volume for: + Storage: FileStorage + Media type: File + Pool: Default +\end{verbatim} +\normalsize + +The first message, indicates that no previous Full backup was done, so Bacula +is upgrading our Incremental job to a Full backup (this is normal). The second +message indicates that the job started with JobId 1., and the third message +tells us that Bacula cannot find any Volumes in the Pool for writing the +output. This is normal because we have not yet created (labeled) any Volumes. +Bacula indicates to you all the details of the volume it needs. + +At this point, the job is BLOCKED waiting for a Volume. You can check this if +you want by doing a {\bf status dir}. In order to continue, we must create a +Volume that Bacula can write on. We do so with: + +\footnotesize +\begin{verbatim} +label +\end{verbatim} +\normalsize + +and Bacula will print: + +\footnotesize +\begin{verbatim} +The defined Storage resources are: + 1: File +Item 1 selected automatically. +Enter new Volume name: +\end{verbatim} +\normalsize + +at which point, you should enter some name beginning with a letter and +containing only letters and numbers (period, hyphen, and underscore) are also +permitted. For example, enter {\bf TestVolume001}, and you should get back: + +\footnotesize +\begin{verbatim} +Defined Pools: + 1: Default +Item 1 selected automatically. +Connecting to Storage daemon File at rufus:8103 ... 
+Sending label command for Volume "TestVolume001" Slot 0 ... +3000 OK label. Volume=TestVolume001 Device=/tmp +Catalog record for Volume "TestVolume002", Slot 0 successfully created. +Requesting mount FileStorage ... +3001 OK mount. Device=/tmp +\end{verbatim} +\normalsize + +Finally, enter {\bf messages} and you should get something like: + +\footnotesize +\begin{verbatim} +28-Apr-2003 14:30 rufus-sd: Wrote label to prelabeled Volume + "TestVolume001" on device /tmp +28-Apr-2003 14:30 rufus-dir: Bacula 1.30 (28Apr03): 28-Apr-2003 14:30 +JobId: 1 +Job: Client1.2003-04-28_14.22.33 +FileSet: Full Set +Backup Level: Full +Client: rufus-fd +Start time: 28-Apr-2003 14:22 +End time: 28-Apr-2003 14:30 +Files Written: 1,444 +Bytes Written: 38,988,877 +Rate: 81.2 KB/s +Software Compression: None +Volume names(s): TestVolume001 +Volume Session Id: 1 +Volume Session Time: 1051531381 +Last Volume Bytes: 39,072,359 +FD termination status: OK +SD termination status: OK +Termination: Backup OK +28-Apr-2003 14:30 rufus-dir: Begin pruning Jobs. +28-Apr-2003 14:30 rufus-dir: No Jobs found to prune. +28-Apr-2003 14:30 rufus-dir: Begin pruning Files. +28-Apr-2003 14:30 rufus-dir: No Files found to prune. +28-Apr-2003 14:30 rufus-dir: End auto prune. +\end{verbatim} +\normalsize + +If you don't see the output immediately, you can keep entering {\bf messages} +until the job terminates, or you can enter, {\bf autodisplay on} and your +messages will automatically be displayed as soon as they are ready. + +If you do an {\bf ls -l} of your {\bf /tmp} directory, you will see that you +have the following item: + +\footnotesize +\begin{verbatim} +-rw-r----- 1 kern kern 39072153 Apr 28 14:30 TestVolume001 +\end{verbatim} +\normalsize + +This is the file Volume that you just wrote and it contains all the data of +the job just run. If you run additional jobs, they will be appended to this +Volume unless you specify otherwise. + +You might ask yourself if you have to label all the Volumes that Bacula is +going to use. The answer for disk Volumes, like the one we used, is no. It is +possible to have Bacula automatically label volumes. For tape Volumes, you +will most likely have to label each of the Volumes you want to use. + +If you would like to stop here, you can simply enter {\bf quit} in the Console +program, and you can stop Bacula with {\bf ./bacula stop}. To clean up, simply +delete the file {\bf /tmp/TestVolume001}, and you should also re-initialize +your database using: + +\footnotesize +\begin{verbatim} +./drop_bacula_tables +./make_bacula_tables +\end{verbatim} +\normalsize + +Please note that this will erase all information about the previous jobs that +have run, and that you might want to do it now while testing but that normally +you will not want to re-initialize your database. + +If you would like to try restoring the files that you just backed up, read the +following section. +\label{restoring} + +\section{Restoring Your Files} +\index[general]{Files!Restoring Your } +\index[general]{Restoring Your Files } + +If you have run the default configuration and the save of the Bacula source +code as demonstrated above, you can restore the backed up files in the Console +program by entering: + +\footnotesize +\begin{verbatim} +restore all +\end{verbatim} +\normalsize + +where you will get: + +\footnotesize +\begin{verbatim} +First you select one or more JobIds that contain files +to be restored. You will be presented several methods +of specifying the JobIds. 
Then you will be allowed to +select which files from those JobIds are to be restored. + +To select the JobIds, you have the following choices: + 1: List last 20 Jobs run + 2: List Jobs where a given File is saved + 3: Enter list of comma separated JobIds to select + 4: Enter SQL list command + 5: Select the most recent backup for a client + 6: Select backup for a client before a specified time + 7: Enter a list of files to restore + 8: Enter a list of files to restore before a specified time + 9: Find the JobIds of the most recent backup for a client + 10: Find the JobIds for a backup for a client before a specified time + 11: Enter a list of directories to restore for found JobIds + 12: Cancel +Select item: (1-12): +\end{verbatim} +\normalsize + +As you can see, there are a number of options, but for the current +demonstration, please enter {\bf 5} to do a restore of the last backup you +did, and you will get the following output: + +\footnotesize +\begin{verbatim} +Defined Clients: + 1: rufus-fd +Item 1 selected automatically. +The defined FileSet resources are: + 1: 1 Full Set 2003-04-28 14:22:33 +Item 1 selected automatically. ++-------+-------+----------+---------------------+---------------+ +| JobId | Level | JobFiles | StartTime | VolumeName | ++-------+-------+----------+---------------------+---------------+ +| 1 | F | 1444 | 2003-04-28 14:22:33 | TestVolume002 | ++-------+-------+----------+---------------------+---------------+ +You have selected the following JobId: 1 +Building directory tree for JobId 1 ... +1 Job inserted into the tree and marked for extraction. +The defined Storage resources are: + 1: File +Item 1 selected automatically. +You are now entering file selection mode where you add and +remove files to be restored. All files are initially added. +Enter "done" to leave this mode. +cwd is: / +$ +\end{verbatim} +\normalsize + +where I have truncated the listing on the right side to make it more readable. +As you can see by starting at the top of the listing, Bacula knows what client +you have, and since there was only one, it selected it automatically, likewise +for the FileSet. Then Bacula produced a listing containing all the jobs that +form the current backup, in this case, there is only one, and the Storage +daemon was also automatically chosen. Bacula then took all the files that were +in Job number 1 and entered them into a {\bf directory tree} (a sort of in +memory representation of your filesystem). At this point, you can use the {\bf +cd} and {\bf ls} ro {\bf dir} commands to walk up and down the directory tree +and view what files will be restored. For example, if I enter {\bf cd +/home/kern/bacula/bacula-1.30} and then enter {\bf dir} I will get a listing +of all the files in the Bacula source directory. On your system, the path will +be somewhat different. For more information on this, please refer to the +\ilink{Restore Command Chapter}{RestoreChapter} of this manual for +more details. + +To exit this mode, simply enter: + +\footnotesize +\begin{verbatim} +done +\end{verbatim} +\normalsize + +and you will get the following output: + +\footnotesize +\begin{verbatim} +Bootstrap records written to + /home/kern/bacula/testbin/working/restore.bsr +The restore job will require the following Volumes: + + TestVolume001 +1444 files selected to restore. 
+Run Restore job +JobName: RestoreFiles +Bootstrap: /home/kern/bacula/testbin/working/restore.bsr +Where: /tmp/bacula-restores +Replace: always +FileSet: Full Set +Backup Client: rufus-fd +Restore Client: rufus-fd +Storage: File +JobId: *None* +When: 2005-04-28 14:53:54 +OK to run? (yes/mod/no): +\end{verbatim} +\normalsize + +If you answer {\bf yes} your files will be restored to {\bf +/tmp/bacula-restores}. If you want to restore the files to their original +locations, you must use the {\bf mod} option and explicitly set {\bf Where:} +to nothing (or to /). We recommend you go ahead and answer {\bf yes} and after +a brief moment, enter {\bf messages}, at which point you should get a listing +of all the files that were restored as well as a summary of the job that looks +similar to this: + +\footnotesize +\begin{verbatim} +28-Apr-2005 14:56 rufus-dir: Bacula 2.1.8 (08May07): 08-May-2007 14:56:06 +Build OS: i686-pc-linux-gnu suse 10.2 +JobId: 2 +Job: RestoreFiles.2007-05-08_14.56.06 +Restore Client: rufus-fd +Start time: 08-May-2007 14:56 +End time: 08-May-2007 14:56 +Files Restored: 1,444 +Bytes Restored: 38,816,381 +Rate: 9704.1 KB/s +FD Errors: 0 +FD termination status: OK +SD termination status: OK +Termination: Restore OK +08-May-2007 14:56 rufus-dir: Begin pruning Jobs. +08-May-2007 14:56 rufus-dir: No Jobs found to prune. +08-May-2007 14:56 rufus-dir: Begin pruning Files. +08-May-2007 14:56 rufus-dir: No Files found to prune. +08-May-2007 14:56 rufus-dir: End auto prune. +\end{verbatim} +\normalsize + +After exiting the Console program, you can examine the files in {\bf +/tmp/bacula-restores}, which will contain a small directory tree with all the +files. Be sure to clean up at the end with: + +\footnotesize +\begin{verbatim} +rm -rf /tmp/bacula-restore +\end{verbatim} +\normalsize + +\section{Quitting the Console Program} +\index[general]{Program!Quitting the Console } +\index[general]{Quitting the Console Program } + +Simply enter the command {\bf quit}. +\label{SecondClient} + +\section{Adding a Second Client} +\index[general]{Client!Adding a Second } +\index[general]{Adding a Second Client } + +If you have gotten the example shown above to work on your system, you may be +ready to add a second Client (File daemon). That is you have a second machine +that you would like backed up. The only part you need installed on the other +machine is the binary {\bf bacula-fd} (or {\bf bacula-fd.exe} for Windows) and +its configuration file {\bf bacula-fd.conf}. You can start with the same {\bf +bacula-fd.conf} file that you are currently using and make one minor +modification to it to create the conf file for your second client. Change the +File daemon name from whatever was configured, {\bf rufus-fd} in the example +above, but your system will have a different name. The best is to change it to +the name of your second machine. For example: + +\footnotesize +\begin{verbatim} +... +# +# "Global" File daemon configuration specifications +# +FileDaemon { # this is me + Name = rufus-fd + FDport = 9102 # where we listen for the director + WorkingDirectory = /home/kern/bacula/working + Pid Directory = /var/run +} +... +\end{verbatim} +\normalsize + +would become: + +\footnotesize +\begin{verbatim} +... +# +# "Global" File daemon configuration specifications +# +FileDaemon { # this is me + Name = matou-fd + FDport = 9102 # where we listen for the director + WorkingDirectory = /home/kern/bacula/working + Pid Directory = /var/run +} +... 
+\end{verbatim} +\normalsize + +where I show just a portion of the file and have changed {\bf rufus-fd} to +{\bf matou-fd}. The names you use are your choice. For the moment, I recommend +you change nothing else. Later, you will want to change the password. + +Now you should install that change on your second machine. Then you need to +make some additions to your Director's configuration file to define the new +File daemon or Client. Starting from our original example which should be +installed on your system, you should add the following lines (essentially +copies of the existing data but with the names changed) to your Director's +configuration file {\bf bacula-dir.conf}. + +\footnotesize +\begin{verbatim} +# +# Define the main nightly save backup job +# By default, this job will back up to disk in /tmp +Job { + Name = "Matou" + Type = Backup + Client = matou-fd + FileSet = "Full Set" + Schedule = "WeeklyCycle" + Storage = File + Messages = Standard + Pool = Default + Write Bootstrap = "/home/kern/bacula/working/matou.bsr" +} +# Client (File Services) to backup +Client { + Name = matou-fd + Address = matou + FDPort = 9102 + Catalog = MyCatalog + Password = "xxxxx" # password for + File Retention = 30d # 30 days + Job Retention = 180d # six months + AutoPrune = yes # Prune expired Jobs/Files +} +\end{verbatim} +\normalsize + +Then make sure that the Address parameter in the Storage resource is set to +the fully qualified domain name and not to something like "localhost". The +address specified is sent to the File daemon (client) and it must be a fully +qualified domain name. If you pass something like "localhost" it will not +resolve correctly and will result in a time out when the File daemon fails to +connect to the Storage daemon. + +That is all that is necessary. I copied the existing resource to create a +second Job (Matou) to backup the second client (matou-fd). It has the name +{\bf Matou}, the Client is named {\bf matou-fd}, and the bootstrap file name +is changed, but everything else is the same. This means that Matou will be +backed up on the same schedule using the same set of tapes. You may want to +change that later, but for now, let's keep it simple. + +The second change was to add a new Client resource that defines {\bf matou-fd} +and has the correct address {\bf matou}, but in real life, you may need a +fully qualified domain name or an IP address. I also kept the password the +same (shown as xxxxx for the example). + +At this point, if you stop Bacula and restart it, and start the Client on the +other machine, everything will be ready, and the prompts that you saw above +will now include the second machine. + +To make this a real production installation, you will possibly want to use +different Pool, or a different schedule. It is up to you to customize. In any +case, you should change the password in both the Director's file and the +Client's file for additional security. + +For some important tips on changing names and passwords, and a diagram of what +names and passwords must match, please see +\ilink{Authorization Errors}{AuthorizationErrors} in the FAQ chapter +of this manual. + +\section{When The Tape Fills} +\label{FullTape} +\index[general]{Fills!When The Tape } +\index[general]{When The Tape Fills } + +If you have scheduled your job, typically nightly, there will come a time when +the tape fills up and {\bf Bacula} cannot continue. 
In this case, Bacula will +send you a message similar to the following: + +\footnotesize +\begin{verbatim} +rufus-sd: block.c:337 === Write error errno=28: ERR=No space left + on device +\end{verbatim} +\normalsize + +This indicates that Bacula got a write error because the tape is full. Bacula +will then search the Pool specified for your Job looking for an appendable +volume. In the best of all cases, you will have properly set your Retention +Periods and you will have all your tapes marked to be Recycled, and {\bf +Bacula} will automatically recycle the tapes in your pool requesting and +overwriting old Volumes. For more information on recycling, please see the +\ilink{Recycling chapter}{RecyclingChapter} of this manual. If you +find that your Volumes were not properly recycled (usually because of a +configuration error), please see the +\ilink{Manually Recycling Volumes}{manualrecycling} section of +the Recycling chapter. + +If like me, you have a very large set of Volumes and you label them with the +date the Volume was first writing, or you have not set up your Retention +periods, Bacula will not find a tape in the pool, and it will send you a +message similar to the following: + +\footnotesize +\begin{verbatim} +rufus-sd: Job kernsave.2002-09-19.10:50:48 waiting. Cannot find any + appendable volumes. +Please use the "label" command to create a new Volume for: + Storage: SDT-10000 + Media type: DDS-4 + Pool: Default +\end{verbatim} +\normalsize + +Until you create a new Volume, this message will be repeated an hour later, +then two hours later, and so on doubling the interval each time up to a +maximum interval of one day. + +The obvious question at this point is: What do I do now? + +The answer is simple: first, using the Console program, close the tape drive +using the {\bf unmount} command. If you only have a single drive, it will be +automatically selected, otherwise, make sure you release the one specified on +the message (in this case {\bf STD-10000}). + +Next, you remove the tape from the drive and insert a new blank tape. Note, on +some older tape drives, you may need to write an end of file mark ({\bf mt \ +-f \ /dev/nst0 \ weof}) to prevent the drive from running away when Bacula +attempts to read the label. + +Finally, you use the {\bf label} command in the Console to write a label to +the new Volume. The {\bf label} command will contact the Storage daemon to +write the software label, if it is successful, it will add the new Volume to +the Pool, then issue a {\bf mount} command to the Storage daemon. See the +previous sections of this chapter for more details on labeling tapes. + +The result is that Bacula will continue the previous Job writing the backup to +the new Volume. + +If you have a Pool of volumes and Bacula is cycling through them, instead of +the above message "Cannot find any appendable volumes.", Bacula may ask you +to mount a specific volume. In that case, you should attempt to do just that. +If you do not have the volume any more (for any of a number of reasons), you +can simply mount another volume from the same Pool, providing it is +appendable, and Bacula will use it. You can use the {\bf list volumes} command +in the console program to determine which volumes are appendable and which are +not. + +If like me, you have your Volume retention periods set correctly, but you have +no more free Volumes, you can relabel and reuse a Volume as follows: + +\begin{itemize} +\item Do a {\bf list volumes} in the Console and select the oldest Volume for + relabeling. 
+\item If you have setup your Retention periods correctly, the Volume should + have VolStatus {\bf Purged}. +\item If the VolStatus is not set to Purged, you will need to purge the + database of Jobs that are written on that Volume. Do so by using the command + {\bf purge jobs volume} in the Console. If you have multiple Pools, you will +be prompted for the Pool then enter the VolumeName (or MediaId) when +requested. +\item Then simply use the {\bf relabel} command to relabel the Volume. + \end{itemize} + +To manually relabel the Volume use the following additional steps: + +\begin{itemize} +\item To delete the Volume from the catalog use the {\bf delete volume} + command in the Console and select the VolumeName (or MediaId) to be deleted. + +\item Use the {\bf unmount} command in the Console to unmount the old tape. +\item Physically relabel the old Volume that you deleted so that it can be + reused. +\item Insert the old Volume in the tape drive. +\item From a command line do: {\bf mt \ -f \ /dev/st0 \ rewind} and {\bf mt \ + -f \ /dev/st0 \ weof}, where you need to use the proper tape drive name for + your system in place of {\bf /dev/st0}. +\item Use the {\bf label} command in the Console to write a new Bacula label + on your tape. +\item Use the {\bf mount} command in the Console if it is not automatically + done, so that Bacula starts using your newly labeled tape. + \end{itemize} + +\section{Other Useful Console Commands} +\index[general]{Commands!Other Useful Console } +\index[general]{Other Useful Console Commands } + +\begin{description} + +\item [status dir] + \index[console]{status dir } + Print a status of all running jobs and jobs scheduled in the next 24 hours. + +\item [status] + \index[console]{status } + The console program will prompt you to select a daemon type, then will +request the daemon's status. + +\item [status jobid=nn] + \index[console]{status jobid } + Print a status of JobId nn if it is running. The Storage daemon is contacted +and requested to print a current status of the job as well. + +\item [list pools] + \index[console]{list pools } + List the pools defined in the Catalog (normally only Default is used). + +\item [list media] + \index[console]{list media } + Lists all the media defined in the Catalog. + +\item [list jobs] + \index[console]{list jobs } + Lists all jobs in the Catalog that have run. + +\item [list jobid=nn] + \index[console]{list jobid } + Lists JobId nn from the Catalog. + +\item [list jobtotals] + \index[console]{list jobtotals } + Lists totals for all jobs in the Catalog. + +\item [list files jobid=nn] + \index[console]{list files jobid } + List the files that were saved for JobId nn. + +\item [list jobmedia] + \index[console]{list jobmedia } + List the media information for each Job run. + +\item [messages] + \index[console]{messages } + Prints any messages that have been directed to the console. + +\item [unmount storage=storage-name] + \index[console]{unmount storage } + Unmounts the drive associated with the storage device with the name {\bf +storage-name} if the drive is not currently being used. This command is used +if you wish Bacula to free the drive so that you can use it to label a tape. + + +\item [mount storage=storage-name] + \index[sd]{mount storage } + Causes the drive associated with the storage device to be mounted again. When +Bacula reaches the end of a volume and requests you to mount a new volume, +you must issue this command after you have placed the new volume in the +drive. 
In effect, it is the signal needed by Bacula to know to start reading +or writing the new volume. + +\item [quit] + \index[sd]{quit } + Exit or quit the console program. +\end{description} + +Most of the commands given above, with the exception of {\bf list}, will +prompt you for the necessary arguments if you simply enter the command name. + +\section{Debug Daemon Output} +\index[general]{Debug Daemon Output } +\index[general]{Output!Debug Daemon } + +If you want debug output from the daemons as they are running, start the +daemons from the install directory as follows: + +\footnotesize +\begin{verbatim} +./bacula start -d100 +\end{verbatim} +\normalsize + +This can be particularly helpful if your daemons do not start correctly, +because direct daemon output to the console is normally directed to the +NULL device, but with the debug level greater than zero, the output +will be sent to the starting terminal. + +To stop the three daemons, enter the following from the install directory: + +\footnotesize +\begin{verbatim} +./bacula stop +\end{verbatim} +\normalsize + +The execution of {\bf bacula stop} may complain about pids not found. This is +OK, especially if one of the daemons has died, which is very rare. + +To do a full system save, each File daemon must be running as root so that it +will have permission to access all the files. None of the other daemons +require root privileges. However, the Storage daemon must be able to open the +tape drives. On many systems, only root can access the tape drives. Either run +the Storage daemon as root, or change the permissions on the tape devices to +permit non-root access. MySQL and PostgreSQL can be installed and run with any +userid; root privilege is not necessary. + +\section{Patience When Starting Daemons or Mounting Blank Tapes} + +When you start the Bacula daemons, the Storage daemon attempts to open all +defined storage devices and verify the currently mounted Volume (if +configured). Until all the storage devices are verified, the Storage daemon +will not accept connections from the Console program. If a tape was previously +used, it will be rewound, and on some devices this can take several minutes. +As a consequence, you may need to have a bit of patience when first contacting +the Storage daemon after starting the daemons. If you can see your tape drive, +once the lights stop flashing, the drive will be ready to be used. + +The same considerations apply if you have just mounted a blank tape in a drive +such as an HP DLT. It can take a minute or two before the drive properly +recognizes that the tape is blank. If you attempt to {\bf mount} the tape with +the Console program during this recognition period, it is quite possible that +you will hang your SCSI driver (at least on my Red Hat Linux system). As a +consequence, you are again urged to have patience when inserting blank tapes. +Let the device settle down before attempting to access it. + +\section{Difficulties Connecting from the FD to the SD} +\index[general]{Difficulties Connecting from the FD to the SD} +\index[general]{SD!Difficulties Connecting from the FD to the SD} + +If you are having difficulties getting one or more of your File daemons to +connect to the Storage daemon, it is most likely because you have not used a +fully qualified domain name on the {\bf Address} directive in the +Director's Storage resource. That is the resolver on the File daemon's machine +(not on the Director's) must be able to resolve the name you supply into an IP +address. 
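+As a minimal sketch only (the resource name, password, and device names below
+are illustrative and not taken from this manual), the directive in question is
+{\bf Address} in the Director's Storage resource, which should carry a fully
+qualified name that the File daemon's machine can resolve:
+
+\footnotesize
+\begin{verbatim}
+# Hypothetical Storage resource in the Director's bacula-dir.conf
+Storage {
+  Name = DDS-4-Storage              # illustrative resource name
+  Address = megalon.mydomain.com    # must resolve from the File daemon's machine
+  SDPort = 9103
+  Password = "sd-password"          # illustrative; must match the Storage daemon
+  Device = DDS-4
+  Media Type = DDS-4
+}
+\end{verbatim}
+\normalsize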
An example of an address that is guaranteed not to work: {\bf
+localhost}. An example that may work: {\bf megalon}. An example that is more
+likely to work: {\bf megalon.mydomain.com}. On Win32, if you don't have a good
+resolver (often true on older Win98 systems), you might try using an IP
+address in place of a name.
+
+If your address is correct, then make sure that no other program is using
+port 9103 on the Storage daemon's machine. The Bacula port numbers are
+authorized by IANA, and should not be used by other programs, but apparently
+some HP printers do use these port numbers. A {\bf netstat -a} on the Storage
+daemon's machine can determine who is using the 9103 port (used for FD to SD
+communications in Bacula).
+
+\section{Daemon Command Line Options}
+\index[general]{Daemon Command Line Options }
+\index[general]{Options!Daemon Command Line }
+
+Each of the three daemons (Director, File, Storage) accepts a small set of
+options on the command line. In general, each of the daemons as well as the
+Console program accepts the following options:
+
+\begin{description}
+
+\item [-c \lt{}file\gt{}]
+ \index[sd]{-c \lt{}file\gt{} }
+ Define the file to use as a configuration file. The default is the daemon
+ name followed by {\bf .conf}, i.e. {\bf bacula-dir.conf} for the Director,
+ {\bf bacula-fd.conf} for the File daemon, and {\bf bacula-sd.conf} for the
+ Storage daemon.
+
+\item [-d nn]
+ \index[sd]{-d nn }
+ Set the debug level to {\bf nn}. Higher levels of debug cause more
+ information to be displayed on STDOUT concerning what the daemon is doing.
+
+\item [-f]
+ Run the daemon in the foreground. This option is needed to run the daemon
+ under the debugger.
+
+\item [-s]
+ Do not trap signals. This option is needed to run the daemon under the
+ debugger.
+
+\item [-t]
+ Read the configuration file and print any error messages, then immediately
+ exit. Useful for syntax testing of new configuration files.
+
+\item [-v]
+ Be more verbose or more complete in printing error and informational
+ messages. Recommended.
+
+\item [-?]
+ Print the version and list of options.
+ \end{description}
+
+The Director has the following additional Director-specific option:
+
+\begin{description}
+
+\item [-r \lt{}job\gt{}]
+ \index[fd]{-r \lt{}job\gt{} }
+ Run the named job immediately. This is for debugging and should not be used.
+
+\end{description}
+
+The File daemon has the following File daemon-specific option:
+
+\begin{description}
+
+\item [-i]
+ Assume that the daemon is called from {\bf inetd} or {\bf xinetd}. In this
+ case, the daemon assumes that a connection has already been made and that it
+is passed as STDIN. After the connection terminates, the daemon will exit.
+\end{description}
+
+The Storage daemon has no Storage daemon-specific options.
+
+The Console program has no console-specific options.
+
+\section{Creating a Pool}
+\label{Pool}
+\index[general]{Pool!Creating a }
+\index[general]{Creating a Pool }
+
+Creating the Pool is automatically done when {\bf Bacula} starts, so if you
+understand Pools, you can skip to the next section.
+
+When you run a job, one of the things that Bacula must know is what Volumes to
+use to back up the FileSet. Instead of specifying a Volume (tape) directly, you
+specify which Pool of Volumes you want Bacula to consult when it wants a tape
+for writing backups. Bacula will select the first available Volume from the
+Pool that is appropriate for the Storage device you have specified for the Job
+being run.
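+As a rough sketch of how these pieces relate in the Director's configuration
+(the names and the retention value are illustrative only, not taken from this
+manual), a Pool and the Job that uses it might look like:
+
+\footnotesize
+\begin{verbatim}
+# Hypothetical excerpts from the Director's configuration
+Pool {
+  Name = Default
+  Pool Type = Backup
+  Recycle = yes                  # purged Volumes may be reused
+  AutoPrune = yes                # prune expired Jobs/Files from the catalog
+  Volume Retention = 365 days    # example Retention Period
+}
+
+Job {
+  Name = "NightlySave"           # illustrative Job name
+  Pool = Default                 # the Pool consulted when a Volume is needed
+  # ... other Job directives (Client, FileSet, Storage, Schedule) ...
+}
+\end{verbatim}
+\normalsize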
When a volume has filled up with data, {\bf Bacula} will change its +VolStatus from {\bf Append} to {\bf Full}, and then {\bf Bacula} will use the +next volume and so on. If no appendable Volume exists in the Pool, the +Director will attempt to recycle an old Volume, if there are still no +appendable Volumes available, {\bf Bacula} will send a message requesting the +operator to create an appropriate Volume. + +{\bf Bacula} keeps track of the Pool name, the volumes contained in the Pool, +and a number of attributes of each of those Volumes. + +When Bacula starts, it ensures that all Pool resource definitions have been +recorded in the catalog. You can verify this by entering: + +\footnotesize +\begin{verbatim} +list pools +\end{verbatim} +\normalsize + +to the console program, which should print something like the following: + +\footnotesize +\begin{verbatim} +*list pools +Using default Catalog name=MySQL DB=bacula ++--------+---------+---------+---------+----------+-------------+ +| PoolId | Name | NumVols | MaxVols | PoolType | LabelFormat | ++--------+---------+---------+---------+----------+-------------+ +| 1 | Default | 3 | 0 | Backup | * | +| 2 | File | 12 | 12 | Backup | File | ++--------+---------+---------+---------+----------+-------------+ +* +\end{verbatim} +\normalsize + +If you attempt to create the same Pool name a second time, {\bf Bacula} will +print: + +\footnotesize +\begin{verbatim} +Error: Pool Default already exists. +Once created, you may use the {\bf update} command to +modify many of the values in the Pool record. +\end{verbatim} +\normalsize + +\label{Labeling} + +\section{Labeling Your Volumes} +\index[general]{Volumes!Labeling Your } +\index[general]{Labeling Your Volumes } + +Bacula requires that each Volume contains a software label. There are several +strategies for labeling volumes. The one I use is to label them as they are +needed by {\bf Bacula} using the console program. That is when Bacula needs a +new Volume, and it does not find one in the catalog, it will send me an email +message requesting that I add Volumes to the Pool. I then use the {\bf label} +command in the Console program to label a new Volume and to define it in the +Pool database, after which Bacula will begin writing on the new Volume. +Alternatively, I can use the Console {\bf relabel} command to relabel a Volume +that is no longer used providing it has VolStatus {\bf Purged}. + +Another strategy is to label a set of volumes at the start, then use them as +{\bf Bacula} requests them. This is most often done if you are cycling through +a set of tapes, for example using an autochanger. For more details on +recycling, please see the +\ilink{Automatic Volume Recycling}{RecyclingChapter} chapter of +this manual. + +If you run a Bacula job, and you have no labeled tapes in the Pool, Bacula +will inform you, and you can create them "on-the-fly" so to speak. In my +case, I label my tapes with the date, for example: {\bf DLT-18April02}. See +below for the details of using the {\bf label} command. + +\section{Labeling Volumes with the Console Program} +\index[general]{Labeling Volumes with the Console Program } +\index[general]{Program!Labeling Volumes with the Console } + +Labeling volumes is normally done by using the console program. 
+ +\begin{enumerate} +\item ./bconsole +\item label + \end{enumerate} + +If Bacula complains that you cannot label the tape because it is already +labeled, simply {\bf unmount} the tape using the {\bf unmount} command in the +console, then physically mount a blank tape and re-issue the {\bf label} +command. + +Since the physical storage media is different for each device, the {\bf label} +command will provide you with a list of the defined Storage resources such as +the following: + +\footnotesize +\begin{verbatim} +The defined Storage resources are: + 1: File + 2: 8mmDrive + 3: DLTDrive + 4: SDT-10000 +Select Storage resource (1-4): +\end{verbatim} +\normalsize + +At this point, you should have a blank tape in the drive corresponding to the +Storage resource that you select. + +It will then ask you for the Volume name. + +\footnotesize +\begin{verbatim} +Enter new Volume name: +\end{verbatim} +\normalsize + +If Bacula complains: + +\footnotesize +\begin{verbatim} +Media record for Volume xxxx already exists. +\end{verbatim} +\normalsize + +It means that the volume name {\bf xxxx} that you entered already exists in +the Media database. You can list all the defined Media (Volumes) with the {\bf +list media} command. Note, the LastWritten column has been truncated for +proper printing. + +\footnotesize +\begin{verbatim} ++---------------+---------+--------+----------------+-----/~/-+------------+-----+ +| VolumeName | MediaTyp| VolStat| VolBytes | LastWri | VolReten | Recy| ++---------------+---------+--------+----------------+---------+------------+-----+ +| DLTVol0002 | DLT8000 | Purged | 56,128,042,217 | 2001-10 | 31,536,000 | 0 | +| DLT-07Oct2001 | DLT8000 | Full | 56,172,030,586 | 2001-11 | 31,536,000 | 0 | +| DLT-08Nov2001 | DLT8000 | Full | 55,691,684,216 | 2001-12 | 31,536,000 | 0 | +| DLT-01Dec2001 | DLT8000 | Full | 55,162,215,866 | 2001-12 | 31,536,000 | 0 | +| DLT-28Dec2001 | DLT8000 | Full | 57,888,007,042 | 2002-01 | 31,536,000 | 0 | +| DLT-20Jan2002 | DLT8000 | Full | 57,003,507,308 | 2002-02 | 31,536,000 | 0 | +| DLT-16Feb2002 | DLT8000 | Full | 55,772,630,824 | 2002-03 | 31,536,000 | 0 | +| DLT-12Mar2002 | DLT8000 | Full | 50,666,320,453 | 1970-01 | 31,536,000 | 0 | +| DLT-27Mar2002 | DLT8000 | Full | 57,592,952,309 | 2002-04 | 31,536,000 | 0 | +| DLT-15Apr2002 | DLT8000 | Full | 57,190,864,185 | 2002-05 | 31,536,000 | 0 | +| DLT-04May2002 | DLT8000 | Full | 60,486,677,724 | 2002-05 | 31,536,000 | 0 | +| DLT-26May02 | DLT8000 | Append | 1,336,699,620 | 2002-05 | 31,536,000 | 1 | ++---------------+---------+--------+----------------+-----/~/-+------------+-----+ +\end{verbatim} +\normalsize + +Once Bacula has verified that the volume does not already exist, it will +prompt you for the name of the Pool in which the Volume (tape) is to be +created. If there is only one Pool (Default), it will be automatically +selected. + +If the tape is successfully labeled, a Volume record will also be created in +the Pool. That is the Volume name and all its other attributes will appear +when you list the Pool. In addition, that Volume will be available for backup +if the MediaType matches what is requested by the Storage daemon. + +When you labeled the tape, you answered very few questions about it -- +principally the Volume name, and perhaps the Slot. However, a Volume record in +the catalog database (internally known as a Media record) contains quite a few +attributes. Most of these attributes will be filled in from the default values +that were defined in the Pool (i.e. 
the Pool holds most of the default +attributes used when creating a Volume). + +It is also possible to add media to the pool without physically labeling the +Volumes. This can be done with the {\bf add} command. For more information, +please see the +\ilink{Console Chapter}{_ConsoleChapter} of this manual. diff --git a/docs/manuals/de/concepts/update_version b/docs/manuals/de/concepts/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/de/concepts/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/concepts/update_version.in b/docs/manuals/de/concepts/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/de/concepts/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/concepts/uploaddoc b/docs/manuals/de/concepts/uploaddoc new file mode 100755 index 00000000..02668a12 --- /dev/null +++ b/docs/manuals/de/concepts/uploaddoc @@ -0,0 +1,11 @@ +#!/bin/sh + +ftp -i ftp.sectoor.de <out + type out +\end{verbatim} +\normalsize + +The precise path to bacula-fd depends on where it is installed. The +example above is the default used in 1.39.22 and later. +The {\bf -t} option will cause Bacula to read the configuration file, print +any error messages and then exit. the {\bf \gt{}} redirects the output to the +file named {\bf out}, which you can list with the {\bf type} command. + +If something is going wrong later, or you want to run {\bf Bacula} with a +debug option, you might try starting it as: + +\footnotesize +\begin{verbatim} + c:\Program Files\bacula\bin\bacula-fd -d 100 >out +\end{verbatim} +\normalsize + +In this case, Bacula will run until you explicitly stop it, which will give +you a chance to connect to it from your Unix/Linux server. In later versions +of Bacula (1.34 on, I think), when you start the File daemon in debug mode it +can write the output to a trace file {\bf bacula.trace} in the current +directory. To enable this, before running a job, use the console, and enter: + +\footnotesize +\begin{verbatim} + trace on +\end{verbatim} +\normalsize + +then run the job, and once you have terminated the File daemon, you will find +the debug output in the {\bf bacula.trace} file, which will probably be +located in the same directory as bacula-fd.exe. + +In addition, you should look in the System Applications log on the Control +Panel to find any Windows errors that Bacula got during the startup process. + +Finally, due to the above problems, when you turn on debugging, and specify +trace=1 on a setdebug command in the Console, Bacula will write the debug +information to the file {\bf bacula.trace} in the directory from which Bacula +is executing. + +If you are having problems with ClientRunBeforeJob scripts randomly dying, +it is possible that you have run into an Oracle bug. See bug number 622 in +the bugs.bacula.org database. 
The following information has been +provided by a user on this issue: + +\footnotesize +\begin{verbatim} +The information in this document applies to: + Oracle HTTP Server - Version: 9.0.4 + Microsoft Windows Server 2003 + Symptoms + When starting an OC4J instance, the System Clock runs faster, about 7 +seconds per minute. + + Cause + + + This is caused by the Sun JVM bug 4500388, which states that "Calling +Thread.sleep() with a small argument affects the system clock". Although +this is reported as fixed in JDK 1.4.0_02, several reports contradict this +(see the bug in +http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4500388). + + + Also reported by Microsoft as "The system clock may run fast when you +use the ACPI power management timer as a high-resolution counter on Windows +2000-based computers" (See http://support.microsoft.com/?id=821893) +\end{verbatim} +\normalsize + +You may wish to start the daemon with debug mode on rather than doing it +using bconsole. To do so, edit the following registry key: + +\footnotesize +\begin{verbatim} +HKEY_LOCAL_MACHINE\HARDWARE\SYSTEM\CurrentControlSet\Services\Bacula-dir +\end{verbatim} +\normalsize + +using regedit, then add -dnn after the /service option, where nn represents +the debug level you want. + +\label{Compatibility} +\section{Windows Compatibility Considerations} +\index[general]{Windows Compatibility Considerations} +\index[general]{Considerations!Windows Compatibility} + +If you are not using the VSS (Volume Shadow Copy) option described in the +next section of this chapter, and if any applications are running during +the backup and they have files opened exclusively, Bacula will not be able +to backup those files, so be sure you close your applications (or tell your +users to close their applications) before the backup. Fortunately, most +Microsoft applications do not open files exclusively so that they can be +backed up. However, you will need to experiment. In any case, if Bacula +cannot open the file, it will print an error message, so you will always +know which files were not backed up. For version 1.37.25 and greater, see +the section below on Volume Shadow Copy Service that permits backing up any +file. + +During backup, Bacula doesn't know about the system registry, so you will +either need to write it out to an ASCII file using {\bf regedit~~/e} or use a +program specifically designed to make a copy or backup the registry. + +In Bacula version 1.31 and later, we use Windows backup API calls by +default. Typical of Windows, programming these special BackupRead and +BackupWrite calls is a real nightmare of complications. The end result +gives some distinct advantages and some disadvantages. + +First, the advantages are that on WinNT/2K/XP systems, the security and +ownership information is now backed up. In addition, with the exception of +files in exclusive use by another program, Bacula can now access all system +files. This means that when you restore files, the security and ownership +information will be restored on WinNT/2K/XP along with the data. + +The disadvantage of the Windows backup API calls is that it produces +non-portable backups. That is files and their data that are backed up on +WinNT using the native API calls (BackupRead/BackupWrite) cannot be +restored on Win95/98/Me or Unix systems. In principle, a file backed up on +WinNT can be restored on WinXP, but this remains to be seen in practice +(not yet tested). 
In addition, the stand-alone tools such as {\bf bls} and +{\bf bextract} cannot be used to retrieve the data for those files because +those tools are not available on Windows. All restores must use the Bacula +{\bf restore} command. As of Bacula 1.39.x, thanks to Thorsten Engel, this +restriction is removed, and Bacula should be able to read non-portable +backups on any system and restore the data appropriately. However, +on a system that does not have the BackupRead/BackupWrite calls (older +Windows versions and all Unix/Linux machines), though the file data +can be restored, the Windows security and access control data will not be restored. +This means that a standard set of access permissions will be set for +such restored files. + + +As a default, Bacula backs up Windows systems using the Windows API calls. +If you want to backup data on a WinNT/2K/XP system and restore it on a +Unix/Win95/98/Me system, we have provided a special {\bf portable} option +that backs up the data in a portable fashion by using portable API calls. +See the \ilink{portable option}{portable} on the Include statement in a +FileSet resource in the Director's configuration chapter for the details on +setting this option. However, using the portable option means you may have +permissions problems accessing files, and none of the security and +ownership information will be backed up or restored. The file data can, +however, be restored on any system. + +You should always be able to restore any file backed up on Unix or Win95/98/Me +to any other system. On some systems, such as WinNT/2K/XP, you may have to +reset the ownership of such restored files. Any file backed up on WinNT/2K/XP +should in principle be able to be restored to a similar system (i.e. +WinNT/2K/XP), however, I am unsure of the consequences if the owner +information and accounts are not identical on both systems. Bacula will not +let you restore files backed up on WinNT/2K/XP to any other system (i.e. Unix +Win95/98/Me) if you have used the defaults. + +Finally, if you specify the {\bf portable=yes} option on the files you back +up. Bacula will be able to restore them on any other system. However, any +WinNT/2K/XP specific security and ownership information will be lost. + +The following matrix will give you an idea of what you can expect. Thanks to +Marc Brueckner for doing the tests: + +\addcontentsline{lot}{table}{WinNT/2K/XP Restore Portability Status} +\begin{longtable}{|l|l|p{2.8in}|} + \hline +\multicolumn{1}{|c|}{\bf Backup OS} & \multicolumn{1}{c|}{\bf Restore OS} +& \multicolumn{1}{c|}{\bf Results } \\ + \hline {WinMe} & {WinMe} & {Works } \\ + \hline {WinMe} & {WinNT} & {Works (SYSTEM permissions) } \\ + \hline {WinMe} & {WinXP} & {Works (SYSTEM permissions) } \\ + \hline {WinMe} & {Linux} & {Works (SYSTEM permissions) } \\ + \hline {\ } & {\ } & {\ } \\ + \hline {WinXP} & {WinXP} & {Works } \\ + \hline {WinXP} & {WinNT} & {Works (all files OK, but got "The data is invalid" +message) } \\ + \hline {WinXP} & {WinMe} & {Error: Win32 data stream not supported. } \\ + \hline {WinXP} & {WinMe} & {Works if {\bf Portable=yes} specified during backup.} \\ + \hline {WinXP} & {Linux} & {Error: Win32 data stream not supported. } \\ + \hline {WinXP} & {Linux} & {Works if {\bf Portable=yes} specified during backup.}\\ + \hline {\ } & {\ } & {\ } \\ + \hline {WinNT} & {WinNT} & {Works } \\ + \hline {WinNT} & {WinXP} & {Works } \\ + \hline {WinNT} & {WinMe} & {Error: Win32 data stream not supported. 
} \\
+ \hline {WinNT} & {WinMe} & {Works if {\bf Portable=yes} specified during backup.}\\
+ \hline {WinNT} & {Linux} & {Error: Win32 data stream not supported. } \\
+ \hline {WinNT} & {Linux} & {Works if {\bf Portable=yes} specified during backup. }\\
+ \hline {\ } & {\ } & {\ } \\
+ \hline {Linux} & {Linux} & {Works } \\
+ \hline {Linux} & {WinNT} & {Works (SYSTEM permissions) } \\
+ \hline {Linux} & {WinMe} & {Works } \\
+ \hline {Linux} & {WinXP} & {Works (SYSTEM permissions)}
+\\ \hline
+\end{longtable}
+
+Note: with Bacula versions 1.39.x and later, non-portable Windows data can
+be restored on any machine.
+
+
+\label{VSS}
+\section{Volume Shadow Copy Service}
+\index[general]{Volume Shadow Copy Service}
+\index[general]{VSS}
+In version 1.37.30 and greater, you can turn on Microsoft's Volume
+Shadow Copy Service (VSS).
+
+Microsoft added VSS to Windows XP and Windows 2003. From the perspective of
+a backup solution for Windows, this is an extremely important step. VSS
+allows Bacula to back up open files and even to interact with applications like
+an RDBMS to produce consistent file copies. VSS-aware applications are called
+VSS Writers; they register with the OS so that when Bacula wants to do a
+Snapshot, the OS will notify the registered Writer programs, which may then
+create a consistent state in their application, which will be backed up.
+Examples of these writers are "MSDE" (Microsoft database
+engine), "Event Log Writer", "Registry Writer" plus third-party
+writers. If you have a non-VSS-aware application (e.g.
+SQL Anywhere or probably MySQL), a shadow copy is still generated
+and the open files can be backed up, but there is no guarantee
+that the files are consistent.
+
+Bacula produces a message from each of the registered writer programs
+when it is doing a VSS backup so you know which ones are correctly backed
+up.
+
+Bacula supports VSS on both Windows 2003 and Windows XP.
+Technically, Bacula creates a shadow copy as soon as the backup process
+starts. It then backs up all files from the shadow copy and destroys the
+shadow copy after the backup process. Please keep in mind that VSS
+creates a snapshot and thus backs up the system in the state it had
+when the backup started. It will disregard file changes which occur during
+the backup process.
+
+VSS can be turned on by placing an
+
+\index[dir]{Enable VSS}
+\index[general]{Enable VSS}
+\begin{verbatim}
+Enable VSS = yes
+\end{verbatim}
+
+in your FileSet resource.
+
+The VSS-aware File daemon has the letters VSS on the signon line that
+it produces when contacted by the console. For example:
+\begin{verbatim}
+Tibs-fd Version: 1.37.32 (22 July 2005) VSS Windows XP MVS NT 5.1.2600
+\end{verbatim}
+The VSS is shown in the line above. This only means that the File daemon
+is capable of doing VSS, not that VSS is turned on for a particular backup.
+There are two ways of telling if VSS is actually turned on during a backup.
+The first is to look at the status output for a job, e.g.:
+\footnotesize
+\begin{verbatim}
+Running Jobs:
+JobId 1 Job NightlySave.2005-07-23_13.25.45 is running.
+ VSS Backup Job started: 23-Jul-05 13:25
+ Files=70,113 Bytes=3,987,180,650 Bytes/sec=3,244,247
+ Files Examined=75,021
+ Processing file: c:/Documents and Settings/kern/My Documents/My Pictures/Misc1/Sans titre - 39.pdd
+ SDReadSeqNo=5 fd=352
+\end{verbatim}
+\normalsize
+Here, you see under Running Jobs that JobId 1 is "VSS Backup Job started ..."
+This means that VSS is enabled for that job.
If VSS is not enabled, it will +simply show "Backup Job started ..." without the letters VSS. + +The second way to know that the job was backed up with VSS is to look at the +Job Report, which will look something like the following: +\footnotesize +\begin{verbatim} +23-Jul 13:25 rufus-dir: Start Backup JobId 1, Job=NightlySave.2005-07-23_13.25.45 +23-Jul 13:26 rufus-sd: Wrote label to prelabeled Volume "TestVolume001" on device "DDS-4" (/dev/nst0) +23-Jul 13:26 rufus-sd: Spooling data ... +23-Jul 13:26 Tibs: Generate VSS snapshots. Driver="VSS WinXP", Drive(s)="C" +23-Jul 13:26 Tibs: VSS Writer: "MSDEWriter", State: 1 (VSS_WS_STABLE) +23-Jul 13:26 Tibs: VSS Writer: "Microsoft Writer (Bootable State)", State: 1 (VSS_WS_STABLE) +23-Jul 13:26 Tibs: VSS Writer: "WMI Writer", State: 1 (VSS_WS_STABLE) +23-Jul 13:26 Tibs: VSS Writer: "Microsoft Writer (Service State)", State: 1 (VSS_WS_STABLE) +\end{verbatim} +\normalsize +In the above Job Report listing, you see that the VSS snapshot was generated for drive C (if +other drives are backed up, they will be listed on the {\bf Drive(s)="C"} You also see the +reports from each of the writer program. Here they all report VSS\_WS\_STABLE, which means +that you will get a consistent snapshot of the data handled by that writer. + +\section{VSS Problems} +\index[general]{Problems!VSS} +\index[fd] {Problems!VSS} +\index[general]{VSS Problems} +\index[fd]{VSS Problems} + +If you are experiencing problems such as VSS hanging on MSDE, first try +running {\bf vssadmin} to check for problems, then try running {\bf +ntbackup} which also uses VSS to see if it has similar problems. If so, you +know that the problem is in your Windows machine and not with Bacula. + +The FD hang problems were reported with {\bf MSDEwriter} when: +\begin{itemize} +\item a local firewall locked local access to the MSDE TCP port (MSDEwriter +seems to use TCP/IP and not Named Pipes). +\item msdtcs was installed to run under "localsystem": try running msdtcs +under networking account (instead of local system) (com+ seems to work +better with this configuration). +\end{itemize} + + +\section{Windows Firewalls} +\index[general]{Firewalls!Windows} +\index[general]{Windows Firewalls} + +If you turn on the firewalling feature on Windows (default in WinXP SP2), you +are likely to find that the Bacula ports are blocked and you cannot +communicate to the other daemons. This can be deactivated through the {\bf +Security Notification} dialog, which is apparently somewhere in the {\bf +Security Center}. I don't have this on my computer, so I cannot give the exact +details. + +The command: + +\footnotesize +\begin{verbatim} +netsh firewall set opmode disable +\end{verbatim} +\normalsize + +is purported to disable the firewall, but this command is not accepted on my +WinXP Home machine. + +\section{Windows Port Usage} +\index[general]{Windows Port Usage} +\index[general]{Usage!Windows Port} + +If you want to see if the File daemon has properly opened the port and is +listening, you can enter the following command in a shell window: + +\footnotesize +\begin{verbatim} + netstat -an | findstr 910[123] +\end{verbatim} +\normalsize + +TopView is another program that has been recommend, but it is not a +standard Win32 program, so you must find and download it from the Internet. + +\section{Windows Disaster Recovery} +\index[general]{Recovery!Windows Disaster} +\index[general]{Windows Disaster Recovery} + +We don't currently have a good solution for disaster recovery on Windows as we +do on Linux. 
The main piece lacking is a Windows boot floppy or a Windows boot
+CD. Microsoft releases a Windows Pre-installation Environment ({\bf WinPE})
+that could possibly work, but we have not investigated it. This means that
+until someone figures out the correct procedure, you must restore the OS from
+the installation disks, then you can load a Bacula client and restore files.
+Please don't count on using {\bf bextract} to extract files from your backup
+tapes during a disaster recovery unless you have backed up those files using
+the {\bf portable} option. {\bf bextract} does not run on Windows, and the
+normal way Bacula saves files using the Windows API prevents the files from
+being restored on a Unix machine. Once you have an operational Windows OS
+loaded, you can run the File daemon and restore your user files.
+
+Please see
+\ilink{ Disaster Recovery of Win32 Systems}{Win3233} for the latest
+suggestion, which looks very promising.
+
+It looks like Bart PE Builder, which creates a Windows PE (Pre-installation
+Environment) Boot-CD, may be just what is needed to build a complete disaster
+recovery system for Win32. This distribution can be found at
+\elink{http://www.nu2.nu/pebuilder/}{\url{http://www.nu2.nu/pebuilder/}}.
+
+\section{Windows Restore Problems}
+\index[general]{Problems!Windows Restore}
+\index[general]{Windows Restore Problems}
+Please see the
+\ilink{Restore Chapter}{Windows} of this manual for problems
+that you might encounter doing a restore.
+
+\section{Windows Backup Problems}
+\index[general]{Problems!Windows Backup}
+\index[general]{Windows Backup Problems}
+If, during a Backup, you get the message
+{\bf ERR=Access is denied} and you are using the portable option,
+you should try adding both the non-portable (backup API) and
+the Volume Shadow Copy options to your Director's conf file.
+
+In the Options resource:
+\footnotesize
+\begin{verbatim}
+portable = no
+\end{verbatim}
+\normalsize
+
+In the FileSet resource:
+\footnotesize
+\begin{verbatim}
+enablevss = yes
+\end{verbatim}
+\normalsize
+
+In general, specifying these two options should allow you to back up
+any file on a Windows system. However, in some cases, if users
+are allowed to have full control of their folders, even system programs
+such as Bacula can be locked out. In this case, you must identify
+which folders or files are creating the problem and do the following:
+
+\begin{enumerate}
+\item Grant ownership of the file/folder to the Administrators group,
+with the option to replace the owner on all child objects.
+\item Grant full control permissions to the Administrators group,
+and change the user's group to only have Modify permission to
+the file/folder and all child objects.
+\end{enumerate}
+
+Thanks to Georger Araujo for the above information.
+
+\section{Windows Ownership and Permissions Problems}
+\index[general]{Problems!Windows Ownership and Permissions}
+\index[general]{Windows Ownership and Permissions Problems}
+
+If you restore files backed up from WinNT/XP/2K to an alternate directory,
+Bacula may need to create some higher level directories that were not saved
+(or restored). In this case, the File daemon will create them under the SYSTEM
+account because that is the account that Bacula runs under as a service. As of
+version 1.32f-3, Bacula creates these files with full access permission.
+However, there may be cases where you have problems accessing those files even
+if you run as administrator.
In principle, Microsoft supplies you with the way +to cease the ownership of those files and thus change the permissions. +However, a much better solution to working with and changing Win32 permissions +is the program {\bf SetACL}, which can be found at +\elink{http://setacl.sourceforge.net/}{\url{http://setacl.sourceforge.net/}}. + +If you have not installed Bacula while running as Administrator +and if Bacula is not running as a Process with the userid (User Name) SYSTEM, +then it is very unlikely that it will have sufficient permission to +access all your files. + +Some users have experienced problems restoring files that participate in +the Active Directory. They also report that changing the userid under which +Bacula (bacula-fd.exe) runs, from SYSTEM to a Domain Admin userid, resolves +the problem. + + +\section{Manually resetting the Permissions} +\index[general]{Manually resetting the Permissions} +\index[general]{Permissions!Manually resetting the} + +The following solution was provided by Dan Langille \lt{}dan at langille in +the dot org domain\gt{}. The steps are performed using Windows 2000 Server but +they should apply to most Win32 platforms. The procedure outlines how to deal +with a problem which arises when a restore creates a top-level new directory. +In this example, "top-level" means something like {\bf +c:\textbackslash{}src}, not {\bf c:\textbackslash{}tmp\textbackslash{}src} +where {\bf c:\textbackslash{}tmp} already exists. If a restore job specifies / +as the {\bf Where:} value, this problem will arise. + +The problem appears as a directory which cannot be browsed with Windows +Explorer. The symptoms include the following message when you try to click on +that directory: + +\includegraphics{./access-is-denied.eps} + +If you encounter this message, the following steps will change the permissions +to allow full access. + +\begin{enumerate} +\item right click on the top level directory (in this example, {\bf c:/src}) + and select {\bf Properties}. +\item click on the Security tab. +\item If the following message appears, you can ignore it, and click on {\bf + OK}. + +\includegraphics{./view-only.eps} + +You should see something like this: + +\includegraphics{./properties-security.eps} +\item click on Advanced +\item click on the Owner tab +\item Change the owner to something other than the current owner (which is + {\bf SYSTEM} in this example as shown below). + +\includegraphics{./properties-security-advanced-owner.eps} +\item ensure the "Replace owner on subcontainers and objects" box is + checked +\item click on OK +\item When the message "You do not have permission to read the contents of + directory c:\textbackslash{}src\textbackslash{}basis. Do you wish to replace + the directory permissions with permissions granting you Full Control?", click +on Yes. + +\includegraphics{./confirm.eps} +\item Click on OK to close the Properties tab + \end{enumerate} + +With the above procedure, you should now have full control over your restored +directory. + +In addition to the above methods of changing permissions, there is a Microsoft +program named {\bf cacls} that can perform similar functions. + +\section{Backing Up the WinNT/XP/2K System State} +\index[general]{State!Backing Up the WinNT/XP/2K System} +\index[general]{Backing Up the WinNT/XP/2K System State} + +A suggestion by Damian Coutts using Microsoft's NTBackup utility in +conjunction with Bacula should permit a full restore of any damaged system +files on Win2K/XP. 
His suggestion is to do an NTBackup of the critical system +state prior to running a Bacula backup with the following command: + +\footnotesize +\begin{verbatim} +ntbackup backup systemstate /F c:\systemstate.bkf +\end{verbatim} +\normalsize + +The {\bf backup} is the command, the {\bf systemstate} says to backup only the +system state and not all the user files, and the {\bf /F +c:\textbackslash{}systemstate.bkf} specifies where to write the state file. +this file must then be saved and restored by Bacula. + +To restore the system state, you first reload a base operating system if the +OS is damaged, otherwise, this is not necessary, then you would use Bacula to +restore all the damaged or lost user's files and to recover the {\bf +c:\textbackslash{}systemstate.bkf} file. Finally if there are any damaged or +missing system files or registry problems, you run {\bf NTBackup} and {\bf +catalogue} the system statefile, and then select it for restore. The +documentation says you can't run a command line restore of the systemstate. + +To the best of my knowledge, this has not yet been tested. If you test it, +please report your results to the Bacula email list. + +\section{Considerations for Filename Specifications} +\index[general]{Windows!Considerations for Filename Specifications} + +Please see the +\ilink{Director's Configuration chapter}{win32} of this manual +for important considerations on how to specify Windows paths in Bacula FileSet +Include and Exclude directives. + +\index[general]{Unicode} +Bacula versions prior to 1.37.28 do not support Windows Unicode filenames. +As of that version, both {\bf bconsole} and {\bf bwx-console} support Windows +Unicode filenames. There may still be some problems with multiple byte +characters (e.g. Chinese, ...) where it is a two byte character but the +displayed character is not two characters wide. + +\index[general]{Win32 Path Length Restriction} +Path/filenames longer than 260 characters (up to 32,000) are supported +beginning with Bacula version 1.39.20. Older Bacula versions support +only 260 character path/filenames. + +\section{Win32 Specific File daemon Command Line} +\index[general]{Client!Win32 Specific File daemon Command Line Options} +\index[general]{Win32 Specific File daemon Command Line Options} + +These options are not normally seen or used by the user, and are documented +here only for information purposes. At the current time, to change the default +options, you must either manually run {\bf Bacula} or you must manually edit +the system registry and modify the appropriate entries. + +In order to avoid option clashes between the options necessary for {\bf +Bacula} to run on Windows and the standard Bacula options, all Windows +specific options are signaled with a forward slash character (/), while as +usual, the standard Bacula options are signaled with a minus (-), or a minus +minus (\verb:--:). All the standard Bacula options can be used on the Windows +version. 
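+For instance, a sketch of a command line that mixes the two styles (the
+installation path matches the default mentioned earlier in this chapter, but
+your path may differ) would be:
+
+\footnotesize
+\begin{verbatim}
+c:\Program Files\bacula\bin\bacula-fd.exe /service -c "c:\Program Files\bacula\bin\bacula-fd.conf"
+\end{verbatim}
+\normalsize
+
+where {\bf /service} is a Windows-specific option (described below) and {\bf -c}
+is the standard Bacula option that names the configuration file.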
In addition, the following Windows only options are implemented: + +\begin{description} + +\item [/service ] + \index[fd]{/service} + Start Bacula as a service + +\item [/run ] + \index[fd]{/run} + Run the Bacula application + +\item [/install ] + \index[fd]{/install} + Install Bacula as a service in the system registry + +\item [/remove ] + \index[fd]{/remove} + Uninstall Bacula from the system registry + +\item [/about ] + \index[fd]{/about} + Show the Bacula about dialogue box + +\item [/status ] + \index[fd]{/status} + Show the Bacula status dialogue box + +\item [/events ] + \index[fd]{/events} + Show the Bacula events dialogue box (not yet implemented) + +\item [/kill ] + \index[fd]{/kill} + Stop any running {\bf Bacula} + +\item [/help ] + \index[fd]{/help} + Show the Bacula help dialogue box +\end{description} + +It is important to note that under normal circumstances the user should never +need to use these options as they are normally handled by the system +automatically once Bacula is installed. However, you may note these options in +some of the .bat files that have been created for your use. + +\section{Shutting down Windows Systems} +\index[general]{Shutting down Windows Systems} +\index[general]{Systems!Shutting down Windows} + +Some users like to shutdown their Windows machines after a backup using a +Client Run After Job directive. If you want to do something similar, you might +take the shutdown program from the +\elink{apcupsd project}{\url{http://www.apcupsd.com}} or one from the +\elink{Sysinternals project} +{\url{http://www.sysinternals.com/ntw2k/freeware/psshutdown.shtml}}. diff --git a/docs/manuals/de/console/Makefile b/docs/manuals/de/console/Makefile new file mode 100644 index 00000000..9af2083b --- /dev/null +++ b/docs/manuals/de/console/Makefile @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=console + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Console and Operators Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Consol*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/console/Makefile.in b/docs/manuals/de/console/Makefile.in new file mode 100644 index 00000000..9af2083b --- /dev/null +++ b/docs/manuals/de/console/Makefile.in @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=console + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . 
+ @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Console and Operators Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Consol*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/console/bconsole.tex b/docs/manuals/de/console/bconsole.tex new file mode 100644 index 00000000..57b235ff --- /dev/null +++ b/docs/manuals/de/console/bconsole.tex @@ -0,0 +1,1615 @@ +%% +%% + +\chapter{Bacula Console} +\label{_ConsoleChapter} +\index[general]{Console!Bacula} +\index[general]{Bacula Console} +\index[general]{Console!Bacula} +\index[general]{Bacula Console} + +Die {\bf Bacula Console} (manchmal auch das BenutzerInterface genannt) +ist ein Programm, dass es dem Anwender oder System Aministrator erlaub, +den Bacula-Director-Dienst im laufenden Betrieb zu kontrollieren. + +Momentan gibt es zwei Versionen des Console-Programms: eine Shell- (TTY) +und eine GNOME GUI-Version. Beide erlauben es dem Administrator oder +autorisierten Benutzern Bacula zu steuern. 
Sie k\"{o}nnen sich den Status +eines bestimmten Jobs bzw. den Inhalt des Katalogs anzeigen lassen, +oder bestimmte Aktionen mit Tapes und Autochangern durchf\"{u}hren. + +Zus\"{a}tzlich gibt es noch die bwx-Console, die auf wxWidgets aufbaut, +und eine M\"{o}glichkeit bietet, den Wiederherstellungsproze{\ss} graphisch zu steuern. +Die bwx-Console befindet sich in einem fr\"{u}hen Entwicklungsstadium und +wurde leider seit einiger Zeit nicht weiterentwickelt. (Trotzdem kann sie sehr hilfreich sein.) + +Da sich alle Bacula-Consolen \"{u}ber das Netzwerk mit dem Director-Dienst verbinden, +ist es nicht notwendig sie auf dem selben Computer laufen zu lassen. + +Ein gewisses, minimales Grundwissen \"{u}ber die Console ist schon dann notwendig, +wenn Bacula auf mehr als einem Tape schreiben soll. Bacula wird n\"{a}mlich nach einem +leeren Band fragen, falls keines mehr verf\"{u}gbar ist, und erst nach dem mounten +eines neuen Tapes mittels der Console, wird Bacula weiterarbeiten k\"{o}nnen. + +\section{Console Konfiguration} +\index[general]{Console Konfiguration} +\index[general]{Konfiguration!Console} +\index[general]{Console Konfiguration} +\index[general]{Konfiguration!Console} + +Wenn Sie die Bacula-Console starten, liest sie ihre Standard-Konfigurations-Datei +namens {\bf bconsole.conf}, bzw. {\bf bgnome-console.conf} f\"{u}r die GNOME-Console, ein. +Im einfachsten Fall enth\"{a}llt diese Datei nur den Namen und die Adresse des Director-Dienstes +sowie das Passwort, dass f\"{u}r die Verbindung zum Director-Dienst ben\"{o}tigt wird. +F\"{u}r weitere Informationen zu dieser Datei, lesen Sie bitte das Kapitel \"{u}ber die \ilink{Console-Konfiguration-Datei}{ConsoleConfChapter} in diesem Handbuch. + +\section{Benutzung des Console-Programms} +\index[general]{Benutzung des Console-Programms} +\index[general]{Programm!Benutzung des Console-} +\index[general]{Benutzung des Console-Programms} +\index[general]{Programm!Benutzungs des Console-} + +Das Console-Programm kann mit den folgenden Optionen gestartet werden: +\footnotesize +\begin{verbatim} +Usage: bconsole [-s] [-c Konfigurations-Datei] [-d Debug-Level] + -c gibt die zu verwendene Konfigurations-Datei an + -dnn setzt den Debug-Lavel auf nn + -n kein conio + -s keine Signale (*) + -t test - list die Konfigurations-Datei und beendet sich dann + -? gibt diese Hilfe aus. +\end{verbatim} +\normalsize + +(*) \elink{Signale}{http://de.wikipedia.org/wiki/Signal\_\%28Computer\%29} + +Nach dem Start des Console-Programms zeigt es durch sein Prompt (*) an, +dass es auf Benutzereingaben wartet. (in der GNOME-Console gibt es kein Prompt, +geben Sie die Befehle bitte einfach in der Textbox unten im Fenster ein.) +Sie k\"{o}nnen in jeder Console einfach nur das Kommando eingeben, wenn weitere Parameter +erforderlich sind, wird das Programm Sie danach fragen. Alternativ k\"{o}nnen Sie +nat\"{u}rlich auch das komplette Kommando mit allen ben\"{o}tigten Parametern eingeben +und ausf\"{u}hren. Das normale Befehlsformat ist dieses: + +\footnotesize +\begin{verbatim} + [=] [=] ... +\end{verbatim} +\normalsize + +wobei {\bf Kommando} einer der unten aufgef\"{u}hrten Console-Befehle +und {\bf Parameter} eines der unten aufgelisteten Schl\"{u}sselw\"{o}rter ist, +dem dann meistens ein {\bf Argument} folgt. Alle Befehle k\"{o}nnen in der +k\"{u}rzesten eindeutigen Form eingegeben werden. Falls zwei Befehle mit identischen +Buchstaben anfangen, wird der ausgef\"{u}hrt, der in der Ausgabe des {\bf help}-Kommandos +am weitesten oben steht. 
Wenn Sie das andere Kommando ausf\"{u}hren m\"{o}chten m\"{u}ssen Sie +dementsprechend mehr Buchstaben eingeben, um es eindeutig anzugeben. Keiner der +Parameter darf abgek\"{u}rzt werden. + +Ein Beispiel: + +\footnotesize +\begin{verbatim} +list files jobid=23 +\end{verbatim} +\normalsize + +zeigt alle gesicherten Dateien mit der JobID 23 an. + +\footnotesize +\begin{verbatim} +show pools +\end{verbatim} +\normalsize + +zeigt alle Pool-Konfigurations-Eintr\"{a}ge an. + +Die maximale L\"{a}nge der eingegebenen Befehle, mit Parametern, ist 511 Zeichen. +Falls Sie die Console \"{u}ber ein Script ansprechen, denken Sie bitte daran, +dass Sie dieses Limit nicht \"{u}berschreiten. + +\section{Beenden des Console-Programs} +\index[general]{Programm!Beenden des Console-} +\index[general]{Beenden des Console-Programms} +\index[general]{Programm!Beenden des Console-} +\index[general]{Beenden des Console-Programms} + +Normalerweise beenden Sie das Console-Programm durch die Eingabe von {\bf quit} oder {\bf exit}. +Allerdings wartet die Console bis der Director-Dienst das Kommando best\"{a}tigt. Wenn der +Director bereits ein l\"{a}nger laufendes Kommando ausf\"{u}hrt, kann es sein, dass das Beenden +der Console einen Moment dauert. Falls Sie die Console sofort verlassen wollen, k\"{o}nnen Sie +in dem Fall das Kommando {\bf .quit} verwenden. + +Momentan gibt es keinen Weg ein laufendes Kommando nach dem Starten abzubrechen (z.B. mit STRG+C). +Allerdings k\"{o}nnen Sie jederzeit, wenn die Console Sie nach einer weiteren Eingabe fragt, +das aktuelle Kommmando beenden, indem Sie einen Punkt {\bf .} eingeben. Nach der Eingabe des Punktes, +werden Sie automatisch zum Hauptprompt oder bei verschachtelten Abfragen zum passenden letzten Prompt +zur\"{u}ckgeleitet. Bei einigen Eingaben, wie zum Beispiel der Frage nach einem Volume-Namen, wird +der Punkt als Eingabe gewertet und Sie haben beim n\"{a}chsten Prompt die M\"{o}glichkeit, +das Kommando abzubrechen. + +\label{keywords} +\section{Alphabetische Liste der Console-Schl\"{u}sselw\"{o}rter} +\index[general]{Schl\"{u}sselw\"{o}rter!Alphabetische Liste der Console} +\index[general]{Alphabetische Liste der Console-Schl\"{u}sselw\"{o}rter} +\index[general]{Schl\"{u}sselw\"{o}rter!Alphabetische Liste der Console} +\index[general]{Alphabetische Liste der Console-Schl\"{u}sselw\"{o}rter} +Wenn es nicht anders angegeben ist, ben\"{o}tigt jedes der folgenden Schl\"{u}sselw\"{o}rter +(Parameter der Console-Befehle) ein Argument, welches dem Schl\"{u}sselwort, +getrennt durch ein Gleichheitszeichen, folgt. +Ein Beispiel: +\begin{verbatim} +jobid=536 +\end{verbatim} + +Bitte beachten Sie, dass diese Liste durch die st\"{a}ndig weitergehende +Entwicklung eventuell weder komplett, noch in der Richtigen alphabetischen +Reihenfolge sein kann. + +\begin{description} +\item [restart] + Parameter des python-Kommandos, + dadurch wird der python-Interpreter neu gestartet. Ben\"{o}tigt keine Argumente. +\item [all] + Parameter des status und show-Kommandos, + dadurch werden alle Komponenten oder Eintr\"{a}ge ausgew\"{a}hlt +\item [allfrompool] + Parameter des update-Kommandos, + gibt an das alle Volumes des (im Parameter pool angegebenen) Pools + aktualisiert werden sollen. +\item [allfrompools] + Parameter des update-Kommandos, + gibt an das alle Volumes aller Pools aktualisiert werden sollen. +\item [before] + Parameter des restore-Kommandos. +\item [bootstrap] + Parameter des restore-Kommandos. 
+\item [catalog] + im use-Kommando erlaubt, + um den zu benutzenden Katalog auszuw\"{a}hlen +\item [catalogs] + Parameter des show-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [client | fd] +\item [clients] + Parameter des show, list und llist-Kommandos, + bezeichnet alle Clients. Ben\"{o}tigt keine Argumente. +\item [counters] + im show-Kommando erlaubt. + Ben\"{o}tigt keine Argumente. +\item [current] + Parameter des restore-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [days] + definiert die Anzahl der Tage, die das "list nextvol"-Kommando + in Betracht ziehen soll. Der Parameter days kann auch im Kommando + "status director" verwendet werden, um die geplanten Jobs f\"{u}r die + angegebene Anzahl Tage zu zeigen. +\item [devices] + Parameter des show-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [dir | director] +\item [directors] + Parameter des show-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [directory] + Parameter des restore-Kommandos. + Das Argument gibt das wiederherzustellende Verzeichnis an. +\item [enabled] + Dieser Parameter kann bei den Kommandos "update volumes" und "update slots" + verwendet werden. Das Argument kann yes, true, no, false, archived, 0,1 oder 2 sein. + 0 ist identisch mit no oder false, 1 mit yes oder true und 2 mit archived. + Archived Volumes werden weder benutzt noch automatisch aus dem Katalog gel\"{o}scht. + Volumes die nicht enabled sind, werden nicht f\"{u}r das Backup oder die Wiederherstellung benutzt. +\item [done] + wird im restore-Kommando benutzt. + Ben\"{o}tigt keine Argumente. +\item [file] + Parameter des restore-Kommandos. +\item [files] + Parameter des list und llist-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [fileset] +\item [filesets] + Parameter des show-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [help] + Parameter des show-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [jobs] + Parameter des show, list und llist-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [jobmedia] + Parameter des list und llist-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [jobtotals] + Parameter des list und llist-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [jobid] + Parameter des list und llist-Kommandos. + Die jobid ist die numerische Jobid, die im Job-Report angezeigt wird. + Sie ist der Index f\"{u}r die Datenbankeintr\"{a}ge des entsprechenden Jobs. + Da sie f\"{u}r alle in der Datenbank existierenden Jobs einzigartig ist, + kann sie erst wiederverwendet werden, wenn der vorherige Job mit dieser Jobid + aus der Datenbank gel\"{o}scht wurde. +\item [job | jobname] + Parameter des list und llist-Kommandos. + Der Job oder JobName entspricht dem Namen den Sie im Job-Eintr\"{a}g + angegeben haben, somit bezieht er sich auf alle Jobs dieses Namens, + die jemals gelaufen sind und deren Eintr\"{a}ge noch im Katalog existieren. +\item [level] +\item [listing] + Parameter des estimate-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [limit] +\item [messages] + Parameter des show-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [media] + Parameter des list und llist-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [nextvol | nextvolume] + Parameter des list und llist-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [on] + Ben\"{o}tigt keine Argumente. +\item [off] + Ben\"{o}tigt keine Argumente. +\item [pool] +\item [pools] + Parameter des show, list und llist-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [select] + Parameter des restore-Kommandos. + Ben\"{o}tigt keine Argumente. 
+\item [storages] + Parameter des show-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [schedules] + Parameter des show-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [sd | store | storage] +\item [ujobid] + Parameter des list-Kommandos. + Die ujobid ist eine M\"{o}glichkeit einen Job eindeutig zu identifizieren. + Momentan besteht die ujobid aus dem JobNamen und der Uhrzeit wann der Job gelaufen ist. +\item [volume] +\item [volumes] + Parameter des list und llist-Kommandos. + Ben\"{o}tigt keine Argumente. +\item [where] + Parameter des restore-Kommandos. +\item [yes] + Parameter des restore-Kommandos. + Ben\"{o}tigt keine Argumente. +\end{description} + +\label{list} +\section{Alphabetische Liste der Console-Kommandos} +\index[general]{Kommandos!Alphabetische Liste der Console-} +\index[general]{Alphabetische Liste der Console-Kommandos} +\index[general]{Kommandos!Alphabetische Liste der Console-} +\index[general]{Alphabetische Liste der Console-Kommandos} + +Die folgenden Kommandos sind derzeit verf\"{u}gbar: + +\begin{description} +\item [{add [pool=\lt{}pool-name\gt{} storage=\lt{}storage\gt{} + jobid=\lt{}JobId\gt{}]} ] + \index[general]{add} + Das add-Kommando wird benutzt um Volumes zu einem bestehenden Pool + hinzuzuf\"{u}gen. Dazu wird der Volume-Eintrag in der Datenbank erzeugt + und das Volume dem Pool zugeordnet. Dabei erfolgt kein physikalischer Zugriff + auf das Volume. Nach dem hinzuf\"{u}gen zu einem Pool, geht Bacula davon + aus, dass das Volume wirklich existiert und auch bereits gelabelt ist. + Dises Kommando wird normalerweise nicht benutzt, da Bacula die Volumes + automatisch beim labeln einem Pool hinzuf\"{u}gt. Allerdings ist es hilfreich, + falls Sie ein Volume aus dem Katalog gel\"{o}scht haben und es sp\"{a}ter wieder + hinzuf\"{u}gen wollen. + + Typischerweise wird das label-Kommando anstelle des add-Kommandos benutzt, da + es au{\ss}er dem labeln des physikalischen Volumes, die identischen Schritte + wie das add-Kommando ausf\"{u}hrt. Das add-Kommando \"{a}ndert nur die Katalog-Eintr\"{a}ge + und nicht die physikalischen Volumes. Die physikalischen Volumes m\"{u}ssen + vorhanden und gelabelt sein (normalerweise mit dem label-Kommando). Trotzdem + kann das add-Kommando sinnvoll sein, wenn Sie zum Beispiel eine bestimmte Anzahl + von Volumes einem Pool hinzuf\"{u}gen wollen, wobei die Volumes erst zu einem + sp\"{a}teren Zeitpunkt gelabelt werden. Auch um ein Volume eines anderen Bacula-Systems + (bzw. anderen Director-Dienstes) zu importieren, kann das add-Kommando benutzt werden. + Die erlaubten Zeichen f\"{u}r einen Volume-Namen finden Sie weiter unten + in der Beschreibung des label-Kommandos. + +\item [autodisplay on/off] + \index[general]{autodisplay on/off} + Das autodisplay-Kommando kennt zwei Parameter: {\bf on} und {\bf off}, + wodurch die automatische Anzeige von Nachrichten in der Console entsprechend + ein- oder ausgeschaltet wird. Der Standardwert ist {\bf off}, was bedeutet, dass + Sie \"{u}ber neue Meldungen benachrichtigt werden, sie aber nicht automatisch + angezeigt werden. In der GNOME-Console ist das automatische Anzeigen dagegen + standardm\"{a}{\ss}ig aktiviert, d.h. neue Meldungen werden automatisch + ausgegeben, wenn sie vom Director-Dienst empfangen wurden (typischerweise innerhalb von + ca. 5 Sekunden nachdem sie generiert wurden). + + Wenn autodisplay auf off steht, m\"{u}ssen Sie neu Nachrichten mit dem + {\bf messages}-Kommando abrufen, um sie sich anzeigen zu lassen. 
+  Wenn autodisplay auf on steht, werden die Nachrichten angezeigt, sobald die Console sie
+  empfangen hat.
+
+\item [automount on/off]
+   \index[general]{automount on/off}
+   Das automount-Kommando kennt zwei Parameter: {\bf on} und {\bf off},
+   die entsprechend das automatische Mounten nach dem Labeln ({\bf label}-Kommando)
+   an- oder ausschalten. Der Standardwert ist on. Wenn automount ausgeschaltet ist,
+   m\"{u}ssen Sie nach dem Labeln eines Volumes dieses explizit mounten ({\bf mount}-Kommando),
+   um es benutzen zu k\"{o}nnen.
+
+\item [{cancel [jobid=\lt{}number\gt{} job=\lt{}job-name\gt{} ujobid=\lt{}unique-jobid\gt{}]}]
+   \index[general]{cancel jobid}
+   Das cancel-Kommando wird benutzt, um einen Job abzubrechen, und kennt die
+   Parameter {\bf jobid=nnn} oder {\bf job=xxx}, wobei jobid die numerische JobID ist
+   und job der Job-Name. Wenn Sie weder job noch jobid angeben, listet die Console
+   alle in Frage kommenden Jobs auf und erlaubt Ihnen, aus dieser Liste den abzubrechenden
+   Job auszuw\"{a}hlen.
+
+   Wenn ein Job als abzubrechen gekennzeichnet wurde, kann es einige Zeit dauern,
+   bis er tats\"{a}chlich beendet wird (normalerweise innerhalb einer Minute).
+   Diese Zeit ist aber abh\"{a}ngig davon, was der Job gerade tut.
+
+\item [{create [pool=\lt{}pool-name\gt{}]}]
+   \index[general]{create pool}
+   Das create-Kommando wird normalerweise nicht benutzt, da die Pool-Eintr\"{a}ge
+   im Katalog automatisch angelegt werden, wenn der Director-Dienst startet und
+   er seine Pool-Konfiguration aus den Konfigurations-Dateien einliest. Falls ben\"{o}tigt,
+   kann mit diesem Kommando ein Pool-Eintrag in der Katalog-Datenbank erstellt werden,
+   der auf einem Pool-Konfigurations-Eintrag basiert, der in der Director-Dienst-Konfiguration
+   enthalten ist. Einfach gesagt \"{u}bernimmt dieses Kommando nur den Pool-Eintrag aus der
+   Konfiguration in die Datenbank. Normalerweise wird dieses Kommando automatisch ausgef\"{u}hrt,
+   wenn der Pool zum ersten Mal in einem Job-Eintrag benutzt wird. Wenn Sie dieses Kommando
+   auf einem bestehenden Pool ausf\"{u}hren, wird der Katalog sofort aktualisiert und enth\"{a}lt
+   dann die identische Pool-Konfiguration wie die Konfigurations-Dateien. Nach dem Erstellen
+   eines Pools in den Konfigurations-Dateien werden Sie allerdings h\"{o}chstwahrscheinlich
+   das {\bf label}-Kommando benutzen, um ein oder mehrere Volumes dem neuen Pool hinzuzuf\"{u}gen
+   und die entsprechenden Eintr\"{a}ge im Katalog zu erzeugen, anstatt des create-Kommandos.
+
+   Wenn ein Job gestartet wird und Bacula bemerkt, dass kein passender Pool-Eintrag im Katalog,
+   wohl aber in den Konfigurations-Dateien vorhanden ist, dann wird der Pool im Katalog
+   automatisch angelegt. Wenn Sie m\"{o}chten, dass der Pool-Eintrag sofort (ohne dass ein Job
+   mit diesem Pool gestartet wurde) im Katalog erscheint, k\"{o}nnen Sie einfach dieses Kommando
+   ausf\"{u}hren, um diesen Vorgang zu erzwingen.
+
+\item [{delete [volume=\lt{}vol-name\gt{} pool=\lt{}pool-name\gt{} job
+   jobid=\lt{}id\gt{}]}]
+   \index[general]{delete}
+   Das delete-Kommando wird benutzt, um ein Volume, einen Pool oder einen Job-Eintrag,
+   sowie jeweils alle dazugeh\"{o}rigen Datenbank-Eintr\"{a}ge, aus dem Katalog zu
+   entfernen. Das Kommando \"{a}ndert nur die Katalog-Datenbank, es hat keine
+   Auswirkungen auf die Konfigurations-Dateien oder die Daten auf den Volumes.
+   Wir empfehlen Ihnen, dieses Kommando nur zu benutzen, wenn Sie wirklich wissen, was Sie tun.
+
+   Wenn der Parameter {\bf Volume} angegeben wird, wird das entsprechende Volume aus dem Katalog
+   gel\"{o}scht, wenn ein {\bf Pool} angegeben wird, der entsprechende Pool und bei Angabe des Parameters
+   {\bf Job} der entsprechende Job, sowie alle zu diesem Job geh\"{o}renden JobMedia- und Datei-Eintr\"{a}ge.
+   Das delete-Kommando kann folgenderma{\ss}en aufgerufen werden:
+
+\begin{verbatim}
+delete pool=<pool-name> oder
+\end{verbatim}
+
+\begin{verbatim}
+delete volume=<volume-name> pool=<pool-name> oder
+\end{verbatim}
+
+\begin{verbatim}
+delete JobId=<job-id> JobId=<job-id2> ... oder
+\end{verbatim}
+
+\begin{verbatim}
+delete Job JobId=n,m,o-r,t ...
+\end{verbatim}
+
+   Das erste Beispiel l\"{o}scht einen Pool-Eintrag aus der Katalog-Datenbank.
+   Das zweite l\"{o}scht einen Volume-Eintrag aus dem angegebenen Pool
+   und das dritte Beispiel l\"{o}scht die genannten JobID-Eintr\"{a}ge aus
+   dem Katalog. Es werden die JobIDs n, m, o, p, q, r und t gel\"{o}scht,
+   wobei die JobIDs n, m, o ... nat\"{u}rlich Zahlen entsprechen m\"{u}ssen.
+   Wie Sie sehen, kann das delete-Kommando Listen von JobIDs und auch Bereiche
+   (z.B. o-r) verarbeiten.
+
+\item [disable job\lt{}job-name\gt{}]
+   \index[general]{disable}
+   Das disable-Kommando erlaubt es Ihnen, zu verhindern, dass ein Job
+   automatisch durch den Director-Dienst ausgef\"{u}hrt wird. Wenn Sie den Director-Dienst
+   neu starten, wird der Status des Jobs wieder auf den Wert gesetzt, der
+   im Job-Eintrag der Director-Konfiguration eingetragen ist.
+
+\item [enable job\lt{}job-name\gt{}]
+   \index[general]{enable}
+   Das enable-Kommando erlaubt es Ihnen, einen Job, der durch das
+   disable-Kommando aus der automatischen Job-Planung entfernt wurde,
+   wieder zu aktivieren. Wenn Sie den Director-Dienst neu starten,
+   wird der Status des Jobs wieder auf den Wert gesetzt, der im
+   Job-Eintrag der Director-Konfiguration eingetragen ist.
+
+\label{estimate}
+\item [estimate]
+   \index[general]{estimate}
+   Mit dem estimate-Kommando k\"{o}nnen Sie sich anzeigen lassen, welche
+   Dateien durch einen bestimmten Job gesichert werden, ohne diesen Job
+   ausf\"{u}hren zu m\"{u}ssen. Standardm\"{a}{\ss}ig wird dabei ein Voll-Backup
+   angenommen. Sie k\"{o}nnen das aber durch den Parameter level entsprechend anpassen,
+   indem Sie zum Beispiel {\bf level=Incremental} oder {\bf level=Differential} an das
+   estimate-Kommando mit \"{u}bergeben. Wenn Sie im Aufruf des Kommandos keinen Job-Namen
+   angeben, wird die Console Ihnen eine Auswahlliste der m\"{o}glichen Jobs anzeigen.
+   Zus\"{a}tzlich k\"{o}nnen Sie noch die Parameter Client und FileSet angeben. Nach dem
+   Starten des Kommandos wird der Director-Dienst den Client kontaktieren, der daraufhin
+   eine Liste der zu sichernden Dateien mit ihrer Gr\"{o}{\ss}e zur\"{u}ckgibt. Bitte beachten
+   Sie, dass das estimate-Kommando nur die Anzahl der von der Datei belegten Bl\"{o}cke zur
+   Bestimmung der Dateigr\"{o}{\ss}e einbezieht, so dass die Datenmenge, die das estimate-Kommando
+   anzeigt, immer etwas gr\"{o}{\ss}er sein wird als das echte Backup.
+
+   Wahlweise k\"{o}nnen Sie noch den Parameter {\bf listing} mit \"{u}bergeben;
+   dann wird eine Liste aller zu sichernden Dateien ausgegeben. Abh\"{a}ngig vom FileSet
+   kann diese Liste sehr lang sein und es kann daher einige Zeit dauern, alle Dateien anzuzeigen.
+   Das estimate-Kommando kann folgenderma{\ss}en aufgerufen werden:
+
+\begin{verbatim}
+estimate job=<job-name> listing client=<client-name>
+    fileset=<fileset-name> level=<level-name>
+\end{verbatim}
+
+   Die Angabe des Jobs ist ausreichend, aber Sie k\"{o}nnen durch Angabe
+   des Clients, FileSets und/oder des Backup-Levels die entsprechenden Werte \"{u}berschreiben.
+
+Zum Beispiel k\"{o}nnen Sie Folgendes eingeben:
+
+\footnotesize
+\begin{verbatim}
+ @output /tmp/listing
+ estimate job=NightlySave listing level=Incremental
+ @output
+\end{verbatim}
+\normalsize
+
+   Durch das erste Kommando wird die Ausgabe der Console in die Datei
+   {\bf /tmp/listing} umgeleitet. Dann wird durch das estimate-Kommando
+   eine Liste aller Dateien erstellt, die beim n\"{a}chsten inkrementellen
+   Backup des Jobs {\bf NightlySave} gesichert werden. Die Console gibt dabei keine
+   Meldungen aus, da die Ausgabe ja auf die Datei /tmp/listing zeigt. Durch
+   das dritte Kommando @output wird die Umleitung der Ausgabe wieder aufgehoben.
+   Beachten Sie bitte, dass die angezeigten Bytes in der Ausgabe des estimate-Kommandos
+   \"{u}ber die Angabe der Dateigr\"{o}{\ss}e im Verzeichnis-Eintrag bestimmt werden.
+   Das kann zu gro{\ss}en Abweichungen bei der ermittelten Backup-Gr\"{o}{\ss}e f\"{u}hren,
+   falls im FileSet \elink{sparse}{http://de.wikipedia.org/wiki/Sparse-Datei}-Dateien
+   vorhanden sind. Sparse-Dateien finden sich oft auf 64-Bit-Maschinen, wo sie f\"{u}r
+   bestimmte Systemdateien benutzt werden. Die angezeigten Bytes sind die Gesamtgr\"{o}{\ss}e
+   der gesicherten Dateien, wenn die FileSet-Option "sparse" nicht gesetzt ist.
+   Momentan gibt es keinen Weg, um mit dem estimate-Kommando die echte Backup-Gr\"{o}{\ss}e
+   f\"{u}r ein FileSet anzuzeigen, bei dem die sparse-Option gesetzt ist.
+
+\item [help]
+   \index[general]{help}
+   Das help-Kommando zeigt alle verf\"{u}gbaren Kommandos mit einer kurzen Beschreibung an.
+
+\item [label]
+   \index[general]{label}
+   \index[general]{relabel}
+   Das label-Kommando wird benutzt, um physikalische Volumes zu labeln.
+   Das label-Kommando kann folgenderma{\ss}en aufgerufen werden:
+
+\begin{verbatim}
+label storage=<storage-name> volume=<volume-name> slot=<slot>
+\end{verbatim}
+
+   Wenn Sie einen der Parameter storage, volume oder slot nicht angeben,
+   werden Sie von der Console danach gefragt. Der Media-Typ wird automatisch
+   anhand des Storage-Eintrags in der Director-Konfiguration gesetzt.
+   Wenn alle ben\"{o}tigten Informationen vorliegen, kontaktiert die
+   Console den angegebenen Storage-Dienst und sendet das label-Kommando.
+   Wenn das Labeln erfolgreich war, wird ein entsprechender Volume-Eintrag
+   im passenden Pool erzeugt.
+
+   Im Volume-Namen d\"{u}rfen Buchstaben, Zahlen und folgende Sonderzeichen
+   verwendet werden: Binde- ({\bf -}) und Unterstrich ({\bf \_}),
+   Doppelpunkt ({\bf :}) und Punkt ({\bf .}). Alle anderen Zeichen,
+   einschlie{\ss}lich des Leerzeichens, sind nicht erlaubt.
+   Durch diese Einschr\"{a}nkung soll sichergestellt werden, dass
+   die Volume-Namen gut lesbar sind und es nicht zu Benutzerfehlern
+   aufgrund von Sonderzeichen im Namen kommt.
+
+   Bitte beachten Sie, dass Bacula einen Ein-/Ausgabefehler meldet,
+   wenn ein neues bzw. komplett leeres Volume gelabelt wird. Bacula
+   versucht, den ersten Block des Volumes zu lesen, um ein eventuell schon
+   vorhandenes label nicht zu \"{u}berschreiben; dieser Versuch erzeugt
+   den oben genannten Fehler.
Um diesen Fehler zu vermeiden, k\"{o}nnen Sie + mit den folgenden Shell-Kommandos ein EOF am den Anfang des Volumes schreiben: + +\footnotesize +\begin{verbatim} + mt rewind + mt weof + +\end{verbatim} +\normalsize + +Das label-Kommando kann aufgrund verschiedener Gr\"{u}nde fehlschlagen: + +\begin{enumerate} +\item Der angegebene Volume-Name existiert schon in der Katalog-Datenbank + +\item Der Storage-Dienst hat schon ein Tape oder anderes Volume in dem + ben\"{o}tigten Ger\"{a}t gemountet. In diesem Fall m\"{u}ssen Sie + das Ger\"{a}t erst mit dem {\bf unmount}-Kommando freigeben und dann + ein leeres Volume zum labeln einlegen. + +\item Das Volume ist bereits gelabelt. Bacula wird niemals ein bestehendes label + \"{u}berschreiben, solange das Volume nicht abgelaufen ist und Sie das + {\bf relabel}-Kommando verwenden. + +\item Es ist kein Volume im Ger\"{a}t. +\end{enumerate} + +Es gibt zwei M\"{o}glichkeiten ein bestehendes Bacula-label zu \"{u}berschreiben. +Die brutale Methode ist es, einfach ein EOF an den Anfang des Volumes zu schreiben +(dabei wird das bestehende label durch das EOF \"{u}berschrieben). +Mit dem Programm {\bf mt} k\"{o}nnen Sie das zum Beispiel so tun: + +\footnotesize +\begin{verbatim} + [user@host]$ mt -f /dev/st0 rewind + [user@host]$ mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +Ein Festplatten-Volume k\"{o}nnen Sie auch manuell l\"{o}schen. + +Danach benutzten Sie das label-Kommando, um ein neues label zu erzeugen. +Allerdings kann diese Vorgehensweise Spuren des alten Volumes in der +Katalog-Datenbank hinterlassen. + +Die bevorzugte Methode ein Volume neu zu labeln sollte es sein, +zuerst das Volume als bereinigt (purged) zu markieren. Das passiert entweder automatisch, +wenn die Aufbewahrungszeit (Volume-Retention) f\"{u}r das Volume abl\"{a}uft, +oder kann aber auch mit dem {\bf purge}-Kommando erzwungen werden. +Danach k\"{o}nnen Sie das {\bf relabel}-Kommando, wie weiter unten beschrieben, verwenden. + +Falls Ihr Autochanger Barcode-Labels unterst\"{u}tzt, k\"{o}nnen Sie +alle Volumes im Autochanger, eins nach dem anderen, mit dem Kommando +{\bf label barcodes} labeln. Dabei wird jedes Tape mit Barcode nacheinander +im Laufwerk gemountet und mit der auf dem Barcode enthaltenen Zeichenfolge +als Namen gelabelt. Ein entsprechender Katalog-Eintrag wird automatisch +mit erzeugt. Jedes Volume mit einem Barcode der mit den Zeichen beginnt, +die im Pool-Eintrag als CleaningPrefix konfiguriert sind, wird wie ein +Reinigungsband behandelt und nicht gelabelt. Allerdings wird dabei auch +ein Katalog-Eintrag f\"{u}r das Reinigungsband erstellt. + +Als Beispiel, mit dem Eintrag: +\footnotesize +\begin{verbatim} + Pool { + Name ... + Cleaning Prefix = "CLN" + } +\end{verbatim} +\normalsize + +wird jedes Tape, dessen Barcode mit CLN beginnt, als Reinigungsband betrachtet +und nicht automatisch gemountet. +Das label-Kommando kann folgenderma{\ss}en aufgerufen werden: + +\footnotesize +\begin{verbatim} +label storage=xxx pool=yyy slots=1-5,10 barcodes +\end{verbatim} +\normalsize + +\item [list] + \index[general]{list} + Das list-Kommando zeigt den angegebenen Inhalt der Katalog-Datenbank an. + Die verschiedenen Felder jedes Eintrags werden in einer Zeile ausgegeben. 
+   Die verschiedenen M\"{o}glichkeiten sind:
+\footnotesize
+\begin{verbatim}
+ list jobs
+
+ list jobid=<id>               (zeigt den Job mit dieser JobID an)
+
+ list ujobid=<unique-job-name> (zeigt den Job mit diesem eindeutigen Namen an)
+
+ list job=<job-name>           (zeigt alle Jobs mit diesem Namen an)
+
+ list jobname=<job-name>       (identisch mit dem oberen)
+
+   Im oberen Beispiel k\"{o}nnen Sie auch den Parameter limit=nn
+   hinzuf\"{u}gen, um die Ausgabe des Kommandos auf nn Jobs zu begrenzen.
+
+ list jobmedia
+
+ list jobmedia jobid=<id>
+
+ list jobmedia job=<job-name>
+
+ list files jobid=<id>
+
+ list files job=<job-name>
+
+ list pools
+
+ list clients
+
+ list jobtotals
+
+ list volumes
+
+ list volumes jobid=<id>
+
+ list volumes pool=<pool-name>
+
+ list volumes job=<job-name>
+
+ list volume=<volume-name>
+
+ list nextvolume job=<job-name>
+
+ list nextvol job=<job-name>
+
+ list nextvol job=<job-name> days=nnn
+
+\end{verbatim}
+\normalsize
+
+   Die meisten der oben genannten Parameter sollten selbsterkl\"{a}rend sein.
+   \"{U}blicherweise werden Sie, falls Sie nicht gen\"{u}gend Parameter angeben,
+   von der Console nach den fehlenden Informationen gefragt.
+
+   Das {\bf list nextvol}-Kommando gibt den Volume-Namen aus, der von dem angegebenen Job
+   beim n\"{a}chsten Backup benutzt werden wird. Allerdings sollten Sie beachten, dass
+   das tats\"{a}chlich benutzte Volume von einer Reihe von Faktoren, wie zum Beispiel
+   von den vorher laufenden Jobs oder der Zeit, wann der Job l\"{a}uft, abh\"{a}ngen kann.
+   Eventuell wird ein Tape schon voll sein, das aber noch freien Platz hatte, als Sie
+   das Kommando ausf\"{u}hrten. Dieses Kommando gibt Ihnen also nur einen Hinweis darauf,
+   welches Tape benutzt werden k\"{o}nnte, aber es kann keine definitive Aussage dar\"{u}ber treffen.
+   Zus\"{a}tzlich kann dieses Kommando mehrere Seiteneffekte haben, da es denselben
+   Algorithmus durchl\"{a}uft wie ein echter Backup-Job. Das bedeutet, dass es dazu f\"{u}hren kann,
+   dass aufgrund dieses Kommandos Volumes automatisch recycled oder gel\"{o}scht (purged) werden.
+   Standardm\"{a}{\ss}ig muss der angegebene Job innerhalb der n\"{a}chsten zwei Tage laufen,
+   ansonsten wird kein Volume f\"{u}r den Job gefunden. Allerdings k\"{o}nnen Sie durch Angabe des Parameters
+   {\bf days=nnn} bis zu 50 Tage in die Zukunft angeben, die das Kommando in die Berechnung
+   mit einbeziehen soll. Falls Sie zum Beispiel freitags sehen wollen, welches Volume am Montag
+   voraussichtlich benutzt wird, k\"{o}nnen Sie folgendes Kommando benutzen:
+   {\bf list nextvol job=MyJob days=3}.
+
+   Wenn Sie bestimmte, von Ihnen \"{o}fter ben\"{o}tigte, eigene Kommandos anlegen wollen,
+   um sich bestimmte Inhalte der Katalog-Datenbank anzeigen zu lassen,
+   k\"{o}nnen Sie diese der Datei {\bf query.sql} hinzuf\"{u}gen. Allerdings
+   erfordert das einiges an Wissen \"{u}ber SQL-Kommandos. Lesen Sie dazu bitte
+   den Abschnitt \"{u}ber das {\bf query}-Kommando in diesem Kapitel.
+
+   Weiter unten finden Sie auch eine Beispiel-Ausgabe des {\bf llist}-Kommandos,
+   das Ihnen den kompletten Inhalt des Katalogs zu einem bestimmten Konfigurations-Eintrag
+   anzeigt.
+ + Als ein Beispiel, kann Ihnen der Aufruf des Kommandos {\bf list pools} die folgenden + Ausgaben anzeigen: + +\footnotesize +\begin{verbatim} ++------+---------+---------+---------+----------+-------------+ +| PoId | Name | NumVols | MaxVols | PoolType | LabelFormat | ++------+---------+---------+---------+----------+-------------+ +| 1 | Default | 0 | 0 | Backup | * | +| 2 | Recycle | 0 | 8 | Backup | File | ++------+---------+---------+---------+----------+-------------+ +\end{verbatim} +\normalsize + + As mentioned above, the {\bf list} command lists what is in the + database. Some things are put into the database immediately when Bacula + starts up, but in general, most things are put in only when they are + first used, which is the case for a Client as with Job records, etc. + + Bacula should create a client record in the database the first time you + run a job for that client. Doing a {\bf status} will not cause a + database record to be created. The client database record will be + created whether or not the job fails, but it must at least start. When + the Client is actually contacted, additional info from the client will + be added to the client record (a "uname -a" output). + + If you want to see what Client resources you have available in your conf + file, you use the Console command {\bf show clients}. + +\item [llist] + \index[general]{llist} + The llist or "long list" command takes all the same arguments that the + list command described above does. The difference is that the llist + command list the full contents of each database record selected. It + does so by listing the various fields of the record vertically, with one + field per line. It is possible to produce a very large number of output + lines with this command. + + If instead of the {\bf list pools} as in the example above, you enter + {\bf llist pools} you might get the following output: + +\footnotesize +\begin{verbatim} + PoolId: 1 + Name: Default + NumVols: 0 + MaxVols: 0 + UseOnce: 0 + UseCatalog: 1 + AcceptAnyVolume: 1 + VolRetention: 1,296,000 + VolUseDuration: 86,400 + MaxVolJobs: 0 + MaxVolBytes: 0 + AutoPrune: 0 + Recycle: 1 + PoolType: Backup + LabelFormat: * + + PoolId: 2 + Name: Recycle + NumVols: 0 + MaxVols: 8 + UseOnce: 0 + UseCatalog: 1 + AcceptAnyVolume: 1 + VolRetention: 3,600 + VolUseDuration: 3,600 + MaxVolJobs: 1 + MaxVolBytes: 0 + AutoPrune: 0 + Recycle: 1 + PoolType: Backup + LabelFormat: File + +\end{verbatim} +\normalsize + +\item [messages] + \index[general]{messages} + This command causes any pending console messages to be immediately displayed. + + +\item [mount] + \index[general]{mount} + The mount command is used to get Bacula to read a volume on a physical + device. It is a way to tell Bacula that you have mounted a tape and + that Bacula should examine the tape. This command is normally + used only after there was no Volume in a drive and Bacula requests you to mount a new + Volume or when you have specifically unmounted a Volume with the {\bf + unmount} console command, which causes Bacula to close the drive. If + you have an autoloader, the mount command will not cause Bacula to + operate the autoloader unless you specify a {\bf slot} and possibly a + {\bf drive}. 
The various forms of the mount command are: + +mount storage=\lt{}storage-name\gt{} [ slot=\lt{}num\gt{} ] [ + drive=\lt{}num\gt{} ] + +mount [ jobid=\lt{}id\gt{} | job=\lt{}job-name\gt{} ] + + If you have specified {\bf Automatic Mount = yes} in the Storage daemon's + Device resource, under most circumstances, Bacula will automatically access + the Volume unless you have explicitly {\bf unmount}ed it in the Console + program. + +\item[python] + \index[general]{python} + The python command takes a single argument {\bf restart}: + +python restart + + This causes the Python interpreter in the Director to be reinitialized. + This can be helpful for testing because once the Director starts and the + Python interpreter is initialized, there is no other way to make it + accept any changes to the startup script {\bf DirStartUp.py}. For more + details on Python scripting, please see the \ilink{Python + Scripting}{PythonChapter} chapter of this manual. + +\label{ManualPruning} +\item [prune] + \index[general]{prune} + The Prune command allows you to safely remove expired database records + from Jobs and Volumes. This command works only on the Catalog database + and does not affect data written to Volumes. In all cases, the Prune + command applies a retention period to the specified records. You can + Prune expired File entries from Job records; you can Prune expired Job + records from the database, and you can Prune both expired Job and File + records from specified Volumes. + +prune files|jobs|volume client=\lt{}client-name\gt{} +volume=\lt{}volume-name\gt{} + + For a Volume to be pruned, the {\bf VolStatus} must be Full, Used, or + Append, otherwise the pruning will not take place. + +\item [purge] + \index[general]{purge} + The Purge command will delete associated Catalog database records from + Jobs and Volumes without considering the retention period. {\bf Purge} + works only on the Catalog database and does not affect data written to + Volumes. This command can be dangerous because you can delete catalog + records associated with current backups of files, and we recommend that + you do not use it unless you know what you are doing. The permitted + forms of {\bf purge} are: + +purge files jobid=\lt{}jobid\gt{}|job=\lt{}job-name\gt{}|client=\lt{}client-name\gt{} + +purge jobs client=\lt{}client-name\gt{} (of all jobs) + +purge volume|volume=\lt{}vol-name\gt{} (of all jobs) + +For the {\bf purge} command to work on Volume Catalog database records the +{\bf VolStatus} must be Append, Full, Used, or Error. + +The actual data written to the Volume will be unaffected by this command. + +\item [relabel] + \index[general]{relabel} + \index[general]{relabel} + This command is used to label physical volumes. The full form of this + command is: + +relabel storage=\lt{}storage-name\gt{} oldvolume=\lt{}old-volume-name\gt{} + volume=\lt{}newvolume-name\gt{} + + If you leave out any part, you will be prompted for it. In order for + the Volume (old-volume-name) to be relabeled, it must be in the catalog, + and the volume status must be marked {\bf Purged} or {\bf Recycle}. + This happens automatically as a result of applying retention periods, or + you may explicitly purge the volume using the {\bf purge} command. + + Once the volume is physically relabeled, the old data previously written + on the Volume is lost and cannot be recovered. 
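+
+   As a short sketch (the storage and volume names below are illustrative
+   placeholders only, not values taken from any particular configuration),
+   relabeling a purged volume could look like this:
+
+\footnotesize
+\begin{verbatim}
+relabel storage=DDS-4 oldvolume=TestVolume001 volume=TestVolume002
+\end{verbatim}
+\normalsize
+
+   After the command succeeds, the catalog record carries the new volume
+   name and the data previously written to the old volume is no longer
+   accessible.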
+ +\item [release] + \index[general]{release} + This command is used to cause the Storage daemon to rewind (release) the + current tape in the drive, and to re-read the Volume label the next time + the tape is used. + +release storage=\lt{}storage-name\gt{} + + After a release command, the device is still kept open by Bacula (unless + Always Open is set to No in the Storage Daemon's configuration) so it + cannot be used by another program. However, with some tape drives, the + operator can remove the current tape and to insert a different one, and + when the next Job starts, Bacula will know to re-read the tape label to + find out what tape is mounted. If you want to be able to use the drive + with another program (e.g. {\bf mt}), you must use the {\bf unmount} + command to cause Bacula to completely release (close) the device. + +\item [reload] + \index[general]{reload} + The reload command causes the Director to re-read its configuration + file and apply the new values. The new values will take effect + immediately for all new jobs. However, if you change schedules, + be aware that the scheduler pre-schedules jobs up to two hours in + advance, so any changes that are to take place during the next two + hours may be delayed. Jobs that have already been scheduled to run + (i.e. surpassed their requested start time) will continue with the + old values. New jobs will use the new values. Each time you issue + a reload command while jobs are running, the prior config values + will queued until all jobs that were running before issuing + the reload terminate, at which time the old config values will + be released from memory. The Directory permits keeping up to + ten prior set of configurations before it will refuse a reload + command. Once at least one old set of config values has been + released it will again accept new reload commands. + + While it is possible to reload the Director's configuration on the fly, + even while jobs are executing, this is a complex operation and not + without side effects. Accordingly, if you have to reload the Director's + configuration while Bacula is running, it is advisable to restart the + Director at the next convenient opportunity. + +\label{restore_command} +\item [restore] + \index[general]{restore} + The restore command allows you to select one or more Jobs (JobIds) to be + restored using various methods. Once the JobIds are selected, the File + records for those Jobs are placed in an internal Bacula directory tree, + and the restore enters a file selection mode that allows you to + interactively walk up and down the file tree selecting individual files + to be restored. This mode is somewhat similar to the standard Unix {\bf + restore} program's interactive file selection mode. + +restore storage=\lt{}storage-name\gt{} client=\lt{}backup-client-name\gt{} + where=\lt{}path\gt{} pool=\lt{}pool-name\gt{} fileset=\lt{}fileset-name\gt{} + restoreclient=\lt{}restore-client-name\gt{} + select current all done + + Where {\bf current}, if specified, tells the restore command to + automatically select a restore to the most current backup. If not + specified, you will be prompted. The {\bf all} specification tells the + restore command to restore all files. If it is not specified, you will + be prompted for the files to restore. For details of the {\bf restore} + command, please see the \ilink{Restore Chapter}{RestoreChapter} of this + manual. 
+ + The client keyword initially specifies the client from which the backup + was made and the client to which the restore will be make. However, + if the restoreclient keyword is specified, then the restore is written + to that client. + +\item [run] + \index[general]{run} + This command allows you to schedule jobs to be run immediately. The full form + of the command is: + +run job=\lt{}job-name\gt{} client=\lt{}client-name\gt{} + fileset=\lt{}FileSet-name\gt{} level=\lt{}level-keyword\gt{} + storage=\lt{}storage-name\gt{} where=\lt{}directory-prefix\gt{} + when=\lt{}universal-time-specification\gt{} yes + + Any information that is needed but not specified will be listed for + selection, and before starting the job, you will be prompted to accept, + reject, or modify the parameters of the job to be run, unless you have + specified {\bf yes}, in which case the job will be immediately sent to + the scheduler. + + On my system, when I enter a run command, I get the following prompt: + +\footnotesize +\begin{verbatim} +A job name must be specified. +The defined Job resources are: + 1: Matou + 2: Polymatou + 3: Rufus + 4: Minimatou + 5: Minou + 6: PmatouVerify + 7: MatouVerify + 8: RufusVerify + 9: Watchdog +Select Job resource (1-9): + +\end{verbatim} +\normalsize + +If I then select number 5, I am prompted with: + +\footnotesize +\begin{verbatim} +Run Backup job +JobName: Minou +FileSet: Minou Full Set +Level: Incremental +Client: Minou +Storage: DLTDrive +Pool: Default +When: 2003-04-23 17:08:18 +OK to run? (yes/mod/no): + +\end{verbatim} +\normalsize + +If I now enter {\bf yes}, the Job will be run. If I enter {\bf mod}, I will +be presented with the following prompt. + +\footnotesize +\begin{verbatim} +Parameters to modify: + 1: Level + 2: Storage + 3: Job + 4: FileSet + 5: Client + 6: When + 7: Pool +Select parameter to modify (1-7): + +\end{verbatim} +\normalsize + +If you wish to start a job at a later time, you can do so by setting the When +time. Use the {\bf mod} option and select {\bf When} (no. 6). Then enter the +desired start time in YYYY-MM-DD HH:MM:SS format. + +\item [setdebug] + \index[general]{setdebug} + \index[general]{setdebug} + \index[general]{debugging} + \index[general]{debugging Win32} + \index[general]{Windows!debugging} + This command is used to set the debug level in each daemon. The form of this + command is: + +setdebug level=nn [trace=0/1 client=\lt{}client-name\gt{} | dir | director | + storage=\lt{}storage-name\gt{} | all] + + If trace=1 is set, then tracing will be enabled, and the daemon will be + placed in trace mode, which means that all debug output as set by the + debug level will be directed to the file {\bf bacula.trace} in the + current directory of the daemon. Normally, tracing is needed only for + Win32 clients where the debug output cannot be written to a terminal or + redirected to a file. When tracing, each debug output message is + appended to the trace file. You must explicitly delete the file when + you are done. + +\item [show] + \index[general]{show} + \index[general]{show} + The show command will list the Director's resource records as defined in + the Director's configuration file (normally {\bf bacula-dir.conf}). + This command is used mainly for debugging purposes by developers. + The following keywords are accepted on the + show command line: catalogs, clients, counters, devices, directors, + filesets, jobs, messages, pools, schedules, storages, all, help. 
+ Please don't confuse this command + with the {\bf list}, which displays the contents of the catalog. + +\item [sqlquery] + \index[general]{sqlquery} + The sqlquery command puts the Console program into SQL query mode where + each line you enter is concatenated to the previous line until a + semicolon (;) is seen. The semicolon terminates the command, which is + then passed directly to the SQL database engine. When the output from + the SQL engine is displayed, the formation of a new SQL command begins. + To terminate SQL query mode and return to the Console command prompt, + you enter a period (.) in column 1. + + Using this command, you can query the SQL catalog database directly. + Note you should really know what you are doing otherwise you could + damage the catalog database. See the {\bf query} command below for + simpler and safer way of entering SQL queries. + + Depending on what database engine you are using (MySQL, PostgreSQL or + SQLite), you will have somewhat different SQL commands available. For + more detailed information, please refer to the MySQL, PostgreSQL or + SQLite documentation. + +\item [status] + \index[general]{status} + This command will display the status of the next jobs that are scheduled + during the next 24 hours as well as the status of currently + running jobs. The full form of this command is: + +status [all | dir=\lt{}dir-name\gt{} | director | + client=\lt{}client-name\gt{} | storage=\lt{}storage-name\gt{} | + days=nnn] + + If you do a {\bf status dir}, the console will list any currently + running jobs, a summary of all jobs scheduled to be run in the next 24 + hours, and a listing of the last ten terminated jobs with their statuses. + The scheduled jobs summary will include the Volume name to be used. You + should be aware of two things: 1. to obtain the volume name, the code + goes through the same code that will be used when the job runs, but it + does not do pruning nor recycling of Volumes; 2. The Volume listed is + at best a guess. The Volume actually used may be different because of + the time difference (more durations may expire when the job runs) and + another job could completely fill the Volume requiring a new one. + + In the Running Jobs listing, you may find the following types of + information: + + +\footnotesize +\begin{verbatim} +2507 Catalog MatouVerify.2004-03-13_05.05.02 is waiting execution +5349 Full CatalogBackup.2004-03-13_01.10.00 is waiting for higher + priority jobs to finish +5348 Differe Minou.2004-03-13_01.05.09 is waiting on max Storage jobs +5343 Full Rufus.2004-03-13_01.05.04 is running +\end{verbatim} +\normalsize + + Looking at the above listing from bottom to top, obviously JobId 5343 + (Rufus) is running. JobId 5348 (Minou) is waiting for JobId 5343 to + finish because it is using the Storage resource, hence the "waiting on + max Storage jobs". JobId 5349 has a lower priority than all the other + jobs so it is waiting for higher priority jobs to finish, and finally, + JobId 2508 (MatouVerify) is waiting because only one job can run at a + time, hence it is simply "waiting execution" + + If you do a {\bf status dir}, it will by default list the first + occurrence of all jobs that are scheduled today and tomorrow. If you + wish to see the jobs that are scheduled in the next three days (e.g. on + Friday you want to see the first occurrence of what tapes are scheduled + to be used on Friday, the weekend, and Monday), you can add the {\bf + days=3} option. 
Note, a {\bf days=0} shows the first occurrence of jobs
+  scheduled today only. If you have multiple run statements, the first
+  occurrence of each run statement for the job will be displayed for the
+  period specified.
+
+  If your job seems to be blocked, you can get a general idea of the
+  problem by doing a {\bf status dir}, but you can most often get a
+  much more specific indication of the problem by doing a
+  {\bf status storage=xxx}. For example, on an idle test system, when
+  I do {\bf status storage=File}, I get:
+\footnotesize
+\begin{verbatim}
+status storage=File
+Connecting to Storage daemon File at 192.168.68.112:8103
+
+rufus-sd Version: 1.39.6 (24 March 2006) i686-pc-linux-gnu redhat (Stentz)
+Daemon started 26-Mar-06 11:06, 0 Jobs run since started.
+
+Running Jobs:
+No Jobs running.
+====
+
+Jobs waiting to reserve a drive:
+====
+
+Terminated Jobs:
+ JobId  Level    Files       Bytes  Status   Finished         Name
+======================================================================
+    59  Full       234   4,417,599  OK       15-Jan-06 11:54  kernsave
+====
+
+Device status:
+Autochanger "DDS-4-changer" with devices:
+   "DDS-4" (/dev/nst0)
+Device "DDS-4" (/dev/nst0) is mounted with Volume="TestVolume002"
+Pool="*unknown*"
+    Slot 2 is loaded in drive 0.
+    Total Bytes Read=0 Blocks Read=0 Bytes/block=0
+    Positioned at File=0 Block=0
+Device "Dummy" is not open or does not exist.
+No DEVICE structure.
+
+Device "DVD-Writer" (/dev/hdc) is not open.
+Device "File" (/tmp) is not open.
+====
+
+In Use Volume status:
+====
+\end{verbatim}
+\normalsize
+
+Now, what this tells me is that no jobs are running and that none of
+the devices are in use. Now, if I {\bf unmount} the autochanger, which
+will not be used in this example, and then start a Job that uses the
+File device, the job will block. When I re-issue the status storage
+command, I get for the Device status:
+
+\footnotesize
+\begin{verbatim}
+status storage=File
+...
+Device status:
+Autochanger "DDS-4-changer" with devices:
+   "DDS-4" (/dev/nst0)
+Device "DDS-4" (/dev/nst0) is not open.
+    Device is BLOCKED. User unmounted.
+    Drive 0 is not loaded.
+Device "Dummy" is not open or does not exist.
+No DEVICE structure.
+
+Device "DVD-Writer" (/dev/hdc) is not open.
+Device "File" (/tmp) is not open.
+    Device is BLOCKED waiting for media.
+====
+...
+\end{verbatim}
+\normalsize
+
+Now, here it should be clear that if a job were running that wanted
+to use the Autochanger (with two devices), it would block because
+the user unmounted the device. The real problem for the Job I started
+using the "File" device is that the device is blocked waiting for
+media -- that is, Bacula needs you to label a Volume.
+
+\item [unmount]
+   \index[general]{unmount}
+   This command causes the indicated Bacula Storage daemon to unmount the
+   specified device. The forms of the command are the same as the mount command:
+\footnotesize
+\begin{verbatim}
+unmount storage=<storage-name> [ drive=<num> ]
+
+unmount [ jobid=<id> | job=<job-name> ]
+\end{verbatim}
+\normalsize
+
+   Once you unmount a storage device, Bacula will no longer be able to use
+   it until you issue a mount command for that device. If Bacula needs to
+   access that device, it will block and issue mount requests periodically
+   to the operator.
+
+   If the device you are unmounting is an autochanger, it will unload
+   the drive you have specified on the command line. If no drive is
+   specified, it will assume drive 1.
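+
+   As an illustrative example (the storage and drive names are placeholders
+   taken from the status listings above, not a recommendation), the
+   autochanger shown there could be released with:
+
+\footnotesize
+\begin{verbatim}
+unmount storage=DDS-4-changer drive=0
+\end{verbatim}
+\normalsize
+
+   A subsequent {\bf mount} command with the same storage and drive makes
+   the device available to Bacula again.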
+ +\label{UpdateCommand} +\item [update] + \index[general]{update} + This command will update the catalog for either a specific Pool record, a Volume + record, or the Slots in an autochanger with barcode capability. In the case + of updating a Pool record, the new information will be automatically taken + from the corresponding Director's configuration resource record. It can be + used to increase the maximum number of volumes permitted or to set a maximum + number of volumes. The following main keywords may be specified: +\footnotesize +\begin{verbatim} + media, volume, pool, slots +\end{verbatim} +\normalsize + +In the case of updating a Volume, you will be prompted for which value you +wish to change. The following Volume parameters may be changed: + +\footnotesize +\begin{verbatim} + + Volume Status + Volume Retention Period + Volume Use Duration + Maximum Volume Jobs + Maximum Volume Files + Maximum Volume Bytes + Recycle Flag + Recycle Pool + Slot + InChanger Flag + Pool + Volume Files + Volume from Pool + All Volumes from Pool + All Volumes from all Pools + +\end{verbatim} +\normalsize + + For slots {\bf update slots}, Bacula will obtain a list of slots and + their barcodes from the Storage daemon, and for each barcode found, it + will automatically update the slot in the catalog Media record to + correspond to the new value. This is very useful if you have moved + cassettes in the magazine, or if you have removed the magazine and + inserted a different one. As the slot of each Volume is updated, the + InChanger flag for that Volume will also be set, and any other Volumes + in the Pool that were last mounted on the same Storage device + will have their InChanger flag turned off. This permits + Bacula to know what magazine (tape holder) is currently in the + autochanger. + + If you do not have barcodes, you can accomplish the same thing in + version 1.33 and later by using the {\bf update slots scan} command. + The {\bf scan} keyword tells Bacula to physically mount each tape and to + read its VolumeName. + + For Pool {\bf update pool}, Bacula will move the Volume record from its + existing pool to the pool specified. + + For {\bf Volume from Pool}, {\bf All Volumes from Pool} and {\bf All Volumes + from all Pools}, the following values are updated from the Pool record: + Recycle, RecyclePool, VolRetention, VolUseDuration, MaxVolJobs, MaxVolFiles, + and MaxVolBytes. (RecyclePool feature is available with bacula 2.1.4 or + higher.) + + The full form of the update command with all command line arguments is: + +\footnotesize +\begin{verbatim} + update volume=xxx pool=yyy slots volstatus=xxx VolRetention=ddd + VolUse=ddd MaxVolJobs=nnn MaxVolBytes=nnn Recycle=yes|no + slot=nnn enabled=n recyclepool=zzz + +\end{verbatim} +\normalsize + +\item [use] + \index[general]{use} + This command allows you to specify which Catalog database to use. Normally, +you will be using only one database so this will be done automatically. In +the case that you are using more than one database, you can use this command +to switch from one to another. + +use \lt{}database-name\gt{} + +\item [var] + \label{var} + \index[general]{var name} + This command takes a string or quoted string and does variable expansion on + it the same way variable expansion is done on the {\bf LabelFormat} string. + Thus, for the most part, you can test your LabelFormat strings. 
The + difference between the {\bf var} command and the actual LabelFormat process + is that during the var command, no job is running so "dummy" values are + used in place of Job specific variables. Generally, however, you will get a + good idea of what is going to happen in the real case. + +\item [version] + \index[general]{version} + The command prints the Director's version. + +\item [quit] + \index[general]{quit} + This command terminates the console program. The console program sends the + {\bf quit} request to the Director and waits for acknowledgment. If the + Director is busy doing a previous command for you that has not terminated, it + may take some time. You may quit immediately by issuing the {\bf .quit} + command (i.e. quit preceded by a period). + +\item [query] + \index[general]{query} + This command reads a predefined SQL query from the query file (the name and + location of the query file is defined with the QueryFile resource record in + the Director's configuration file). You are prompted to select a query from + the file, and possibly enter one or more parameters, then the command is + submitted to the Catalog database SQL engine. + +The following queries are currently available (version 1.24): + +\footnotesize +\begin{verbatim} +Available queries: + 1: List Job totals: + 2: List where a file is saved: + 3: List where the most recent copies of a file are saved: + 4: List total files/bytes by Job: + 5: List total files/bytes by Volume: + 6: List last 20 Full Backups for a Client: + 7: List Volumes used by selected JobId: + 8: List Volumes to Restore All Files: + 9: List where a File is saved: +Choose a query (1-9): + +\end{verbatim} +\normalsize + +\item [exit] + \index[general]{exit} + This command terminates the console program. + +\item [wait] + \index[general]{wait} + The wait command causes the Director to pause until there are no jobs + running. This command is useful in a batch situation such as regression + testing where you wish to start a job and wait until that job completes + before continuing. This command now has the following options: +\footnotesize +\begin{verbatim} + wait [jobid=nn] [jobuid=unique id] [job=job name] +\end{verbatim} +\normalsize + If specified with a specific JobId, ... the wait command will wait + for that particular job to terminate before continuing. + +\end{description} + +\label{dotcommands} +\section{Special dot Commands} +\index[general]{Commands!Special dot} +\index[general]{Special dot Commands} + +There is a list of commands that are prefixed with a period (.). These +commands are intended to be used either by batch programs or graphical user +interface front-ends. They are not normally used by interactive users. Once +GUI development begins, this list will be considerably expanded. The following +is the list of dot commands: + +\footnotesize +\begin{verbatim} +.backups job=xxx list backups for specified job +.clients list all client names +.defaults client=xxx fileset=yyy list defaults for specified client +.die cause the Director to segment fault (for debugging) +.dir when in tree mode prints the equivalent to the dir command, + but with fields separated by commas rather than spaces. 
+.exit quit +.filesets list all fileset names +.help help command output +.jobs list all job names +.levels list all levels +.messages get quick messages +.msgs return any queued messages +.pools list all pool names +.quit quit +.status get status output +.storage return storage resource names +.types list job types +\end{verbatim} +\normalsize + +\label{atcommands} + +\section{Special At (@) Commands} +\index[general]{Commands!Special At @} +\index[general]{Special At (@) Commands} + +Normally, all commands entered to the Console program are immediately +forwarded to the Director, which may be on another machine, to be executed. +However, there is a small list of {\bf at} commands, all beginning with an at +character (@), that will not be sent to the Director, but rather interpreted +by the Console program directly. Note, these commands are implemented only in +the tty console program and not in the GNOME Console. These commands are: + +\begin{description} + +\item [@input \lt{}filename\gt{}] + \index[general]{@input \lt{}filename\gt{}} + Read and execute the commands contained in the file specified. + +\item [@output \lt{}filename\gt{} w/a] + \index[general]{@output \lt{}filename\gt{} w/a} + Send all following output to the filename specified either overwriting the +file (w) or appending to the file (a). To redirect the output to the +terminal, simply enter {\bf @output} without a filename specification. +WARNING: be careful not to overwrite a valid file. A typical example during a +regression test might be: + +\footnotesize +\begin{verbatim} + @output /dev/null + commands ... + @output + +\end{verbatim} +\normalsize + +\item [@tee \lt{}filename\gt{} w/a] + \index[general]{@tee \lt{}filename\gt{} w/a} + Send all subsequent output to both the specified file and the terminal. It is + turned off by specifying {\bf @tee} or {\bf @output} without a filename. + +\item [@sleep \lt{}seconds\gt{}] + \index[general]{@sleep \lt{}seconds\gt{}} + Sleep the specified number of seconds. + +\item [@time] + \index[general]{@time} + Print the current time and date. + +\item [@version] + \index[general]{@version} + Print the console's version. + +\item [@quit] + \index[general]{@quit} + quit + +\item [@exit] + \index[general]{@exit} + quit + +\item [@\# anything] + \index[general]{anything} + Comment +\end{description} + +\label{scripting} + +\section{Running the Console from a Shell Script} +\index[general]{Script!Running the Console Program from a Shell} +\index[general]{Running the Console Program from a Shell Script} + +You can automate many Console tasks by running the console program from a +shell script. For example, if you have created a file containing the following +commands: + +\footnotesize +\begin{verbatim} + ./bconsole -c ./bconsole.conf <) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. 
+ push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. 
+get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/de/console/console.css b/docs/manuals/de/console/console.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/de/console/console.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/de/console/console.tex b/docs/manuals/de/console/console.tex new file mode 100644 index 00000000..69ce36a3 --- /dev/null +++ b/docs/manuals/de/console/console.tex @@ -0,0 +1,78 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Console and Operators Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". 
+} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{bconsole} +\include{gui} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] + +\end{document} diff --git a/docs/manuals/de/console/do_echo b/docs/manuals/de/console/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/de/console/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/de/console/fdl.tex b/docs/manuals/de/console/fdl.tex new file mode 100644 index 00000000..b46cd990 --- /dev/null +++ b/docs/manuals/de/console/fdl.tex @@ -0,0 +1,485 @@ +% TODO: maybe get rid of centering + +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} + +\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. 
+ +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. 
+ +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. 
+If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] 
+ For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". 
You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. 
FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/de/console/fix_tex.pl b/docs/manuals/de/console/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/de/console/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. 
+ $filedata = "";
+ open IF,"<$file" or die "Cannot open input file $file";
+ while (<IF>) {
+ $filedata .= $_;
+ }
+ close IF;
+
+ # We look for a line that starts with \item, and indent the two next lines (if not blank)
+ # by three spaces.
+ my $linecnt = 3;
+ $indentcnt = 0;
+ $output = "";
+ # Process a line at a time.
+ foreach (split(/\n/,$filedata)) {
+ $_ .= "\n"; # Put back the return.
+ # If this line is less than the third line past the \item command,
+ # and the line isn't blank and doesn't start with whitespace,
+ # add three spaces to the start of the line. Keep track of the number
+ # of lines changed.
+ if ($linecnt < 3 and !/^\\item/) {
+ if (/^[^\n\s]/) {
+ $output .= " " . $_;
+ $indentcnt++;
+ } else {
+ $output .= $_;
+ }
+ $linecnt++;
+ } else {
+ $linecnt = 3;
+ $output .= $_;
+ }
+ /^\\item / and $linecnt = 1;
+ }
+
+
+ # Now handle the \item lines. If inside a \begin{description} environment, convert
+ # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']').
+ $itemcnt = 0;
+ $filedata = $output;
+ $output = "";
+ my ($before,$descrip,$this,$between);
+
+ # Find any \begin{description} environment
+ while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) {
+ $output .= $` . $1;
+ $filedata = $3 . $';
+ $descrip = $2;
+
+ # Search for \item {\bf xxx}
+ while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) {
+ $descrip = $';
+ $output .= $`;
+ ($between,$descrip) = find_matching_brace($descrip);
+ if (!$descrip) {
+ $linecnt = $output =~ tr/\n/\n/;
+ print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip);
+ }
+
+ # Now do the replacement.
+ $between = '{' . $between . '}' if ($between =~ /\[|\]/);
+ $output .= "\\item \[$between\]";
+ $itemcnt++;
+ }
+ $output .= $descrip;
+ }
+ $output .= $filedata;
+
+ # If any hyphens or \item commands were converted, save the file.
+ if ($indentcnt or $itemcnt) {
+ open OF,">$file" or die "Cannot open output file $file";
+ print OF $output;
+ close OF;
+ print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n";
+ print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," changed in $file\n";
+ }
+
+ $cnt += $indentcnt + $itemcnt;
+ }
+ return $cnt;
+}
+
+sub find_matching_brace {
+ # Finds text up to the next matching brace. Assumes that the input text doesn't contain
+ # the opening brace, but we want to find text up to a matching closing one.
+ # Returns the text between the matching braces, followed by the rest of the text following
+ # (which does not include the matching brace).
+ #
+ my $str = shift;
+ my ($this,$temp);
+ my $cnt = 1;
+
+ while ($cnt) {
+ # Ignore verbatim constructs involving curly braces, or if the character preceding
+ # the curly brace is a backslash.
+ if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) {
+ $this .= $`;
+ $str = $';
+ $temp = $&;
+
+ if ((substr($this,-1,1) eq '\\') or
+ $temp =~ /^\\verb/) {
+ $this .= $temp;
+ next;
+ }
+
+ $cnt += ($temp eq '{') ? 1 : -1;
+ # If this isn't the matching curly brace ($cnt > 0), include the brace.
+ $this .= $temp if ($cnt);
+ } else {
+ # No matching curly brace found.
+ return ($this . $str,'');
+ }
+ }
+ return ($this,$str);
+}
+
+sub check_arguments {
+ # Checks command-line arguments for ones starting with '--' and puts them into
+ # a hash called %args and removes them from @ARGV.
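+ # Worked example under an assumed command line (purely illustrative):
+ # invoked as "fix_tex.pl --single console.tex", the loop below strips the
+ # leading dashes from "--single", records $args{'single'} = "" and removes
+ # that element from @ARGV, leaving only the .tex file names for the
+ # include scan performed in the MAIN section.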
+ my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/de/console/index.perl b/docs/manuals/de/console/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/de/console/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. 
We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. +# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. 
+ $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. + # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. + s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. 
In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/de/console/latex2html-init.pl b/docs/manuals/de/console/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/de/console/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/de/console/setup.sm b/docs/manuals/de/console/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/de/console/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/de/console/translate_images.pl b/docs/manuals/de/console/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/de/console/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. 
+# The filename extension is taken from the file
+sub read_transfile {
+ my ($trans,$direction) = @_;
+
+ if (!open IN,"<$path$TRANSFILE") {
+ print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n";
+ print " Image filename translation aborted\n\n";
+ exit 0;
+ }
+
+ while (<IN>) {
+ chomp;
+ my ($new,$old) = split(/\001/);
+
+ # Old filenames will usually have a leading ./ which we don't need.
+ $old =~ s/^\.\///;
+
+ # The filename extension of the old filename must be made to match
+ # the new filename because it indicates the encoding format of the image.
+ my ($ext) = $new =~ /(\.[^\.]*)$/;
+ $old =~ s/\.[^\.]*$/$ext/;
+ if ($direction == 0) {
+ $trans->{$new} = $old;
+ } else {
+ $trans->{$old} = $new;
+ }
+ }
+ close IN;
+}
+
+# Translates the image names in the file given as the first argument, according to
+# the translations in the hash that is given as the second argument.
+# The file contents are read in entirely into a string, the string is processed, and
+# the file contents are then written. No particular care is taken to ensure that the
+# file is not lost if a system failure occurs at an inopportune time. It is assumed
+# that the html files being processed here can be recreated on demand.
+#
+# Links to other files are added to the %filelist for processing. That way,
+# all linked files will be processed (assuming they are local).
+sub translate_html {
+ my ($filename,$trans,$filelist) = @_;
+ my ($contents,$out,$this,$img,$dest);
+ my $cnt = 0;
+
+ # If the filename is an external link, ignore it. And drop any file:// from
+ # the filename.
+ $filename =~ /^(http|ftp|mailto)\:/ and return 0;
+ $filename =~ s/^file\:\/\///;
+ # Load the contents of the html file.
+ if (!open IF,"<$path$filename") {
+ print "WARNING: Cannot open $path$filename for reading\n";
+ print " Image Filename Translation aborted\n\n";
+ exit 0;
+ }
+
+ while (<IF>) {
+ $contents .= $_;
+ }
+ close IF;
+
+ # Now do the translation...
+ # First, search for an image filename.
+ while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) {
+ $contents = $';
+ $out .= $` . $&;
+
+ # The next thing is an image name. Get it and translate it.
+ $contents =~ /^(.*?)\"/s;
+ $contents = $';
+ $this = $&;
+ $img = $1;
+ # If the image is in our list of ones to be translated, do it
+ # and feed the result to the output.
+ $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img}));
+ $out .= $this;
+ }
+ $out .= $contents;
+
+ # Now send the translated text to the html file, overwriting what's there.
+ open OF,">$path$filename" or die "Cannot open $path$filename for writing\n";
+ print OF $out;
+ close OF;
+
+ # Now look for any links to other files and add them to the list of files to do.
+ while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) {
+ $out = $';
+ $dest = $1;
+ # Drop a # and anything after it.
+ $dest =~ s/\#.*//;
+ $filelist->{$dest} = '' if $dest;
+ }
+ return $cnt;
+}
+
+# Renames the image files specified in the %translate hash.
+sub rename_images {
+ my $translate = shift;
+ my ($response);
+
+ foreach (keys(%$translate)) {
+ if (! $translate->{$_}) {
+ print " WARNING: No destination Filename for $_\n";
+ } else {
+ $response = `mv -f $path$_ $path$translate->{$_} 2>&1`;
+ $response and print "ERROR from system $response\n";
+ }
+ }
+}
+
+#################################################
+############# MAIN #############################
+################################################
+
+# %filelist starts out with keys from the @ARGV list.
As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. + if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/de/console/update_version b/docs/manuals/de/console/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/de/console/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/console/update_version.in b/docs/manuals/de/console/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/de/console/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/console/version.tex b/docs/manuals/de/console/version.tex new file mode 100644 index 00000000..82d910aa --- /dev/null +++ b/docs/manuals/de/console/version.tex @@ -0,0 +1 @@ +2.3.6 (04 November 2007) diff --git a/docs/manuals/de/console/version.tex.in b/docs/manuals/de/console/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/de/console/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/de/developers/Makefile b/docs/manuals/de/developers/Makefile new file mode 100644 index 00000000..eb2c5f0f --- /dev/null +++ b/docs/manuals/de/developers/Makefile @@ -0,0 +1,106 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# + +IMAGES=../../../images + +DOC=developers + +first_rule: all + +all: tex web html dvipdf + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @cp -fp ${IMAGES}/hires/*.eps . 
+ touch ${DOC}.idx ${DOC}i-general.tex + -latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx >/dev/null 2>/dev/null + -latex -interaction=batchmode ${DOC}.tex + @rm -f *.eps *.old + +pdf: + @echo "Making ${DOC} pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + @rm -f *.eps *.old + +dvipdf: + @echo "Making ${DOC} pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + @rm -f *.eps *.old + +html: + @echo "Making ${DOC} html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @touch ${DOC}.html + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + ${DOC} >/dev/null + ./translate_images.pl --to_meaningful_names ${DOC}.html + @rm -f *.eps *.gif *.jpg *.old + +web: + @echo "Making ${DOC} web" + @mkdir -p ${DOC} + @rm -f ${DOC}/* + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @(if [ -f ${DOC}/imagename_translations ] ; then \ + ./translate_images.pl --to_meaningful_names ${DOC}/Developer*Guide.html; \ + fi) + @rm -rf ${DOC}/*.html + latex2html -split 4 -local_icons -t "Developer's Guide" -long_titles 4 \ + -contents_in_nav -toc_stars -white -notransparent ${DOC} >/dev/null + ./translate_images.pl --to_meaningful_names ${DOC}/Developer*Guide.html + @cp -f ${DOC}/${DOC}_Guide.html ${DOC}/index.html + @rm -f *.eps *.gif *.jpg ${DOC}/*.eps *.old + @rm -f ${DOC}/idle.png + @rm -f ${DOC}/win32-*.png ${DOC}/wx-console*.png ${DOC}/xp-*.png + @rm -f ${DOC}/*.pl ${DOC}/*.log ${DOC}/*.aux ${DOC}/*.idx + @rm -f ${DOC}/*.out WARNINGS + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +clean: + @rm -f 1 2 3 + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.pdf *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f images.pl labels.pl internals.pl + @rm -rf ${DOC} + @rm -f images.tex ${DOC}i.tex + @rm -f ${DOC}i-*.tex + + +distclean: clean + @rm -f ${DOC}.html ${DOC}.pdf + @rm -f Makefile version.tex diff --git a/docs/manuals/de/developers/Makefile.in b/docs/manuals/de/developers/Makefile.in new file mode 100644 index 00000000..eb2c5f0f --- /dev/null +++ b/docs/manuals/de/developers/Makefile.in @@ -0,0 +1,106 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# + +IMAGES=../../../images + +DOC=developers + +first_rule: all + +all: tex web html dvipdf + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @cp -fp ${IMAGES}/hires/*.eps . + touch ${DOC}.idx ${DOC}i-general.tex + -latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx >/dev/null 2>/dev/null + -latex -interaction=batchmode ${DOC}.tex + @rm -f *.eps *.old + +pdf: + @echo "Making ${DOC} pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + @rm -f *.eps *.old + +dvipdf: + @echo "Making ${DOC} pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + @rm -f *.eps *.old + +html: + @echo "Making ${DOC} html" + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @touch ${DOC}.html + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + ${DOC} >/dev/null + ./translate_images.pl --to_meaningful_names ${DOC}.html + @rm -f *.eps *.gif *.jpg *.old + +web: + @echo "Making ${DOC} web" + @mkdir -p ${DOC} + @rm -f ${DOC}/* + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @(if [ -f ${DOC}/imagename_translations ] ; then \ + ./translate_images.pl --to_meaningful_names ${DOC}/Developer*Guide.html; \ + fi) + @rm -rf ${DOC}/*.html + latex2html -split 4 -local_icons -t "Developer's Guide" -long_titles 4 \ + -contents_in_nav -toc_stars -white -notransparent ${DOC} >/dev/null + ./translate_images.pl --to_meaningful_names ${DOC}/Developer*Guide.html + @cp -f ${DOC}/${DOC}_Guide.html ${DOC}/index.html + @rm -f *.eps *.gif *.jpg ${DOC}/*.eps *.old + @rm -f ${DOC}/idle.png + @rm -f ${DOC}/win32-*.png ${DOC}/wx-console*.png ${DOC}/xp-*.png + @rm -f ${DOC}/*.pl ${DOC}/*.log ${DOC}/*.aux ${DOC}/*.idx + @rm -f ${DOC}/*.out WARNINGS + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +clean: + @rm -f 1 2 3 + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.pdf *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f images.pl labels.pl internals.pl + @rm -rf ${DOC} + @rm -f images.tex ${DOC}i.tex + @rm -f ${DOC}i-*.tex + + +distclean: clean + @rm -f ${DOC}.html ${DOC}.pdf + @rm -f Makefile version.tex diff --git a/docs/manuals/de/developers/catalog.tex b/docs/manuals/de/developers/catalog.tex new file mode 100644 index 00000000..f67866b5 --- /dev/null +++ b/docs/manuals/de/developers/catalog.tex @@ -0,0 +1,939 @@ +%% +%% + +\chapter{Catalog Services} +\label{_ChapterStart30} +\index[general]{Services!Catalog } +\index[general]{Catalog Services } + +\section{General} +\index[general]{General } +\addcontentsline{toc}{subsection}{General} + +This chapter is intended to be a technical discussion of the Catalog services +and as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. + +The {\bf Bacula Catalog} services consist of the programs that provide the SQL +database engine for storage and retrieval of all information concerning files +that were backed up and their locations on the storage media. + +We have investigated the possibility of using the following SQL engines for +Bacula: Beagle, mSQL, GNU SQL, PostgreSQL, SQLite, Oracle, and MySQL. Each +presents certain problems with either licensing or maturity. At present, we +have chosen for development purposes to use MySQL, PostgreSQL and SQLite. +MySQL was chosen because it is fast, proven to be reliable, widely used, and +actively being developed. MySQL is released under the GNU GPL license. +PostgreSQL was chosen because it is a full-featured, very mature database, and +because Dan Langille did the Bacula driver for it. PostgreSQL is distributed +under the BSD license. 
SQLite was chosen because it is small, efficient, and +can be directly embedded in {\bf Bacula} thus requiring much less effort from +the system administrator or person building {\bf Bacula}. In our testing +SQLite has performed very well, and for the functions that we use, it has +never encountered any errors except that it does not appear to handle +databases larger than 2GBytes. That said, we would not recommend it for +serious production use. + +The Bacula SQL code has been written in a manner that will allow it to be +easily modified to support any of the current SQL database systems on the +market (for example: mSQL, iODBC, unixODBC, Solid, OpenLink ODBC, EasySoft +ODBC, InterBase, Oracle8, Oracle7, and DB2). + +If you do not specify either {\bf \verb{--{with-mysql} or {\bf \verb{--{with-postgresql} or +{\bf \verb{--{with-sqlite} on the ./configure line, Bacula will use its minimalist +internal database. This database is kept for build reasons but is no longer +supported. Bacula {\bf requires} one of the three databases (MySQL, +PostgreSQL, or SQLite) to run. + +\subsection{Filenames and Maximum Filename Length} +\index[general]{Filenames and Maximum Filename Length } +\index[general]{Length!Filenames and Maximum Filename } +\addcontentsline{toc}{subsubsection}{Filenames and Maximum Filename Length} + +In general, either MySQL, PostgreSQL or SQLite permit storing arbitrary long +path names and file names in the catalog database. In practice, there still +may be one or two places in the Catalog interface code that restrict the +maximum path length to 512 characters and the maximum file name length to 512 +characters. These restrictions are believed to have been removed. Please note, +these restrictions apply only to the Catalog database and thus to your ability +to list online the files saved during any job. All information received and +stored by the Storage daemon (normally on tape) allows and handles arbitrarily +long path and filenames. + +\subsection{Installing and Configuring MySQL} +\index[general]{MySQL!Installing and Configuring } +\index[general]{Installing and Configuring MySQL } +\addcontentsline{toc}{subsubsection}{Installing and Configuring MySQL} + +For the details of installing and configuring MySQL, please see the +\ilink{Installing and Configuring MySQL}{_ChapterStart} chapter of +this manual. + +\subsection{Installing and Configuring PostgreSQL} +\index[general]{PostgreSQL!Installing and Configuring } +\index[general]{Installing and Configuring PostgreSQL } +\addcontentsline{toc}{subsubsection}{Installing and Configuring PostgreSQL} + +For the details of installing and configuring PostgreSQL, please see the +\ilink{Installing and Configuring PostgreSQL}{_ChapterStart10} +chapter of this manual. + +\subsection{Installing and Configuring SQLite} +\index[general]{Installing and Configuring SQLite } +\index[general]{SQLite!Installing and Configuring } +\addcontentsline{toc}{subsubsection}{Installing and Configuring SQLite} + +For the details of installing and configuring SQLite, please see the +\ilink{Installing and Configuring SQLite}{_ChapterStart33} chapter of +this manual. + +\subsection{Internal Bacula Catalog} +\index[general]{Catalog!Internal Bacula } +\index[general]{Internal Bacula Catalog } +\addcontentsline{toc}{subsubsection}{Internal Bacula Catalog} + +Please see the +\ilink{Internal Bacula Database}{_ChapterStart42} chapter of this +manual for more details. 
+ +\subsection{Database Table Design} +\index[general]{Design!Database Table } +\index[general]{Database Table Design } +\addcontentsline{toc}{subsubsection}{Database Table Design} + +All discussions that follow pertain to the MySQL database. The details for the +PostgreSQL and SQLite databases are essentially identical except that all +fields in the SQLite database are stored as ASCII text and some of the +database creation statements are a bit different. The details of the internal +Bacula catalog are not discussed here. + +Because the Catalog database may contain very large amounts of data for large +sites, we have made a modest attempt to normalize the data tables to reduce +redundant information. While reducing the size of the database significantly, +it does, unfortunately, add some complications to the structures. + +In simple terms, the Catalog database must contain a record of all Jobs run by +Bacula, and for each Job, it must maintain a list of all files saved, with +their File Attributes (permissions, create date, ...), and the location and +Media on which the file is stored. This is seemingly a simple task, but it +represents a huge amount of interlinked data. Note: the list of files and their +attributes is not maintained when using the internal Bacula database. The data +stored in the File records, which allows the user or administrator to obtain a +list of all files backed up during a job, is by far the largest volume of +information put into the Catalog database. + +Although the Catalog database has been designed to handle backup data for +multiple clients, some users may want to maintain multiple databases, one for +each machine to be backed up. This reduces the risk of accidentally +restoring a file to the wrong machine as well as reducing the amount of data +in a single database, thus increasing efficiency and reducing the impact of a +lost or damaged database. + +\section{Sequence of Creation of Records for a Save Job} +\index[general]{Sequence of Creation of Records for a Save Job } +\index[general]{Job!Sequence of Creation of Records for a Save } +\addcontentsline{toc}{subsection}{Sequence of Creation of Records for a Save +Job} + +Start with StartDate, ClientName, Filename, Path, Attributes, MediaName, +MediaCoordinates. (PartNumber, NumParts). In the steps below, ``Create new'' +means to create a new record whether or not it is unique. ``Create unique'' +means each record in the database should be unique. Thus, one must first +search to see if the record exists, and only if not should a new one be +created; otherwise the existing RecordId should be used.
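For illustration only (this sketch is not part of the original manual, and the daemons go through Bacula's own catalog routines rather than hand-written SQL like this), the ``Create unique'' pattern for a Filename record might look roughly as follows in MySQL, where the filename 'hosts' is an assumed example value:

\footnotesize
\begin{verbatim}
-- Search first; reuse the id if the record already exists.
SELECT FilenameId FROM Filename WHERE Name = 'hosts';
-- Only if no row is returned, create the record and use the new id.
INSERT INTO Filename (Name) VALUES ('hosts');
SELECT LAST_INSERT_ID();
\end{verbatim}
\normalsize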
+ +\begin{enumerate} +\item Create new Job record with StartDate; save JobId +\item Create unique Media record; save MediaId +\item Create unique Client record; save ClientId +\item Create unique Filename record; save FilenameId +\item Create unique Path record; save PathId +\item Create unique Attribute record; save AttributeId + store ClientId, FilenameId, PathId, and Attributes +\item Create new File record + store JobId, AttributeId, MediaCoordinates, etc. +\item Repeat steps 4 through 8 for each file +\item Create a JobMedia record; save MediaId +\item Update Job record filling in EndDate and other Job statistics + \end{enumerate} + +\section{Database Tables} +\index[general]{Database Tables } +\index[general]{Tables!Database } +\addcontentsline{toc}{subsection}{Database Tables} + +\addcontentsline{lot}{table}{Filename Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Filename } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{l| }{\bf Data Type } +& \multicolumn{1}{l| }{\bf Remark } \\ + \hline +{FilenameId } & {integer } & {Primary Key } \\ + \hline +{Name } & {Blob } & {Filename } +\\ \hline + +\end{longtable} + +The {\bf Filename} table shown above contains the name of each file backed up +with the path removed. If different directories or machines contain the same +filename, only one copy will be saved in this table. + +\ + +\addcontentsline{lot}{table}{Path Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Path } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{PathId } & {integer } & {Primary Key } \\ + \hline +{Path } & {Blob } & {Full Path } +\\ \hline + +\end{longtable} + +The {\bf Path} table shown above contains the path or directory names of all +directories on the system or systems. The filename and any MSDOS disk name are +stripped off. As with the filename, only one copy of each directory name is +kept regardless of how many machines or drives have the same directory. These +path names should be stored in Unix path name format. + +Some simple testing on a Linux file system indicates that separating the +filename and the path may be more of a complication than is warranted by the space +savings. For example, this system has a total of 89,097 files, 60,467 of which +have unique filenames, and there are 4,374 unique paths. + +Finding all those files and doing two stats() per file takes an average wall +clock time of 1 min 35 seconds on a 400MHz machine running RedHat 6.1 Linux. + +Finding all those files and putting them directly into a MySQL database with +the path and filename defined as TEXT, which is variable length up to 65,535 +characters, takes 19 mins 31 seconds and creates a 27.6 MByte database. + +Doing the same thing, but inserting them into Blob fields with the filename +indexed on the first 30 characters and the path name indexed on the 255 (max) +characters takes 5 mins 18 seconds and creates a 5.24 MB database. Rerunning +the job (with the database already created) takes about 2 mins 50 seconds. + +Running the same as the last one (Path and Filename Blob), but Filename +indexed on the first 30 characters and the Path on the first 50 characters +(linear search done thereafter) takes 5 mins on the average and creates a 3.4 +MB database. Rerunning with the data already in the DB takes 3 mins 35 +seconds.
+ +Finally, saving only the full path name rather than splitting the path and the +file, and indexing it on the first 50 characters takes 6 mins 43 seconds and +creates a 7.35 MB database. + +\ + +\addcontentsline{lot}{table}{File Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf File } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{FileId } & {integer } & {Primary Key } \\ + \hline +{FileIndex } & {integer } & {The sequential file number in the Job } \\ + \hline +{JobId } & {integer } & {Link to Job Record } \\ + \hline +{PathId } & {integer } & {Link to Path Record } \\ + \hline +{FilenameId } & {integer } & {Link to Filename Record } \\ + \hline +{MarkId } & {integer } & {Used to mark files during Verify Jobs } \\ + \hline +{LStat } & {tinyblob } & {File attributes in base64 encoding } \\ + \hline +{MD5 } & {tinyblob } & {MD5 signature in base64 encoding } +\\ \hline + +\end{longtable} + +The {\bf File} table shown above contains one entry for each file backed up by +Bacula. Thus a file that is backed up multiple times (as is normal) will have +multiple entries in the File table. This will probably be the table with the +most number of records. Consequently, it is essential to keep the size of this +record to an absolute minimum. At the same time, this table must contain all +the information (or pointers to the information) about the file and where it +is backed up. Since a file may be backed up many times without having changed, +the path and filename are stored in separate tables. + +This table contains by far the largest amount of information in the Catalog +database, both from the stand point of number of records, and the stand point +of total database size. As a consequence, the user must take care to +periodically reduce the number of File records using the {\bf retention} +command in the Console program. + +\ + +\addcontentsline{lot}{table}{Job Table Layout} +\begin{longtable}{|l|l|p{2.5in}|} + \hline +\multicolumn{3}{|l| }{\bf Job } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{JobId } & {integer } & {Primary Key } \\ + \hline +{Job } & {tinyblob } & {Unique Job Name } \\ + \hline +{Name } & {tinyblob } & {Job Name } \\ + \hline +{PurgedFiles } & {tinyint } & {Used by Bacula for purging/retention periods +} \\ + \hline +{Type } & {binary(1) } & {Job Type: Backup, Copy, Clone, Archive, Migration +} \\ + \hline +{Level } & {binary(1) } & {Job Level } \\ + \hline +{ClientId } & {integer } & {Client index } \\ + \hline +{JobStatus } & {binary(1) } & {Job Termination Status } \\ + \hline +{SchedTime } & {datetime } & {Time/date when Job scheduled } \\ + \hline +{StartTime } & {datetime } & {Time/date when Job started } \\ + \hline +{EndTime } & {datetime } & {Time/date when Job ended } \\ + \hline +{JobTDate } & {bigint } & {Start day in Unix format but 64 bits; used for +Retention period. 
} \\ + \hline +{VolSessionId } & {integer } & {Unique Volume Session ID } \\ + \hline +{VolSessionTime } & {integer } & {Unique Volume Session Time } \\ + \hline +{JobFiles } & {integer } & {Number of files saved in Job } \\ + \hline +{JobBytes } & {bigint } & {Number of bytes saved in Job } \\ + \hline +{JobErrors } & {integer } & {Number of errors during Job } \\ + \hline +{JobMissingFiles } & {integer } & {Number of files not saved (not yet used) } +\\ + \hline +{PoolId } & {integer } & {Link to Pool Record } \\ + \hline +{FileSetId } & {integer } & {Link to FileSet Record } \\ + \hline +{PurgedFiles } & {tiny integer } & {Set when all File records purged } \\ + \hline +{HasBase } & {tiny integer } & {Set when Base Job run } +\\ \hline + +\end{longtable} + +The {\bf Job} table contains one record for each Job run by Bacula. Thus +normally, there will be one per day per machine added to the database. Note, +the JobId is used to index Job records in the database, and it often is shown +to the user in the Console program. However, care must be taken with its use +as it is not unique from database to database. For example, the user may have +a database for Client data saved on machine Rufus and another database for +Client data saved on machine Roxie. In this case, the two database will each +have JobIds that match those in another database. For a unique reference to a +Job, see Job below. + +The Name field of the Job record corresponds to the Name resource record given +in the Director's configuration file. Thus it is a generic name, and it will +be normal to find many Jobs (or even all Jobs) with the same Name. + +The Job field contains a combination of the Name and the schedule time of the +Job by the Director. Thus for a given Director, even with multiple Catalog +databases, the Job will contain a unique name that represents the Job. + +For a given Storage daemon, the VolSessionId and VolSessionTime form a unique +identification of the Job. This will be the case even if multiple Directors +are using the same Storage daemon. 
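As a hedged illustration of how these fields are typically used (the client name 'rufus-fd' below is an assumed example value, not taken from this manual), the recent Jobs for one Client can be listed with a query such as:

\footnotesize
\begin{verbatim}
SELECT Job.JobId, Job.Job, Job.StartTime, Job.JobStatus
  FROM Job, Client
 WHERE Job.ClientId = Client.ClientId
   AND Client.Name = 'rufus-fd'
 ORDER BY Job.StartTime DESC;
\end{verbatim}
\normalsize

Note that it is the Job column, not the JobId, that remains unique across multiple catalog databases, as explained above.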
+ +The Job Type (or simply Type) can have one of the following values: + +\addcontentsline{lot}{table}{Job Types} +\begin{longtable}{|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\ + \hline +{B } & {Backup Job } \\ + \hline +{V } & {Verify Job } \\ + \hline +{R } & {Restore Job } \\ + \hline +{C } & {Console program (not in database) } \\ + \hline +{D } & {Admin Job } \\ + \hline +{A } & {Archive Job (not implemented) } +\\ \hline + +\end{longtable} + +The JobStatus field specifies how the job terminated, and can be one of the +following: + +\addcontentsline{lot}{table}{Job Statuses} +\begin{longtable}{|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\ + \hline +{C } & {Created but not yet running } \\ + \hline +{R } & {Running } \\ + \hline +{B } & {Blocked } \\ + \hline +{T } & {Terminated normally } \\ + \hline +{E } & {Terminated in Error } \\ + \hline +{e } & {Non-fatal error } \\ + \hline +{f } & {Fatal error } \\ + \hline +{D } & {Verify Differences } \\ + \hline +{A } & {Canceled by the user } \\ + \hline +{F } & {Waiting on the File daemon } \\ + \hline +{S } & {Waiting on the Storage daemon } \\ + \hline +{m } & {Waiting for a new Volume to be mounted } \\ + \hline +{M } & {Waiting for a Mount } \\ + \hline +{s } & {Waiting for Storage resource } \\ + \hline +{j } & {Waiting for Job resource } \\ + \hline +{c } & {Waiting for Client resource } \\ + \hline +{d } & {Wating for Maximum jobs } \\ + \hline +{t } & {Waiting for Start Time } \\ + \hline +{p } & {Waiting for higher priority job to finish } +\\ \hline + +\end{longtable} + +\ + +\addcontentsline{lot}{table}{File Sets Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf FileSet } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{FileSetId } & {integer } & {Primary Key } \\ + \hline +{FileSet } & {tinyblob } & {FileSet name } \\ + \hline +{MD5 } & {tinyblob } & {MD5 checksum of FileSet } \\ + \hline +{CreateTime } & {datetime } & {Time and date Fileset created } +\\ \hline + +\end{longtable} + +The {\bf FileSet} table contains one entry for each FileSet that is used. The +MD5 signature is kept to ensure that if the user changes anything inside the +FileSet, it will be detected and the new FileSet will be used. This is +particularly important when doing an incremental update. If the user deletes a +file or adds a file, we need to ensure that a Full backup is done prior to the +next incremental. 
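To make the change-detection behaviour concrete, here is a small, hypothetical query (the FileSet name 'Full Set' is an assumed example value): if the FileSet definition has been edited, several rows with the same FileSet name but different MD5 values will appear, each with its own CreateTime.

\footnotesize
\begin{verbatim}
SELECT FileSetId, FileSet, MD5, CreateTime
  FROM FileSet
 WHERE FileSet = 'Full Set'
 ORDER BY CreateTime;
\end{verbatim}
\normalsize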
+ +\ + +\addcontentsline{lot}{table}{JobMedia Table Layout} +\begin{longtable}{|l|l|p{2.5in}|} + \hline +\multicolumn{3}{|l| }{\bf JobMedia } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{JobMediaId } & {integer } & {Primary Key } \\ + \hline +{JobId } & {integer } & {Link to Job Record } \\ + \hline +{MediaId } & {integer } & {Link to Media Record } \\ + \hline +{FirstIndex } & {integer } & {The index (sequence number) of the first file +written for this Job to the Media } \\ + \hline +{LastIndex } & {integer } & {The index of the last file written for this +Job to the Media } \\ + \hline +{StartFile } & {integer } & {The physical media (tape) file number of the +first block written for this Job } \\ + \hline +{EndFile } & {integer } & {The physical media (tape) file number of the +last block written for this Job } \\ + \hline +{StartBlock } & {integer } & {The number of the first block written for +this Job } \\ + \hline +{EndBlock } & {integer } & {The number of the last block written for this +Job } \\ + \hline +{VolIndex } & {integer } & {The Volume use sequence number within the Job } +\\ \hline + +\end{longtable} + +The {\bf JobMedia} table contains one entry at the following: start of +the job, start of each new tape file, start of each new tape, end of the +job. Since by default, a new tape file is written every 2GB, in general, +you will have more than 2 JobMedia records per Job. The number can be +varied by changing the "Maximum File Size" specified in the Device +resource. This record allows Bacula to efficiently position close to +(within 2GB) any given file in a backup. For restoring a full Job, +these records are not very important, but if you want to retrieve +a single file that was written near the end of a 100GB backup, the +JobMedia records can speed it up by orders of magnitude by permitting +forward spacing files and blocks rather than reading the whole 100GB +backup. 
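A minimal sketch of how these records support positioning follows (JobId 567 and FileIndex 1234 are assumed example values, not taken from this manual); it finds the Volume, tape file and block range that contain one particular file of a Job:

\footnotesize
\begin{verbatim}
-- Which Volume, tape file and block range hold file number 1234 of job 567?
SELECT Media.VolumeName,
       JobMedia.StartFile, JobMedia.EndFile,
       JobMedia.StartBlock, JobMedia.EndBlock
  FROM JobMedia, Media
 WHERE JobMedia.JobId = 567
   AND JobMedia.MediaId = Media.MediaId
   AND 1234 BETWEEN JobMedia.FirstIndex AND JobMedia.LastIndex;
\end{verbatim}
\normalsize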
+ + + +\ + +\addcontentsline{lot}{table}{Media Table Layout} +\begin{longtable}{|l|l|p{2.4in}|} + \hline +\multicolumn{3}{|l| }{\bf Media } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{MediaId } & {integer } & {Primary Key } \\ + \hline +{VolumeName } & {tinyblob } & {Volume name } \\ + \hline +{Slot } & {integer } & {Autochanger Slot number or zero } \\ + \hline +{PoolId } & {integer } & {Link to Pool Record } \\ + \hline +{MediaType } & {tinyblob } & {The MediaType supplied by the user } \\ + \hline +{FirstWritten } & {datetime } & {Time/date when first written } \\ + \hline +{LastWritten } & {datetime } & {Time/date when last written } \\ + \hline +{LabelDate } & {datetime } & {Time/date when tape labeled } \\ + \hline +{VolJobs } & {integer } & {Number of jobs written to this media } \\ + \hline +{VolFiles } & {integer } & {Number of files written to this media } \\ + \hline +{VolBlocks } & {integer } & {Number of blocks written to this media } \\ + \hline +{VolMounts } & {integer } & {Number of time media mounted } \\ + \hline +{VolBytes } & {bigint } & {Number of bytes saved in Job } \\ + \hline +{VolErrors } & {integer } & {Number of errors during Job } \\ + \hline +{VolWrites } & {integer } & {Number of writes to media } \\ + \hline +{MaxVolBytes } & {bigint } & {Maximum bytes to put on this media } \\ + \hline +{VolCapacityBytes } & {bigint } & {Capacity estimate for this volume } \\ + \hline +{VolStatus } & {enum } & {Status of media: Full, Archive, Append, Recycle, +Read-Only, Disabled, Error, Busy } \\ + \hline +{Recycle } & {tinyint } & {Whether or not Bacula can recycle the Volumes: +Yes, No } \\ + \hline +{VolRetention } & {bigint } & {64 bit seconds until expiration } \\ + \hline +{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\ + \hline +{MaxVolJobs } & {integer } & {maximum jobs to put on Volume } \\ + \hline +{MaxVolFiles } & {integer } & {maximume EOF marks to put on Volume } +\\ \hline + +\end{longtable} + +The {\bf Volume} table (internally referred to as the Media table) contains +one entry for each volume, that is each tape, cassette (8mm, DLT, DAT, ...), +or file on which information is or was backed up. There is one Volume record +created for each of the NumVols specified in the Pool resource record. 
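As an illustrative query only (the pool name 'Default' is an assumed example value), the state of all Volumes belonging to one Pool can be inspected like this:

\footnotesize
\begin{verbatim}
SELECT Media.VolumeName, Media.VolStatus, Media.VolBytes, Media.LastWritten
  FROM Media, Pool
 WHERE Media.PoolId = Pool.PoolId
   AND Pool.Name = 'Default'
 ORDER BY Media.LastWritten;
\end{verbatim}
\normalsize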
+ +\ + +\addcontentsline{lot}{table}{Pool Table Layout} +\begin{longtable}{|l|l|p{2.4in}|} + \hline +\multicolumn{3}{|l| }{\bf Pool } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{PoolId } & {integer } & {Primary Key } \\ + \hline +{Name } & {Tinyblob } & {Pool Name } \\ + \hline +{NumVols } & {Integer } & {Number of Volumes in the Pool } \\ + \hline +{MaxVols } & {Integer } & {Maximum Volumes in the Pool } \\ + \hline +{UseOnce } & {tinyint } & {Use volume once } \\ + \hline +{UseCatalog } & {tinyint } & {Set to use catalog } \\ + \hline +{AcceptAnyVolume } & {tinyint } & {Accept any volume from Pool } \\ + \hline +{VolRetention } & {bigint } & {64 bit seconds to retain volume } \\ + \hline +{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\ + \hline +{MaxVolJobs } & {integer } & {max jobs on volume } \\ + \hline +{MaxVolFiles } & {integer } & {max EOF marks to put on Volume } \\ + \hline +{MaxVolBytes } & {bigint } & {max bytes to write on Volume } \\ + \hline +{AutoPrune } & {tinyint } & {yes|no for autopruning } \\ + \hline +{Recycle } & {tinyint } & {yes|no for allowing auto recycling of Volume } +\\ + \hline +{PoolType } & {enum } & {Backup, Copy, Cloned, Archive, Migration } \\ + \hline +{LabelFormat } & {Tinyblob } & {Label format } +\\ \hline + +\end{longtable} + +The {\bf Pool} table contains one entry for each media pool controlled by +Bacula in this database. One media record exists for each of the NumVols +contained in the Pool. The PoolType is a Bacula defined keyword. The MediaType +is defined by the administrator, and corresponds to the MediaType specified in +the Director's Storage definition record. The CurrentVol is the sequence +number of the Media record for the current volume. + +\ + +\addcontentsline{lot}{table}{Client Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Client } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{ClientId } & {integer } & {Primary Key } \\ + \hline +{Name } & {TinyBlob } & {File Services Name } \\ + \hline +{UName } & {TinyBlob } & {uname -a from Client (not yet used) } \\ + \hline +{AutoPrune } & {tinyint } & {yes|no for autopruning } \\ + \hline +{FileRetention } & {bigint } & {64 bit seconds to retain Files } \\ + \hline +{JobRetention } & {bigint } & {64 bit seconds to retain Job } +\\ \hline + +\end{longtable} + +The {\bf Client} table contains one entry for each machine backed up by Bacula +in this database. Normally the Name is a fully qualified domain name. + +\ + +\addcontentsline{lot}{table}{Unsaved Files Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf UnsavedFiles } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{UnsavedId } & {integer } & {Primary Key } \\ + \hline +{JobId } & {integer } & {JobId corresponding to this record } \\ + \hline +{PathId } & {integer } & {Id of path } \\ + \hline +{FilenameId } & {integer } & {Id of filename } +\\ \hline + +\end{longtable} + +The {\bf UnsavedFiles} table contains one entry for each file that was not +saved. Note! This record is not yet implemented. 
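As a small aside on the Client table shown above (a hypothetical query, not part of the original text): the retention columns hold seconds, so converting them for display is simple arithmetic, for example:

\footnotesize
\begin{verbatim}
SELECT Name, AutoPrune,
       FileRetention/(60*60*24) AS FileRetentionDays,
       JobRetention/(60*60*24)  AS JobRetentionDays
  FROM Client;
\end{verbatim}
\normalsize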
+ +\ + +\addcontentsline{lot}{table}{Counter Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Counter } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{Counter } & {tinyblob } & {Counter name } \\ + \hline +{MinValue } & {integer } & {Start/Min value for counter } \\ + \hline +{MaxValue } & {integer } & {Max value for counter } \\ + \hline +{CurrentValue } & {integer } & {Current counter value } \\ + \hline +{WrapCounter } & {tinyblob } & {Name of another counter } +\\ \hline + +\end{longtable} + +The {\bf Counter} table contains one entry for each permanent counter defined +by the user. + +\ + +\addcontentsline{lot}{table}{Version Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Version } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{VersionId } & {integer } & {Primary Key } +\\ \hline + +\end{longtable} + +The {\bf Version} table defines the Bacula database version number. Bacula +checks this number before reading the database to ensure that it is compatible +with the Bacula binary file. + +\ + +\addcontentsline{lot}{table}{Base Files Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf BaseFiles } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{BaseId } & {integer } & {Primary Key } \\ + \hline +{BaseJobId } & {integer } & {JobId of Base Job } \\ + \hline +{JobId } & {integer } & {Reference to Job } \\ + \hline +{FileId } & {integer } & {Reference to File } \\ + \hline +{FileIndex } & {integer } & {File Index number } +\\ \hline + +\end{longtable} + +The {\bf BaseFiles} table contains all the File references for a particular +JobId that point to a Base file -- i.e. they were previously saved and hence +were not saved in the current JobId but in BaseJobId under FileId. FileIndex +is the index of the file, and is used for optimization of Restore jobs to +prevent the need to read the FileId record when creating the in memory tree. +This record is not yet implemented. 
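Once this table is populated, the Base file references of a Job could, for example, be resolved with a join such as the following sketch (JobId 890 is an assumed example value):

\footnotesize
\begin{verbatim}
SELECT BaseFiles.FileIndex, Path.Path, Filename.Name
  FROM BaseFiles, File, Path, Filename
 WHERE BaseFiles.JobId = 890
   AND BaseFiles.FileId = File.FileId
   AND File.PathId = Path.PathId
   AND File.FilenameId = Filename.FilenameId;
\end{verbatim}
\normalsize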
+ +\ + +\subsection{MySQL Table Definition} +\index[general]{MySQL Table Definition } +\index[general]{Definition!MySQL Table } +\addcontentsline{toc}{subsubsection}{MySQL Table Definition} + +The commands used to create the MySQL tables are as follows: + +\footnotesize +\begin{verbatim} +USE bacula; +CREATE TABLE Filename ( + FilenameId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name BLOB NOT NULL, + PRIMARY KEY(FilenameId), + INDEX (Name(30)) + ); +CREATE TABLE Path ( + PathId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Path BLOB NOT NULL, + PRIMARY KEY(PathId), + INDEX (Path(50)) + ); +CREATE TABLE File ( + FileId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + FileIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + PathId INTEGER UNSIGNED NOT NULL REFERENCES Path, + FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename, + MarkId INTEGER UNSIGNED NOT NULL DEFAULT 0, + LStat TINYBLOB NOT NULL, + MD5 TINYBLOB NOT NULL, + PRIMARY KEY(FileId), + INDEX (JobId), + INDEX (PathId), + INDEX (FilenameId) + ); +CREATE TABLE Job ( + JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Job TINYBLOB NOT NULL, + Name TINYBLOB NOT NULL, + Type BINARY(1) NOT NULL, + Level BINARY(1) NOT NULL, + ClientId INTEGER NOT NULL REFERENCES Client, + JobStatus BINARY(1) NOT NULL, + SchedTime DATETIME NOT NULL, + StartTime DATETIME NOT NULL, + EndTime DATETIME NOT NULL, + JobTDate BIGINT UNSIGNED NOT NULL, + VolSessionId INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolSessionTime INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobBytes BIGINT UNSIGNED NOT NULL, + JobErrors INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobMissingFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool, + FileSetId INTEGER UNSIGNED NOT NULL REFERENCES FileSet, + PurgedFiles TINYINT NOT NULL DEFAULT 0, + HasBase TINYINT NOT NULL DEFAULT 0, + PRIMARY KEY(JobId), + INDEX (Name(128)) + ); +CREATE TABLE FileSet ( + FileSetId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + FileSet TINYBLOB NOT NULL, + MD5 TINYBLOB NOT NULL, + CreateTime DATETIME NOT NULL, + PRIMARY KEY(FileSetId) + ); +CREATE TABLE JobMedia ( + JobMediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + MediaId INTEGER UNSIGNED NOT NULL REFERENCES Media, + FirstIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + LastIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + StartFile INTEGER UNSIGNED NOT NULL DEFAULT 0, + EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0, + StartBlock INTEGER UNSIGNED NOT NULL DEFAULT 0, + EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + PRIMARY KEY(JobMediaId), + INDEX (JobId, MediaId) + ); +CREATE TABLE Media ( + MediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + VolumeName TINYBLOB NOT NULL, + Slot INTEGER NOT NULL DEFAULT 0, + PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool, + MediaType TINYBLOB NOT NULL, + FirstWritten DATETIME NOT NULL, + LastWritten DATETIME NOT NULL, + LabelDate DATETIME NOT NULL, + VolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolBlocks INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolMounts INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + VolErrors INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolWrites INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolCapacityBytes BIGINT UNSIGNED NOT NULL, + VolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged', + 'Read-Only', 'Disabled', 
'Error', 'Busy', 'Used', 'Cleaning') NOT NULL, + Recycle TINYINT NOT NULL DEFAULT 0, + VolRetention BIGINT UNSIGNED NOT NULL DEFAULT 0, + VolUseDuration BIGINT UNSIGNED NOT NULL DEFAULT 0, + MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + InChanger TINYINT NOT NULL DEFAULT 0, + MediaAddressing TINYINT NOT NULL DEFAULT 0, + VolReadTime BIGINT UNSIGNED NOT NULL DEFAULT 0, + VolWriteTime BIGINT UNSIGNED NOT NULL DEFAULT 0, + PRIMARY KEY(MediaId), + INDEX (PoolId) + ); +CREATE TABLE Pool ( + PoolId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + NumVols INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVols INTEGER UNSIGNED NOT NULL DEFAULT 0, + UseOnce TINYINT NOT NULL, + UseCatalog TINYINT NOT NULL, + AcceptAnyVolume TINYINT DEFAULT 0, + VolRetention BIGINT UNSIGNED NOT NULL, + VolUseDuration BIGINT UNSIGNED NOT NULL, + MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED NOT NULL, + AutoPrune TINYINT DEFAULT 0, + Recycle TINYINT DEFAULT 0, + PoolType ENUM('Backup', 'Copy', 'Cloned', 'Archive', 'Migration', 'Scratch') NOT NULL, + LabelFormat TINYBLOB, + Enabled TINYINT DEFAULT 1, + ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + UNIQUE (Name(128)), + PRIMARY KEY (PoolId) + ); +CREATE TABLE Client ( + ClientId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + Uname TINYBLOB NOT NULL, /* full uname -a of client */ + AutoPrune TINYINT DEFAULT 0, + FileRetention BIGINT UNSIGNED NOT NULL, + JobRetention BIGINT UNSIGNED NOT NULL, + UNIQUE (Name(128)), + PRIMARY KEY(ClientId) + ); +CREATE TABLE BaseFiles ( + BaseId INTEGER UNSIGNED AUTO_INCREMENT, + BaseJobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + FileId INTEGER UNSIGNED NOT NULL REFERENCES File, + FileIndex INTEGER UNSIGNED, + PRIMARY KEY(BaseId) + ); +CREATE TABLE UnsavedFiles ( + UnsavedId INTEGER UNSIGNED AUTO_INCREMENT, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + PathId INTEGER UNSIGNED NOT NULL REFERENCES Path, + FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename, + PRIMARY KEY (UnsavedId) + ); +CREATE TABLE Version ( + VersionId INTEGER UNSIGNED NOT NULL + ); +-- Initialize Version +INSERT INTO Version (VersionId) VALUES (7); +CREATE TABLE Counters ( + Counter TINYBLOB NOT NULL, + MinValue INTEGER, + MaxValue INTEGER, + CurrentValue INTEGER, + WrapCounter TINYBLOB NOT NULL, + PRIMARY KEY (Counter(128)) + ); +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/developers/check_tex.pl b/docs/manuals/de/developers/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/de/developers/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. 
+ + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. 
+ while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/de/developers/daemonprotocol.tex b/docs/manuals/de/developers/daemonprotocol.tex new file mode 100644 index 00000000..0354bbd5 --- /dev/null +++ b/docs/manuals/de/developers/daemonprotocol.tex @@ -0,0 +1,284 @@ +%% +%% + +\chapter{Daemon Protocol} +\label{_ChapterStart2} +\index{Protocol!Daemon } +\index{Daemon Protocol } + +\section{General} +\index{General } +\addcontentsline{toc}{subsection}{General} + +This document describes the protocols used between the various daemons. As +Bacula has developed, it has become quite out of date. The general idea still +holds true, but the details of the fields for each command, and indeed the +commands themselves have changed considerably. + +It is intended to be a technical discussion of the general daemon protocols +and as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. + +\section{Low Level Network Protocol} +\index{Protocol!Low Level Network } +\index{Low Level Network Protocol } +\addcontentsline{toc}{subsection}{Low Level Network Protocol} + +At the lowest level, the network protocol is handled by {\bf BSOCK} packets +which contain a lot of information about the status of the network connection: +who is at the other end, etc. Each basic {\bf Bacula} network read or write +actually consists of two low level network read/writes. The first write always +sends four bytes of data in machine independent byte order. If data is to +follow, the first four bytes are a positive non-zero integer indicating the +length of the data that follow in the subsequent write. If the four byte +integer is zero or negative, it indicates a special request, a sort of network +signaling capability. In this case, no data packet will follow. The low level +BSOCK routines expect that only a single thread is accessing the socket at a +time. It is advised that multiple threads do not read/write the same socket. +If you must do this, you must provide some sort of locking mechanism. It would +not be appropriate for efficiency reasons to make every call to the BSOCK +routines lock and unlock the packet. + +\section{General Daemon Protocol} +\index{General Daemon Protocol } +\index{Protocol!General Daemon } +\addcontentsline{toc}{subsection}{General Daemon Protocol} + +In general, all the daemons follow the following global rules. There may be +exceptions depending on the specific case. Normally, one daemon will be +sending commands to another daemon (specifically, the Director to the Storage +daemon and the Director to the File daemon). + +\begin{itemize} +\item Commands are always ASCII commands that are upper/lower case dependent + as well as space sensitive. 
+\item All binary data is converted into ASCII (either with printf statements + or using base64 encoding). +\item All responses to commands sent are always prefixed with a return + numeric code where codes in the 1000's are reserved for the Director, the + 2000's are reserved for the File daemon, and the 3000's are reserved for the +Storage daemon. +\item Any response that is not prefixed with a numeric code is a command (or + subcommand if you like) coming from the other end. For example, while the + Director is corresponding with the Storage daemon, the Storage daemon can +request Catalog services from the Director. This convention permits each side +to send commands to the other daemon while simultaneously responding to +commands. +\item Any response that is of zero length, depending on the context, either + terminates the data stream being sent or terminates command mode prior to + closing the connection. +\item Any response that is of negative length is a special sign that normally + requires a response. For example, during data transfer from the File daemon + to the Storage daemon, normally the File daemon sends continuously without +intervening reads. However, periodically, the File daemon will send a packet +of length -1 indicating that the current data stream is complete and that the +Storage daemon should respond to the packet with an OK, ABORT JOB, PAUSE, +etc. This permits the File daemon to efficiently send data while at the same +time occasionally ``polling'' the Storage daemon for his status or any +special requests. + +Currently, these negative lengths are specific to the daemon, but shortly, +the range 0 to -999 will be standard daemon wide signals, while -1000 to +-1999 will be for Director user, -2000 to -2999 for the File daemon, and +-3000 to -3999 for the Storage daemon. +\end{itemize} + +\section{The Protocol Used Between the Director and the Storage Daemon} +\index{Daemon!Protocol Used Between the Director and the Storage } +\index{Protocol Used Between the Director and the Storage Daemon } +\addcontentsline{toc}{subsection}{Protocol Used Between the Director and the +Storage Daemon} + +Before sending commands to the File daemon, the Director opens a Message +channel with the Storage daemon, identifies itself and presents its password. +If the password check is OK, the Storage daemon accepts the Director. The +Director then passes the Storage daemon, the JobId to be run as well as the +File daemon authorization (append, read all, or read for a specific session). +The Storage daemon will then pass back to the Director a enabling key for this +JobId that must be presented by the File daemon when opening the job. Until +this process is complete, the Storage daemon is not available for use by File +daemons. + +\footnotesize +\begin{verbatim} +SD: listens +DR: makes connection +DR: Hello calling +SD: 3000 OK Hello +DR: JobId=nnn Allow=(append, read) Session=(*, SessionId) + (Session not implemented yet) +SD: 3000 OK Job Authorization= +DR: use device= media_type= + pool_name= pool_type= +SD: 3000 OK use device +\end{verbatim} +\normalsize + +For the Director to be authorized, the \lt{}Director-name\gt{} and the +\lt{}password\gt{} must match the values in one of the Storage daemon's +Director resources (there may be several Directors that can access a single +Storage daemon). 
+ +\section{The Protocol Used Between the Director and the File Daemon} +\index{Daemon!Protocol Used Between the Director and the File } +\index{Protocol Used Between the Director and the File Daemon } +\addcontentsline{toc}{subsection}{Protocol Used Between the Director and the +File Daemon} + +A typical conversation might look like the following: + +\footnotesize +\begin{verbatim} +FD: listens +DR: makes connection +DR: Hello calling +FD: 2000 OK Hello +DR: JobId=nnn Authorization= +FD: 2000 OK Job +DR: storage address = port = + name = mediatype = +FD: 2000 OK storage +DR: include +DR: +DR: + ... +DR: Null packet +FD: 2000 OK include +DR: exclude +DR: +DR: + ... +DR: Null packet +FD: 2000 OK exclude +DR: full +FD: 2000 OK full +DR: save +FD: 2000 OK save +FD: Attribute record for each file as sent to the + Storage daemon (described above). +FD: Null packet +FD: + e.g. + 3000 OK Volumes = + 3001 Volume = + + 3002 Volume data = + + ... additional Volume / Volume data pairs for volumes 2 .. n +FD: Null packet +FD: close socket +\end{verbatim} +\normalsize + +\section{The Save Protocol Between the File Daemon and the Storage Daemon} +\index{Save Protocol Between the File Daemon and the Storage Daemon } +\index{Daemon!Save Protocol Between the File Daemon and the Storage } +\addcontentsline{toc}{subsection}{Save Protocol Between the File Daemon and +the Storage Daemon} + +Once the Director has send a {\bf save} command to the File daemon, the File +daemon will contact the Storage daemon to begin the save. + +In what follows: FD: refers to information set via the network from the File +daemon to the Storage daemon, and SD: refers to information set from the +Storage daemon to the File daemon. + +\subsection{Command and Control Information} +\index{Information!Command and Control } +\index{Command and Control Information } +\addcontentsline{toc}{subsubsection}{Command and Control Information} + +Command and control information is exchanged in human readable ASCII commands. + + +\footnotesize +\begin{verbatim} +FD: listens +SD: makes connection +FD: append open session = [] +SD: 3000 OK ticket = +FD: append data +SD: 3000 OK data address = port = +\end{verbatim} +\normalsize + +\subsection{Data Information} +\index{Information!Data } +\index{Data Information } +\addcontentsline{toc}{subsubsection}{Data Information} + +The Data information consists of the file attributes and data to the Storage +daemon. For the most part, the data information is sent one way: from the File +daemon to the Storage daemon. This allows the File daemon to transfer +information as fast as possible without a lot of handshaking and network +overhead. + +However, from time to time, the File daemon needs to do a sort of checkpoint +of the situation to ensure that everything is going well with the Storage +daemon. To do so, the File daemon sends a packet with a negative length +indicating that he wishes the Storage daemon to respond by sending a packet of +information to the File daemon. The File daemon then waits to receive a packet +from the Storage daemon before continuing. + +All data sent are in binary format except for the header packet, which is in +ASCII. There are two packet types used data transfer mode: a header packet, +the contents of which are known to the Storage daemon, and a data packet, the +contents of which are never examined by the Storage daemon. + +The first data packet to the Storage daemon will be an ASCII header packet +consisting of the following data. 
+ +\lt{}File-Index\gt{} \lt{}Stream-Id\gt{} \lt{}Info\gt{} where {\bf +\lt{}File-Index\gt{}} is a sequential number beginning from one that +increments with each file (or directory) sent. + +where {\bf \lt{}Stream-Id\gt{}} will be 1 for the Attributes record and 2 for +uncompressed File data. 3 is reserved for the MD5 signature for the file. + +where {\bf \lt{}Info\gt{}} transmit information about the Stream to the +Storage Daemon. It is a character string field where each character has a +meaning. The only character currently defined is 0 (zero), which is simply a +place holder (a no op). In the future, there may be codes indicating +compressed data, encrypted data, etc. + +Immediately following the header packet, the Storage daemon will expect any +number of data packets. The series of data packets is terminated by a zero +length packet, which indicates to the Storage daemon that the next packet will +be another header packet. As previously mentioned, a negative length packet is +a request for the Storage daemon to temporarily enter command mode and send a +reply to the File daemon. Thus an actual conversation might contain the +following exchanges: + +\footnotesize +\begin{verbatim} +FD: <1 1 0> (header packet) +FD: +FD: Null packet +FD: <1 2 0> +FD: +FD: Packet length = -1 +SD: 3000 OK +FD: <2 1 0> +FD: +FD: Null packet +FD: <2 2 0> +FD: +FD: Null packet +FD: Null packet +FD: append end session +SD: 3000 OK end +FD: append close session +SD: 3000 OK Volumes = +SD: 3001 Volume = + +SD: 3002 Volume data = + +SD: ... additional Volume / Volume data pairs for + volumes 2 .. n +FD: close socket +\end{verbatim} +\normalsize + +The information returned to the File daemon by the Storage daemon in response +to the {\bf append close session} is transmit in turn to the Director. 
diff --git a/docs/manuals/de/developers/developers.css b/docs/manuals/de/developers/developers.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/de/developers/developers.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/de/developers/developers.tex b/docs/manuals/de/developers/developers.tex new file mode 100644 index 00000000..840b1a0a --- /dev/null +++ b/docs/manuals/de/developers/developers.tex @@ -0,0 +1,88 @@ +%% +%% + +\documentclass[11pt,a4paper]{report} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Developers' Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". 
+} + + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{generaldevel} +\include{platformsupport} +\include{daemonprotocol} +\include{director} +\include{file} +\include{storage} +\include{catalog} +\include{mediaformat} +\include{porting} +\include{gui-interface} +\include{tls-techdoc} +\include{regression} +\include{md5} +\include{mempool} +\include{netprotocol} +\include{smartall} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex + +\end{document} diff --git a/docs/manuals/de/developers/director.tex b/docs/manuals/de/developers/director.tex new file mode 100644 index 00000000..d8c4cd0f --- /dev/null +++ b/docs/manuals/de/developers/director.tex @@ -0,0 +1,18 @@ +%% +%% + +\chapter{Director Services Daemon} +\label{_ChapterStart6} +\index{Daemon!Director Services } +\index{Director Services Daemon } +\addcontentsline{toc}{section}{Director Services Daemon} + +This chapter is intended to be a technical discussion of the Director services +and as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. + +The {\bf Bacula Director} services consist of the program that supervises all +the backup and restore operations. + +To be written ... diff --git a/docs/manuals/de/developers/fdl.tex b/docs/manuals/de/developers/fdl.tex new file mode 100644 index 00000000..9304bb60 --- /dev/null +++ b/docs/manuals/de/developers/fdl.tex @@ -0,0 +1,511 @@ +%---------The file header--------------------------------------------- + +%% \usepackage[english]{babel} %language selection +%% \usepackage[T1]{fontenc} + +%%\pagenumbering{arabic} + +%% \usepackage{hyperref} +%% \hypersetup{colorlinks, +%% citecolor=black, +%% filecolor=black, +%% linkcolor=black, +%% urlcolor=black, +%% pdftex} + + +%--------------------------------------------------------------------- +\chapter{GNU Free Documentation License} +\index[general]{GNU ree Documentation License} +\index[general]{License!GNU ree Documentation} +\addcontentsline{toc}{section}{GNU ree Documentation License} + +%\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. 
+ +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\addcontentsline{toc}{section}{1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. 
+An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\addcontentsline{toc}{section}{2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\addcontentsline{toc}{section}{3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. 
The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\addcontentsline{toc}{section}{4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] 
+ Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\addcontentsline{toc}{section}{5. 
COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\addcontentsline{toc}{section}{6. COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\addcontentsline{toc}{section}{7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\addcontentsline{toc}{section}{8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. 
+Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\addcontentsline{toc}{section}{9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\addcontentsline{toc}{section}{10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +\addcontentsline{toc}{section}{ADDENDUM: How to use this License for your documents} +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. 
+\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/de/developers/file.tex b/docs/manuals/de/developers/file.tex new file mode 100644 index 00000000..ee89577b --- /dev/null +++ b/docs/manuals/de/developers/file.tex @@ -0,0 +1,68 @@ +%% +%% + +\chapter{File Services Daemon} +\label{_ChapterStart11} +\index{File Services Daemon } +\index{Daemon!File Services } +\addcontentsline{toc}{section}{File Services Daemon} + +Please note, this section is somewhat out of date as the code has evolved +significantly. The basic idea has not changed though. + +This chapter is intended to be a technical discussion of the File daemon +services and as such is not targeted at end users but rather at developers and +system administrators that want or need to know more of the working details of +{\bf Bacula}. + +The {\bf Bacula File Services} consist of the programs that run on the system +to be backed up and provide the interface between the Host File system and +Bacula -- in particular, the Director and the Storage services. + +When time comes for a backup, the Director gets in touch with the File daemon +on the client machine and hands it a set of ``marching orders'' which, if +written in English, might be something like the following: + +OK, {\bf File daemon}, it's time for your daily incremental backup. I want you +to get in touch with the Storage daemon on host archive.mysite.com and perform +the following save operations with the designated options. You'll note that +I've attached include and exclude lists and patterns you should apply when +backing up the file system. As this is an incremental backup, you should save +only files modified since the time you started your last backup which, as you +may recall, was 2000-11-19-06:43:38. Please let me know when you're done and +how it went. Thank you. + +So, having been handed everything it needs to decide what to dump and where to +store it, the File daemon doesn't need to have any further contact with the +Director until the backup is complete providing there are no errors. If there +are errors, the error messages will be delivered immediately to the Director. +While the backup is proceeding, the File daemon will send the file coordinates +and data for each file being backed up to the Storage daemon, which will in +turn pass the file coordinates to the Director to put in the catalog. + +During a {\bf Verify} of the catalog, the situation is different, since the +File daemon will have an exchange with the Director for each file, and will +not contact the Storage daemon. + +A {\bf Restore} operation will be very similar to the {\bf Backup} except that +during the {\bf Restore} the Storage daemon will not send storage coordinates +to the Director since the Director presumably already has them. On the other +hand, any error messages from either the Storage daemon or File daemon will +normally be sent directly to the Directory (this, of course, depends on how +the Message resource is defined). 
+ +\section{Commands Received from the Director for a Backup} +\index{Backup!Commands Received from the Director for a } +\index{Commands Received from the Director for a Backup } +\addcontentsline{toc}{subsection}{Commands Received from the Director for a +Backup} + +To be written ... + +\section{Commands Received from the Director for a Restore} +\index{Commands Received from the Director for a Restore } +\index{Restore!Commands Received from the Director for a } +\addcontentsline{toc}{subsection}{Commands Received from the Director for a +Restore} + +To be written ... diff --git a/docs/manuals/de/developers/fix_tex.pl b/docs/manuals/de/developers/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/de/developers/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . 
'}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/de/developers/generaldevel.tex b/docs/manuals/de/developers/generaldevel.tex new file mode 100644 index 00000000..9404e57e --- /dev/null +++ b/docs/manuals/de/developers/generaldevel.tex @@ -0,0 +1,1403 @@ +%% +%% + +\chapter{Bacula Developer Notes} +\label{_ChapterStart10} +\index{Bacula Developer Notes} +\index{Notes!Bacula Developer} +\addcontentsline{toc}{section}{Bacula Developer Notes} + +\section{General} +\index{General} +\addcontentsline{toc}{subsection}{General} + +This document is intended mostly for developers and describes the the general +framework of making Bacula source changes. + +\subsection{Contributions} +\index{Contributions} +\addcontentsline{toc}{subsubsection}{Contributions} + +Contributions from programmers are broken into two groups. The first are +contributions that are aids and not essential to Bacula. In general, these +will be scripts or will go into and examples or contributions directory. +For these kinds of non-essential contributions there is no obligation to do +a copyright assignment as described below. However, a copyright assignment +would still be appreciated. 
+
+The second class of contributions consists of those that will be integrated
+with Bacula and become an essential part of it. Within this class of
+contributions, there are two hurdles to surmount. The first is getting your
+patch accepted, and the second is dealing with copyright issues. The following
+text describes some of the requirements for such code.
+
+\subsection{Patches}
+\index{Patches}
+\addcontentsline{toc}{subsubsection}{Patches}
+
+Subject to the copyright assignment described below, your patches should be
+sent in {\bf diff -u} format relative to the current contents of the Source
+Forge SVN, which is the easiest to understand and integrate.
+Please be sure to use the Bacula indenting standard (see below).
+If you have checked out the source with SVN, you can get a diff using:
+
+\begin{verbatim}
+svn update
+svn diff > change.patch
+\end{verbatim}
+
+If you plan on doing significant development work over a period of time,
+after having your first patch reviewed and approved, you will be eligible
+for having developer SVN access so that you can commit your changes
+directly to the SVN repository. To do so, you will need a userid on Source
+Forge.
+
+\subsection{Copyrights}
+\index{Copyrights}
+\addcontentsline{toc}{subsubsection}{Copyrights}
+
+To avoid future problems concerning changing licensing or
+copyrights, all code contributions more than a handful of lines
+must be in the Public Domain or have the copyright transferred to
+the Free Software Foundation Europe e.V. with a Fiduciary License
+Agreement (FLA) as in the current code. Note, prior to
+November 2004, the code was copyrighted by Kern Sibbald and John
+Walker. After November 2004, the code was copyrighted by Kern
+Sibbald, then on the 15th of November 2006, the copyright was
+transferred to the Free Software Foundation Europe e.V.
+
+Your name should be clearly indicated as the author of the code, and you
+must be extremely careful not to violate any copyrights or use other
+people's code without acknowledging it. The purpose of this requirement is
+to avoid future copyright, patent, or intellectual property problems.
+Please read the LICENSE agreement in the main source code
+directory. When you sign the Fiduciary License Agreement (FLA)
+and send it in, you are agreeing to the terms of that LICENSE
+file.
+
+To understand the possible source of future problems, please
+examine the difficulties Mozilla is (was?) having finding
+previous contributors at \elink{
+http://www.mozilla.org/MPL/missing.html}
+{http://www.mozilla.org/MPL/missing.html}. The other important issue is to
+avoid copyright, patent, or intellectual property violations as are currently
+(May 2003) being claimed by SCO against IBM.
+
+Although the copyright will be held by the Free Software
+Foundation Europe e.V., each developer is expected to indicate
+that he wrote and/or modified a particular module (or file) and
+any other sources. The copyright assignment may seem a bit
+unusual, but in reality, it is not. Most large projects require
+this.
+
+If you have any doubts about this, please don't hesitate to ask. The
+objective is to assure the long-term survival of the Bacula project.
+
+Items not needing a copyright assignment are: most small changes,
+enhancements, or bug fixes of 5-10 lines of code, which amount to
+less than 20\% of any particular file.
+ +\subsection{Copyright Assignment -- Fiduciary License Agreement} +\index{Copyright Assignment} +\index{Assignment!Copyright} +\addcontentsline{toc}{subsubsection}{Copyright Assignment -- Fiduciary License Agreement} + +Since this is not a commercial enterprise, and we prefer to believe in +everyone's good faith, previously developers could assign the copyright by +explicitly acknowledging that they do so in their first submission. This +was sufficient if the developer is independent, or an employee of a +not-for-profit organization or a university. However, in an effort to +ensure that the Bacula code is really clean, beginning in August 2006, all +previous and future developers with SVN access will be asked to submit a +copyright assignment (or Fiduciary License Agreement -- FLA), +which means you agree to the LICENSE in the main source +directory. It also means that you receive back the right to use +the code that you have submitted. + +Any developer who wants to contribute and is employed by a company should +either list the employer as the owner of the code, or get +explicit permission from him to sign the copyright assignment. +This is because in many +countries, all work that an employee does whether on company time or in the +employee's free time is considered to be Intellectual Property of the +company. Obtaining official approval or an FLA from the company will avoid +misunderstandings between the employee, the company, and the Bacula +project. A good number of companies have already followed this procedure. + +The Fiduciary License Agreement is posted on the Bacula web site at: +\elink{http://www.bacula.org/FLA-bacula.en.pdf}{http://www.bacula.org/FLA-bacula.en.pdf} + +The instructions for filling out this agreement are also at: +\elink{http://www.bacula.org/?page=fsfe}{http://www.bacula.org/?page=fsfe} + +It should be filled out, then sent to: + +\begin{verbatim} + Free Software Foundation Europe + Freedom Task Force + Sumatrastrasse 25 + 8006 Zürich + Switzerland +\end{verbatim} + +Please note that the above address is different from the officially +registered office mentioned in the document. When you send in such a +complete document, please notify me: kern at sibbald dot com. + + + +\section{The Development Cycle} +\index{Developement Cycle} +\index{Cycle!Developement} +\addcontentsline{toc}{subsubsection}{Development Cycle} + +As I noted in the 1.38 ReleaseNotes, version 1.38 was different from prior +versions because it had a lot more contributions. I expect that this trend +will continue. As a consequence, I am going to modify how I normally do +development, and instead of making a list of all the features that I will +implement in the next version, I will personally sign up for one (maybe +two) projects at a time, and when they are complete, I will release a new +version. + +The difference is that I will have more time to review the new code that is +being contributed, and will be able to devote more time to a smaller number +of projects (1.38 had too many new features for me to handle correctly). + +I expect that future release schedules will be much the same, and the +number of new features will also be much the same providing that the +contributions continue to come -- and they show no signs of let up :-) + +\index{Feature Requests} +{\bf Feature Requests:} \\ +In addition, I would like to "formalize" the feature requests a bit. + +Instead of me maintaining an informal list of everything I run into +(kernstodo), I would like to maintain a "formal" list of projects. 
This
+means that all new feature requests, including those recently discussed on
+the email lists, must be formally submitted and approved.
+
+Formal submission of feature requests will take two forms: \\
+1. non-mandatory, but highly recommended, is to discuss proposed new features
+on the mailing list.\\
+2. Formal submission of a Feature Request in a special format.
+I'll give an example of this below, but you can also find it on the web
+site under "Support -\gt{} Feature Requests". Since it takes a bit of time to
+properly fill out a Feature Request form, you probably should check on the email list
+first.
+
+Once the Feature Request is received by the keeper of the projects list, it
+will be sent to me, and I will either accept it, send it back
+asking for clarification, send it to the email list asking for opinions, or
+reject it.
+
+If it is accepted, it will go in the "projects" file (a simple ASCII file)
+maintained in the main Bacula source directory.
+
+{\bf Implementation of Feature Requests:}\\
+Any qualified developer can sign up for a project. The project must have
+an entry in the projects file, and the developer's name will appear in the
+Status field.
+
+{\bf How Feature Requests are accepted:}\\
+Acceptance of Feature Requests depends on several things: \\
+1. feedback from users. If it is negative, the Feature Request will probably not be
+accepted. \\
+2. the difficulty of the project. A project that is so
+difficult that I cannot imagine finding someone to implement it probably won't
+be accepted. \\
+ 3. whether or not the Feature Request fits within the
+current strategy of Bacula (for example a Feature Request that requests changing the
+tape to tar format would not be accepted, ...)
+
+{\bf How Feature Requests are prioritized:}\\
+Once a Feature Request is accepted, it needs to be implemented. If you
+can find a developer for it, or one signs up for implementing it, then the
+Feature Request becomes top priority (at least for that developer).
+
+Between releases of Bacula, we will generally solicit Feature Request input
+for the next version, and by way of this email, we suggest that you discuss
+and send in your Feature Requests for the next release. Please
+verify that the Feature Request is not in the current list (attached to this email).
+
+Once users have had several weeks to submit Feature Requests, the keeper of the
+projects list will
+organize them, and request users to vote on them. This will allow
+prioritizing the Feature Requests. Having a priority is one thing, but
+getting it implemented is another thing -- we are hoping that the Bacula
+community will take more responsibility for assuring the implementation of
+accepted Feature Requests.
+
+Feature Request format:
+\begin{verbatim}
+============= Empty Feature Request form ===========
+Item n:   One line summary ...
+  Date:   Date submitted
+  Origin: Name and email of originator.
+  Status:
+
+  What:   More detailed explanation ...
+
+  Why:    Why it is important ...
+
+  Notes:  Additional notes or features (omit if not used)
+============== End Feature Request form ==============
+\end{verbatim}
+
+\begin{verbatim}
+============= Example Completed Feature Request form ===========
+Item 1:   Implement a Migration job type that will move the job
+          data from one device to another.
+  Origin: Sponsored by Riege Software International GmbH. Contact:
+          Daniel Holtkamp
+  Date:   28 October 2005
+  Status: Partially coded in 1.37 -- much more to do. Assigned to
+          Kern.
+ + What: The ability to copy, move, or archive data that is on a + device to another device is very important. + + Why: An ISP might want to backup to disk, but after 30 days + migrate the data to tape backup and delete it from + disk. Bacula should be able to handle this + automatically. It needs to know what was put where, + and when, and what to migrate -- it is a bit like + retention periods. Doing so would allow space to be + freed up for current backups while maintaining older + data on tape drives. + + Notes: Migration could be triggered by: + Number of Jobs + Number of Volumes + Age of Jobs + Highwater size (keep total size) + Lowwater mark +================================================= +\end{verbatim} + + +\section{Bacula Code Submissions and Projects} +\index{Submissions and Projects} +\addcontentsline{toc}{subsection}{Code Submissions and Projects} + +Getting code implemented in Bacula works roughly as follows: + +\begin{itemize} + +\item Kern is the project manager, but prefers not to be a "gate keeper". + This means that the developers are expected to be self-motivated, + and once they have experience submit directly to the SVN. However, + it is a good idea to have your patches reviewed prior to submitting, + and it is a bad idea to submit monster patches because no one will + be able to properly review them. See below for more details on this. + +\item There are growing numbers of contributions (very good). + +\item Some contributions come in the form of relatively small patches, + which Kern reviews, integrates, documents, tests, and maintains. + +\item All Bacula developers take full + responsibility for writing the code, posting as patches so that I can + review it as time permits, integrating it at an appropriate time, + responding to my requests for tweaking it (name changes, ...), + document it in the code, document it in the manual (even though + their mother tongue is not English), test it, develop and commit + regression scripts, and answer in a timely fashion all bug reports -- + even occassionally accepting additional bugs :-) + + This is a sustainable way of going forward with Bacula, and the + direction that the project will be taking more and more. For + example, in the past, we have had some very dedicated programmers + who did major projects. However, these + programmers due to outside obligations (job responsibilities change of + job, school duties, ...) could not continue to maintain the code. In + those cases, the code suffers from lack of maintenance, sometimes I + patch it, sometimes not. In the end, the code gets dropped from the + project (there are two such contributions that are heading in that + direction). When ever possible, we would like to avoid this, and + ensure a continuation of the code and a sharing of the development, + debugging, documentation, and maintenance responsibilities. +\end{itemize} + +\section{Patches for Released Versions} +\index{Patches for Released Versions} +\addcontentsline{toc}{subsection}{Patches for Released Versions} +If you fix a bug in a released version, you should, unless it is +an absolutely trivial bug, create and release a patch file for the +bug. The procedure is as follows: + +Fix the bug in the branch and in the trunk. + +Make a patch file for the branch and add the branch patch to +the patches directory in both the branch and the trunk. +The name should be 2.2.4-xxx.patch where xxx is unique, in this case it can +be "restore", e.g. 2.2.4-restore.patch. 
Add to the top of the
+file a brief description and instructions for applying it -- see for example
+2.2.4-poll-mount.patch. The best way to create the patch file is as
+follows:
+
+\begin{verbatim}
+  (edit) 2.2.4-restore.patch
+  (input description)
+  (end edit)
+
+  svn diff >>2.2.4-restore.patch
+\end{verbatim}
+
+Check to make sure no extra junk got put into the patch file (i.e.
+it should have the patch for that bug only).
+
+If there is not a bug report on the problem, create one, then add the
+patch to the bug report.
+
+Then upload it to the 2.2.x release of bacula-patches.
+
+So, in the end, the patch file is:
+\begin{itemize}
+\item Attached to the bug report
+
+\item In Branch-2.2/bacula/patches/...
+
+\item In the trunk
+
+\item Loaded on Source Forge bacula-patches 2.2.x release. When
+  you add it, click on the check box to send an Email so that all the
+  users that are monitoring SF patches get notified.
+\end{itemize}
+
+
+
+\section{SVN Usage}
+\index{SVN Usage}
+\addcontentsline{toc}{subsection}{SVN Usage}
+
+Please note that if you are familiar with CVS, SVN is very
+similar (and better), but there can be a few surprising
+differences.
+
+The *entire* Bacula SourceForge.net Subversion repository can be
+checked out through SVN with the following command:
+
+\begin{verbatim}
+svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula bacula
+\end{verbatim}
+
+With the above command, you will get everything, which is a very large
+amount of data:
+
+\begin{verbatim}
+branches/
+  Branch-1.32a/
+  ...
+  Branch-2.0/
+  import/
+  vendor/
+tags/
+  Release-1.1/
+  ...
+  Release-2.0.2/
+trunk/
+  bacula/
+  docs/
+  gui/
+  regress/
+  rescue/
+\end{verbatim}
+
+Note, you should NEVER commit code to any checkout that you have
+done of a tag. All tags (e.g. Release-1.1, ... Release-2.0.2)
+should be considered read-only.
+
+You may commit code to the most recent item in
+branches (in the above the most recent one is Branch-2.0). If
+you want to commit code to an older branch, then please contact
+Kern first.
+
+You may create your own tags and/or branches, but they should
+have a name clearly distinct from Branch-, Release-, or Beta-,
+which are official names used by the project. If you create a
+tag, then you should NEVER commit code to it, for the same
+reason noted above -- it should serve as a marker for something
+you released. If you create a branch, then you are free to
+commit to it as you wish.
+
+You may, of course, commit to the trunk.
+
+In summary:
+
+\begin{verbatim}
+branches
+  Branch-nnn
+tags
+  Release-nnn
+  Beta-nnn
+\end{verbatim}
+
+are reserved names to be created only by the project manager (or
+with his OK), where the nnn is any sequence of numbers and
+periods (e.g. 2.0, 2.0.1, ...).
+
+In addition, all tags, even those that you create, are read-only
+forever. Typically tags represent release points either in the
+trunk or in a branch.
+
+
+Coming back to getting source code:
+if you only want the current Bacula source code, you could use:
+
+\begin{verbatim}
+svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula/trunk/bacula bacula
+\end{verbatim}
+
+To view what is in the SVN, point your browser at the following URL:
+http://bacula.svn.sourceforge.net/viewvc/bacula/
+
+Many of the Subversion (svn) commands are almost identical to those that
+you have used for cvs, but some (such as a checkout) can have surprising
+results, so you should take a careful look at the documentation.
+ +Robert has kindly provided the following documentation on the new +svn repository and how to use it: + +Here is the list of branches: +\begin{verbatim} + Branch-1.32a + Branch-1.32e + Branch-1.34.2 + Branch-1.34.5 + Branch-1.36 + Branch-1.36.1 + Branch-1.36.2 + Branch-1.38 + Branch-2.0 + import + vendor +\end{verbatim} + +The list of tags is: +\begin{verbatim} + Release-1.1 Release-1.19 Release-1.19a Release-1.19b + Release-1.20 Release-1.21 Release-1.22 Release-1.23 + Release-1.23a Release-1.24 Release-1.25 Release-1.25a + Release-1.26 Release-1.27 Release-1.27a Release-1.27b + Release-1.27c Release-1.28 Release-1.29 Release-1.30 + Release-1.31 Release-1.31a Release-1.32 Release-1.32a + Release-1.32b Release-1.32c Release-1.32d Release-1.32e + Release-1.32f Release-1.32f-2 Release-1.32f-3 Release-1.32f-4 + Release-1.32f-5 Release-1.34.0 Release-1.34.1 Release-1.34.3 + Release-1.34.4 Release-1.34.5 Release-1.34.6 Release-1.35.1 + Release-1.35.2 Release-1.35.3 Release-1.35.6 Release-1.35.7 + Release-1.35.8 Release-1.36.0 Release-1.36.1 Release-1.36.2 + Release-1.36.3 Release-1.38.0 Release-1.38.1 Release-1.38.10 + Release-1.38.11 Release-1.38.2 Release-1.38.3 Release-1.38.4 + Release-1.38.5 Release-1.38.6 Release-1.38.7 Release-1.38.8 + Release-1.38.9 Release-1.8.1 Release-1.8.2 Release-1.8.3 + Release-1.8.4 Release-1.8.5 Release-1.8.6 Release-2.0.0 + Release-2.0.1 Release-2.0.2 +\end{verbatim} + +Here is a list of commands to get you started. The recommended book is +"Version Control with Subversion", by Ben Collins-Sussmann, +Brian W. Fitzpatrick, and Michael Pilato, O'Reilly. The book is +Open Source, so it is also available on line at: + +\begin{verbatim} + http://svnbook.red-bean.com +\end{verbatim} + +Get a list of commands + +\begin{verbatim} + svn help +\end{verbatim} + +Get a help with a command + +\begin{verbatim} + svn help command +\end{verbatim} + +Checkout the HEAD revision of all modules from the project into the +directory bacula-new + +\begin{verbatim} + svn co https://bacula.svn.sourceforge.net/svnroot/bacula/trunk bacula.new +\end{verbatim} + +Checkout the HEAD revision of the bacula module into the bacula subdirectory + +\begin{verbatim} + svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula/trunk/bacula +\end{verbatim} + +See which files have changed in the working copy + +\begin{verbatim} + svn status +\end{verbatim} + +See which files are out of date + +\begin{verbatim} + svn status -u +\end{verbatim} + +Add a new file file.c + +\begin{verbatim} + svn add file.c +\end{verbatim} + +Create a new directory + +\begin{verbatim} + svn mkdir newdir +\end{verbatim} + +Delete an obsolete file + +\begin{verbatim} + svn delete file.c +\end{verbatim} + +Rename a file + +\begin{verbatim} + svn move file.c newfile.c +\end{verbatim} + +Move a file to a new location + +\begin{verbatim} + svn move file.c ../newdir/file.c +\end{verbatim} + +Copy a file retaining the original history in the new file + +\begin{verbatim} + svn copy file.c newfile.c +\end{verbatim} + +Update the working copy with the outstanding changes + +\begin{verbatim} + svn update +\end{verbatim} + +Compare working copy with the repository + +\begin{verbatim} + svn diff file.c +\end{verbatim} + +Commit the changes in the local working copy + +\begin{verbatim} + svn commit +\end{verbatim} + +Specify which files are ignored in the current directory + +\begin{verbatim} + svn propedit svn:ignore . 
+\end{verbatim}
+
+Mark a file to be executable
+
+\begin{verbatim}
+  svn propset svn:executable '*' prog.sh
+\end{verbatim}
+
+Unmark a file as executable
+
+\begin{verbatim}
+  svn propdel svn:executable prog.sh
+\end{verbatim}
+
+List a file's properties
+
+\begin{verbatim}
+  svn proplist file.c
+\end{verbatim}
+
+Create a branch for a new version
+
+\begin{verbatim}
+  svn copy https://bacula.svn.sourceforge.net/svnroot/bacula/trunk \
+       https://bacula.svn.sourceforge.net/svnroot/bacula/branches/Branch-2.1
+\end{verbatim}
+
+Tag a version for a new release
+
+\begin{verbatim}
+  svn copy https://bacula.svn.sourceforge.net/svnroot/bacula/branches/Branch-2.1 \
+       https://bacula.svn.sourceforge.net/svnroot/bacula/tags/Release-2.1
+\end{verbatim}
+
+
+Let's say you are working in the directory scripts. You would then do:
+
+\begin{verbatim}
+cd scripts
+(edit some files)
+\end{verbatim}
+
+When you are happy with your changes, you can do the following:
+
+\begin{verbatim}
+cd bacula (to your top level directory)
+svn diff > my-changes.patch
+\end{verbatim}
+
+When the command is done, you can look in the file my-changes.patch
+and you will see all the changes you have made to your copy of the
+repository. Make sure that you understand all the changes that
+it reports before proceeding. If you modified files that you do
+not want to commit to the main repository, you can simply delete
+them from your local directory, and they will be restored from the
+repository with the "svn update" that is shown below. Normally, you
+should not find changes to files that you do not want to commit, and
+if you find yourself in that position a lot, you are probably doing
+something wrong.
+
+Let's assume that now you want to commit your changes to the main
+SVN repository.
+
+First do:
+
+\begin{verbatim}
+cd bacula
+svn update
+\end{verbatim}
+
+When you do this, it will pull any changes made by other developers into
+your local copy of the repository, and it will check for conflicts. If there
+are any, it will tell you, and you will need to resolve them. The problems
+of resolving conflicts are a bit more than this document can cover, but
+you can examine the files it claims have conflicts and look for the
+conflict markers \lt{}\lt{}\lt{}\lt{}, or look in the .rej files that it
+creates. If you have problems, just ask on the developer's list.
+
+Note, doing the above "svn update" is not absolutely necessary. There are
+times when you may be working on code and you want to commit it, but you
+explicitly do not want to move up to the latest version of the code in
+the SVN. If that is the case, you can simply skip the "svn update" and
+do the commit shown below. If the commit fails because of a conflict, it
+will tell you, and you must resolve the conflict before it will permit
+you to do the commit.
+
+Once your local copy of the repository has been updated, you can now
+commit your changes:
+
+\begin{verbatim}
+svn commit -m "Some comment about what you changed"
+\end{verbatim}
+
+or, if you really only want to commit a single file, you can do:
+
+\begin{verbatim}
+svn commit -m "comment" scripts/file-I-edited
+\end{verbatim}
+
+Note, if you have done a build in your directory, or you have added
+other new files, the commit will update only the files that are
+actually in the repository. For example, none of the object files
+are stored in the repository, so when you do a commit, those object
+files will simply be ignored.
+ +If you want to add new files or remove files from the main SVN +repository, and you are not experienced with SVN, please ask Kern +to do it. If you follow the simple steps above, it is unlikely that +you will do any damage to the repository, and if you do, it is always +possible for us to recover, but it can be painful. + +If you are only working in one subdirectory of say the bacula project, +for example, the scripts directory, you can do your commit from +that subdirectory, and only the changes in that directory and all its +subdirectories will be committed. This can be helpful for translators. +If you are doing a French translation, you will be working in +docs/manual-fr, and if you are always cd'ed into that directory when +doing your commits, your commit will effect only that directory. As +long as you are careful only to change files that you want changed, +you have little to worry about. + +\section{Subversion Resources} +\index{Subversion (svn) Resources} +\addcontentsline{toc}{subsection}{Subversion Resources} + +\begin{verbatim} +cvs2svn Statistics: +------------------ +Total CVS Files: 3286 +Total CVS Revisions: 28924 +Total Unique Tags: 63 +Total Unique Branches: 11 +CVS Repos Size in KB: 232421 +Total SVN Commits: 4116 +First Revision Date: Tue Apr 23 12:42:57 2002 +Last Revision Date: Tue Feb 6 06:37:57 2007 +\end{verbatim} + +The new Subversion repository size on Robert's machine: + +\begin{verbatim} +4.0K bacula-tst/dav +12K bacula-tst/locks +40K bacula-tst/hooks +16K bacula-tst/conf +190M bacula-tst/db/revs +17M bacula-tst/db/revprops +4.0K bacula-tst/db/transactions +206M bacula-tst/db +206M bacula-tst +\end{verbatim} + + +Main Subversion Web Page +\elink{http://subversion.tigris.org}{http://subversion.tigris.org} + +Subversion Book +\elink{http://svnbook.red-bean.com}{http://svnbook.red-bean.com} + +Subversion Clients +\elink{http://subversion.tigris.org/project\_packages.html}{http://subversion.tigris.org/project\_packages.html} + + (For Windows users the TortoiseSVN package is awesome) + +GUI UNIX client link +\elink{http://rapidsvn.tigris.org/}{http://rapidsvn.tigris.org/} + +A nice KDE GUI client: +kdesvn + + + +\section{Developing Bacula} +\index{Developing Bacula} +\index{Bacula!Developing} +\addcontentsline{toc}{subsubsection}{Developing Bacula} + +Typically the simplest way to develop Bacula is to open one xterm window +pointing to the source directory you wish to update; a second xterm window at +the top source directory level, and a third xterm window at the bacula +directory \lt{}top\gt{}/src/bacula. After making source changes in one of the +directories, in the top source directory xterm, build the source, and start +the daemons by entering: + +make and + +./startit then in the enter: + +./console or + +./gnome-console to start the Console program. Enter any commands for testing. +For example: run kernsverify full. + +Note, the instructions here to use {\bf ./startit} are different from using a +production system where the administrator starts Bacula by entering {\bf +./bacula start}. This difference allows a development version of {\bf Bacula} +to be run on a computer at the same time that a production system is running. +The {\bf ./startit} strip starts {\bf Bacula} using a different set of +configuration files, and thus permits avoiding conflicts with any production +system. + +To make additional source changes, exit from the Console program, and in the +top source directory, stop the daemons by entering: + +./stopit then repeat the process. 
+ +\subsection{Debugging} +\index{Debugging} +\addcontentsline{toc}{subsubsection}{Debugging} + +Probably the first thing to do is to turn on debug output. + +A good place to start is with a debug level of 20 as in {\bf ./startit -d20}. +The startit command starts all the daemons with the same debug level. +Alternatively, you can start the appropriate daemon with the debug level you +want. If you really need more info, a debug level of 60 is not bad, and for +just about everything a level of 200. + +\subsection{Using a Debugger} +\index{Using a Debugger} +\index{Debugger!Using a} +\addcontentsline{toc}{subsubsection}{Using a Debugger} + +If you have a serious problem such as a segmentation fault, it can usually be +found quickly using a good multiple thread debugger such as {\bf gdb}. For +example, suppose you get a segmentation violation in {\bf bacula-dir}. You +might use the following to find the problem: + +\lt{}start the Storage and File daemons\gt{} +cd dird +gdb ./bacula-dir +run -f -s -c ./dird.conf +\lt{}it dies with a segmentation fault\gt{} +where +The {\bf -f} option is specified on the {\bf run} command to inhibit {\bf +dird} from going into the background. You may also want to add the {\bf -s} +option to the run command to disable signals which can potentially interfere +with the debugging. + +As an alternative to using the debugger, each {\bf Bacula} daemon has a built +in back trace feature when a serious error is encountered. It calls the +debugger on itself, produces a back trace, and emails the report to the +developer. For more details on this, please see the chapter in the main Bacula +manual entitled ``What To Do When Bacula Crashes (Kaboom)''. + +\subsection{Memory Leaks} +\index{Leaks!Memory} +\index{Memory Leaks} +\addcontentsline{toc}{subsubsection}{Memory Leaks} + +Because Bacula runs routinely and unattended on client and server machines, it +may run for a long time. As a consequence, from the very beginning, Bacula +uses SmartAlloc to ensure that there are no memory leaks. To make detection of +memory leaks effective, all Bacula code that dynamically allocates memory MUST +have a way to release it. In general when the memory is no longer needed, it +should be immediately released, but in some cases, the memory will be held +during the entire time that Bacula is executing. In that case, there MUST be a +routine that can be called at termination time that releases the memory. In +this way, we will be able to detect memory leaks. Be sure to immediately +correct any and all memory leaks that are printed at the termination of the +daemons. + +\subsection{Special Files} +\index{Files!Special} +\index{Special Files} +\addcontentsline{toc}{subsubsection}{Special Files} + +Kern uses files named 1, 2, ... 9 with any extension as scratch files. Thus +any files with these names are subject to being rudely deleted at any time. + +\subsection{When Implementing Incomplete Code} +\index{Code!When Implementing Incomplete} +\index{When Implementing Incomplete Code} +\addcontentsline{toc}{subsubsection}{When Implementing Incomplete Code} + +Please identify all incomplete code with a comment that contains + +\begin{verbatim} +***FIXME*** +\end{verbatim} + +where there are three asterisks (*) before and after the word +FIXME (in capitals) and no intervening spaces. This is important as it allows +new programmers to easily recognize where things are partially implemented. 
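
For example, a minimal sketch (the routine name and the missing
functionality are invented purely for illustration) of such a marker in a
partially implemented routine:

\begin{verbatim}
/* Illustrative sketch only -- not actual Bacula code */
static bool prune_expired_volumes(JCR *jcr)
{
   /* ***FIXME*** handle the case where the catalog cannot be opened */
   return true;
}
\end{verbatim}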
+ +\subsection{Bacula Source File Structure} +\index{Structure!Bacula Source File} +\index{Bacula Source File Structure} +\addcontentsline{toc}{subsubsection}{Bacula Source File Structure} + +The distribution generally comes as a tar file of the form {\bf +bacula.x.y.z.tar.gz} where x, y, and z are the version, release, and update +numbers respectively. + +Once you detar this file, you will have a directory structure as follows: + +\footnotesize +\begin{verbatim} +| +Tar file: +|- depkgs + |- mtx (autochanger control program + tape drive info) + |- sqlite (SQLite database program) + +Tar file: +|- depkgs-win32 + |- pthreads (Native win32 pthreads library -- dll) + |- zlib (Native win32 zlib library) + |- wx (wxWidgets source code) + +Project bacula: +|- bacula (main source directory containing configuration + | and installation files) + |- autoconf (automatic configuration files, not normally used + | by users) + |- intl (programs used to translate) + |- platforms (OS specific installation files) + |- redhat (Red Hat installation) + |- solaris (Sun installation) + |- freebsd (FreeBSD installation) + |- irix (Irix installation -- not tested) + |- unknown (Default if system not identified) + |- po (translations of source strings) + |- src (source directory; contains global header files) + |- cats (SQL catalog database interface directory) + |- console (bacula user agent directory) + |- dird (Director daemon) + |- filed (Unix File daemon) + |- win32 (Win32 files to make bacula-fd be a service) + |- findlib (Unix file find library for File daemon) + |- gnome-console (GNOME version of console program) + |- lib (General Bacula library) + |- stored (Storage daemon) + |- tconsole (Tcl/tk console program -- not yet working) + |- testprogs (test programs -- normally only in Kern's tree) + |- tools (Various tool programs) + |- win32 (Native Win32 File daemon) + |- baculafd (Visual Studio project file) + |- compat (compatibility interface library) + |- filed (links to src/filed) + |- findlib (links to src/findlib) + |- lib (links to src/lib) + |- console (beginning of native console program) + |- wx-console (wxWidget console Win32 specific parts) + |- wx-console (wxWidgets console main source program) + +Project regress: +|- regress (Regression scripts) + |- bin (temporary directory to hold Bacula installed binaries) + |- build (temporary directory to hold Bacula source) + |- scripts (scripts and .conf files) + |- tests (test scripts) + |- tmp (temporary directory for temp files) + |- working (temporary working directory for Bacula daemons) + +Project docs: +|- docs (documentation directory) + |- developers (Developer's guide) + |- home-page (Bacula's home page source) + |- manual (html document directory) + |- manual-fr (French translation) + |- manual-de (German translation) + |- techlogs (Technical development notes); + +Project rescue: +|- rescue (Bacula rescue CDROM) + |- linux (Linux rescue CDROM) + |- cdrom (Linux rescue CDROM code) + ... + |- solaris (Solaris rescue -- incomplete) + |- freebsd (FreeBSD rescue -- incomplete) + +Project gui: +|- gui (Bacula GUI projects) + |- bacula-web (Bacula web php management code) + |- bimagemgr (Web application for burning CDROMs) + + +\end{verbatim} +\normalsize + +\subsection{Header Files} +\index{Header Files} +\index{Files!Header} +\addcontentsline{toc}{subsubsection}{Header Files} + +Please carefully follow the scheme defined below as it permits in general only +two header file includes per C file, and thus vastly simplifies programming. 
+With a large complex project like Bacula, it isn't always easy to ensure that +the right headers are invoked in the right order (there are a few kludges to +make this happen -- i.e. in a few include files because of the chicken and egg +problem, certain references to typedefs had to be replaced with {\bf void} ). + +Every file should include {\bf bacula.h}. It pulls in just about everything, +with very few exceptions. If you have system dependent ifdefing, please do it +in {\bf baconfig.h}. The version number and date are kept in {\bf version.h}. + +Each of the subdirectories (console, cats, dird, filed, findlib, lib, stored, +...) contains a single directory dependent include file generally the name of +the directory, which should be included just after the include of {\bf +bacula.h}. This file (for example, for the dird directory, it is {\bf dird.h}) +contains either definitions of things generally needed in this directory, or +it includes the appropriate header files. It always includes {\bf protos.h}. +See below. + +Each subdirectory contains a header file named {\bf protos.h}, which contains +the prototypes for subroutines exported by files in that directory. {\bf +protos.h} is always included by the main directory dependent include file. + +\subsection{Programming Standards} +\index{Standards!Programming} +\index{Programming Standards} +\addcontentsline{toc}{subsubsection}{Programming Standards} + +For the most part, all code should be written in C unless there is a burning +reason to use C++, and then only the simplest C++ constructs will be used. +Note, Bacula is slowly evolving to use more and more C++. + +Code should have some documentation -- not a lot, but enough so that I can +understand it. Look at the current code, and you will see that I document more +than most, but am definitely not a fanatic. + +I prefer simple linear code where possible. Gotos are strongly discouraged +except for handling an error to either bail out or to retry some code, and +such use of gotos can vastly simplify the program. + +Remember this is a C program that is migrating to a {\bf tiny} subset of C++, +so be conservative in your use of C++ features. + +\subsection{Do Not Use} +\index{Use!Do Not} +\index{Do Not Use} +\addcontentsline{toc}{subsubsection}{Do Not Use} + +\begin{itemize} + \item STL -- it is totally incomprehensible. +\end{itemize} + +\subsection{Avoid if Possible} +\index{Possible!Avoid if} +\index{Avoid if Possible} +\addcontentsline{toc}{subsubsection}{Avoid if Possible} + +\begin{itemize} +\item Using {\bf void *} because this generally means that one must + using casting, and in C++ casting is rather ugly. It is OK to use + void * to pass structure address where the structure is not known + to the routines accepting the packet (typically callback routines). + However, declaring "void *buf" is a bad idea. Please use the + correct types whenever possible. + +\item Using undefined storage specifications such as (short, int, long, + long long, size\_t ...). The problem with all these is that the number of bytes + they allocate depends on the compiler and the system. Instead use + Bacula's types (int8\_t, uint8\_t, int32\_t, uint32\_t, int64\_t, and + uint64\_t). This guarantees that the variables are given exactly the + size you want. Please try at all possible to avoid using size\_t ssize\_t + and the such. They are very system dependent. However, some system + routines may need them, so their use is often unavoidable. 
+ +\item Returning a malloc'ed buffer from a subroutine -- someone will forget + to release it. + +\item Heap allocation (malloc) unless needed -- it is expensive. Use + POOL\_MEM instead. + +\item Templates -- they can create portability problems. + +\item Fancy or tricky C or C++ code, unless you give a good explanation of + why you used it. + +\item Too much inheritance -- it can complicate the code, and make reading it + difficult (unless you are in love with colons) + +\end{itemize} + +\subsection{Do Use Whenever Possible} +\index{Possible!Do Use Whenever} +\index{Do Use Whenever Possible} +\addcontentsline{toc}{subsubsection}{Do Use Whenever Possible} + +\begin{itemize} +\item Locking and unlocking within a single subroutine. + +\item A single point of exit from all subroutines. A goto is + perfectly OK to use to get out early, but only to a label + named bail\_out, and possibly an ok\_out. See current code + examples. + +\item Malloc and free within a single subroutine. + +\item Comments and global explanations on what your code or algorithm does. + +\end{itemize} + +\subsection{Indenting Standards} +\index{Standards!Indenting} +\index{Indenting Standards} +\addcontentsline{toc}{subsubsection}{Indenting Standards} + +I cannot stand code indented 8 columns at a time. This makes the code +unreadable. Even 4 at a time uses a lot of space, so I have adopted indenting +3 spaces at every level. Note, indention is the visual appearance of the +source on the page, while tabbing is replacing a series of up to 8 spaces from +a tab character. + +The closest set of parameters for the Linux {\bf indent} program that will +produce reasonably indented code are: + +\footnotesize +\begin{verbatim} +-nbad -bap -bbo -nbc -br -brs -c36 -cd36 -ncdb -ce -ci3 -cli0 +-cp36 -d0 -di1 -ndj -nfc1 -nfca -hnl -i3 -ip0 -l85 -lp -npcs +-nprs -npsl -saf -sai -saw -nsob -nss -nbc -ncs -nbfda +\end{verbatim} +\normalsize + +You can put the above in your .indent.pro file, and then just invoke indent on +your file. However, be warned. This does not produce perfect indenting, and it +will mess up C++ class statements pretty badly. + +Braces are required in all if statements (missing in some very old code). To +avoid generating too many lines, the first brace appears on the first line +(e.g. of an if), and the closing brace is on a line by itself. E.g. + +\footnotesize +\begin{verbatim} + if (abc) { + some_code; + } +\end{verbatim} +\normalsize + +Just follow the convention in the code. Originally I indented case clauses +under a switch(), but now I prefer non-indented cases. + +\footnotesize +\begin{verbatim} + switch (code) { + case 'A': + do something + break; + case 'B': + again(); + break; + default: + break; + } +\end{verbatim} +\normalsize + +Avoid using // style comments except for temporary code or turning off debug +code. Standard C comments are preferred (this also keeps the code closer to +C). + +Attempt to keep all lines less than 85 characters long so that the whole line +of code is readable at one time. This is not a rigid requirement. + +Always put a brief description at the top of any new file created describing +what it does and including your name and the date it was first written. Please +don't forget any Copyrights and acknowledgments if it isn't 100\% your code. +Also, include the Bacula copyright notice that is in {\bf src/c}. + +In general you should have two includes at the top of the an include for the +particular directory the code is in, for includes are needed, but this should +be rare. 
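
Putting the above together, here is a minimal sketch of a Director source
file (the routine names update\_client\_record, db\_open, db\_update, and
db\_close are invented for illustration; only the include order, the
three-space indenting, the mandatory braces, and the bail\_out exit label
reflect the rules described in this chapter):

\footnotesize
\begin{verbatim}
/* Illustrative sketch only -- routine names are invented */
#include "bacula.h"           /* always first: pulls in nearly everything */
#include "dird.h"             /* directory-dependent header; includes protos.h */

static bool update_client_record(JCR *jcr)
{
   bool ok = false;

   if (!db_open(jcr)) {       /* braces required, even for one statement */
      goto bail_out;          /* early exit only via the bail_out label */
   }
   if (!db_update(jcr)) {
      goto bail_out;
   }
   ok = true;

bail_out:
   db_close(jcr);             /* single point of exit */
   return ok;
}
\end{verbatim}
\normalsize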
+ +In general (except for self-contained packages), prototypes should all be put +in {\bf protos.h} in each directory. + +Always put space around assignment and comparison operators. + +\footnotesize +\begin{verbatim} + a = 1; + if (b >= 2) { + cleanup(); + } +\end{verbatim} +\normalsize + +but your can compress things in a {\bf for} statement: + +\footnotesize +\begin{verbatim} + for (i=0; i < del.num_ids; i++) { + ... +\end{verbatim} +\normalsize + +Don't overuse the inline if (?:). A full {\bf if} is preferred, except in a +print statement, e.g.: + +\footnotesize +\begin{verbatim} + if (ua->verbose \&& del.num_del != 0) { + bsendmsg(ua, _("Pruned %d %s on Volume %s from catalog.\n"), del.num_del, + del.num_del == 1 ? "Job" : "Jobs", mr->VolumeName); + } +\end{verbatim} +\normalsize + +Leave a certain amount of debug code (Dmsg) in code you submit, so that future +problems can be identified. This is particularly true for complicated code +likely to break. However, try to keep the debug code to a minimum to avoid +bloating the program and above all to keep the code readable. + +Please keep the same style in all new code you develop. If you include code +previously written, you have the option of leaving it with the old indenting +or re-indenting it. If the old code is indented with 8 spaces, then please +re-indent it to Bacula standards. + +If you are using {\bf vim}, simply set your tabstop to 8 and your shiftwidth +to 3. + +\subsection{Tabbing} +\index{Tabbing} +\addcontentsline{toc}{subsubsection}{Tabbing} + +Tabbing (inserting the tab character in place of spaces) is as normal on all +Unix systems -- a tab is converted space up to the next column multiple of 8. +My editor converts strings of spaces to tabs automatically -- this results in +significant compression of the files. Thus, you can remove tabs by replacing +them with spaces if you wish. Please don't confuse tabbing (use of tab +characters) with indenting (visual alignment of the code). + +\subsection{Don'ts} +\index{Don'ts} +\addcontentsline{toc}{subsubsection}{Don'ts} + +Please don't use: + +\footnotesize +\begin{verbatim} +strcpy() +strcat() +strncpy() +strncat(); +sprintf() +snprintf() +\end{verbatim} +\normalsize + +They are system dependent and un-safe. These should be replaced by the Bacula +safe equivalents: + +\footnotesize +\begin{verbatim} +char *bstrncpy(char *dest, char *source, int dest_size); +char *bstrncat(char *dest, char *source, int dest_size); +int bsnprintf(char *buf, int32_t buf_len, const char *fmt, ...); +int bvsnprintf(char *str, int32_t size, const char *format, va_list ap); +\end{verbatim} +\normalsize + +See src/lib/bsys.c for more details on these routines. + +Don't use the {\bf \%lld} or the {\bf \%q} printf format editing types to edit +64 bit integers -- they are not portable. Instead, use {\bf \%s} with {\bf +edit\_uint64()}. For example: + +\footnotesize +\begin{verbatim} + char buf[100]; + uint64_t num = something; + char ed1[50]; + bsnprintf(buf, sizeof(buf), "Num=%s\n", edit_uint64(num, ed1)); +\end{verbatim} +\normalsize + +The edit buffer {\bf ed1} must be at least 27 bytes long to avoid overflow. +See src/lib/edit.c for more details. If you look at the code, don't start +screaming that I use {\bf lld}. I actually use subtle trick taught to me by +John Walker. The {\bf lld} that appears in the editing routine is actually +{\bf \#define} to a what is needed on your OS (usually ``lld'' or ``q'') and +is defined in autoconf/configure.in for each OS. 
C string concatenation causes
+the appropriate string to be concatenated to the ``\%''.
+
+Also, please don't use the STL or Templates or any complicated C++ code.
+
+\subsection{Message Classes}
+\index{Classes!Message}
+\index{Message Classes}
+\addcontentsline{toc}{subsubsection}{Message Classes}
+
+Currently, there are five classes of messages: Debug, Error, Job, Memory,
+and Queued.
+
+\subsection{Debug Messages}
+\index{Messages!Debug}
+\index{Debug Messages}
+\addcontentsline{toc}{subsubsection}{Debug Messages}
+
+Debug messages are designed to be turned on at a specified debug level and are
+always sent to STDOUT. They are designed to be used only in the development
+and debugging process. They are coded as:
+
+DmsgN(level, message, arg1, ...) where the N is a number indicating how many
+arguments are to be substituted into the message (i.e. it is a count of the
+number of arguments you have in your message -- generally the number of percent
+signs (\%)). {\bf level} is the debug level at which you wish the message to
+be printed. message is the debug message to be printed, and arg1, ... are the
+arguments to be substituted. Since not all compilers support \#defines with
+varargs, you must explicitly specify how many arguments you have.
+
+When the debug message is printed, it will automatically be prefixed by the
+name of the daemon which is running, the filename where the Dmsg is, and the
+line number within the file.
+
+Some actual examples are:
+
+Dmsg2(20, ``MD5len=\%d MD5=\%s\textbackslash{}n'', strlen(buf), buf);
+
+Dmsg1(9, ``Created client \%s record\textbackslash{}n'', client->hdr.name);
+
+\subsection{Error Messages}
+\index{Messages!Error}
+\index{Error Messages}
+\addcontentsline{toc}{subsubsection}{Error Messages}
+
+Error messages are messages that are related to the daemon as a whole rather
+than a particular job. For example, an out of memory condition may generate an
+error message. They should be very rarely needed. In general, you should be
+using Job and Job Queued messages (Jmsg and Qmsg). They are coded as:
+
+EmsgN(error-code, level, message, arg1, ...) As with debug messages, you must
+explicitly code the number of arguments to be substituted in the message.
+error-code indicates the severity or class of error, and it may be one of the
+following:
+
+\addcontentsline{lot}{table}{Message Error Code Classes}
+\begin{longtable}{lp{3in}}
+{{\bf M\_ABORT} } & {Causes the daemon to immediately abort. This should be
+used only in extreme cases. It attempts to produce a traceback. } \\
+{{\bf M\_ERROR\_TERM} } & {Causes the daemon to immediately terminate. This
+should be used only in extreme cases. It does not produce a traceback. } \\
+{{\bf M\_FATAL} } & {Causes the daemon to terminate the current job, but the
+daemon keeps running. } \\
+{{\bf M\_ERROR} } & {Reports the error. The daemon and the job continue
+running. } \\
+{{\bf M\_WARNING} } & {Reports a warning message. The daemon and the job
+continue running. } \\
+{{\bf M\_INFO} } & {Reports an informational message.}
+
+\end{longtable}
+
+There are other error message classes, but they are in a state of being
+redesigned or deprecated, so please do not use them. Some actual examples are:
+
+Emsg1(M\_ABORT, 0, ``Cannot create message thread: \%s\textbackslash{}n'',
+strerror(status));
+
+Emsg3(M\_WARNING, 0, ``Connect to File daemon \%s at \%s:\%d failed. Retrying
+...\textbackslash{}n'', client-\gt{}hdr.name, client-\gt{}address,
+client-\gt{}port);
+
+Emsg3(M\_FATAL, 0, ``bdird\lt{}filed: bad response from Filed to \%s command:
+\%d \%s\textbackslash{}n'', cmd, n, strerror(errno));
+
+\subsection{Job Messages}
+\index{Job Messages}
+\index{Messages!Job}
+\addcontentsline{toc}{subsubsection}{Job Messages}
+
+Job messages are messages that pertain to a particular job such as a file that
+could not be saved, or the number of files and bytes that were saved. They
+are coded as:
+\begin{verbatim}
+Jmsg(jcr, M_FATAL, 0, "Text of message");
+\end{verbatim}
+A Jmsg with M\_FATAL will fail the job. Jmsg() takes varargs, so it can
+have any number of arguments to be substituted in a printf-like format.
+Output from the Jmsg() will go to the Job report.
+
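
As a concrete sketch (the file name variable and the message text are
invented for illustration), a routine that fails to open a file during a job
might report the problem to the Job report like this:

\begin{verbatim}
/* Illustrative sketch only -- not actual Bacula code */
if ((fd = open(fname, O_RDONLY)) < 0) {
   Jmsg(jcr, M_ERROR, 0, _("Cannot open %s: ERR=%s\n"),
        fname, strerror(errno));
   return false;
}
\end{verbatim}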
+If the Jmsg is followed with a number such as Jmsg1(...), the number +indicates the number of arguments to be substituted (varargs is not +standard for \#defines), and what is more important is that the file and +line number will be prefixed to the message. This permits a sort of debug +from user's output. + +\subsection{Queued Job Messages} +\index{Queued Job Messages} +\index{Messages!Job} +\addcontentsline{toc}{subsubsection}{Queued Job Messages} +Queued Job messages are similar to Jmsg()s except that the message is +Queued rather than immediately dispatched. This is necessary within the +network subroutines and in the message editing routines. This is to prevent +recursive loops, and to ensure that messages can be delivered even in the +event of a network error. + + +\subsection{Memory Messages} +\index{Messages!Memory} +\index{Memory Messages} +\addcontentsline{toc}{subsubsection}{Memory Messages} + +Memory messages are messages that are edited into a memory buffer. Generally +they are used in low level routines such as the low level device file dev.c in +the Storage daemon or in the low level Catalog routines. These routines do not +generally have access to the Job Control Record and so they return error +essages reformatted in a memory buffer. Mmsg() is the way to do this. diff --git a/docs/manuals/de/developers/index.perl b/docs/manuals/de/developers/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/de/developers/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. 
This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. +# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. 
+ if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. + $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. + # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. + s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. 
In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/de/developers/latex2html-init.pl b/docs/manuals/de/developers/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/de/developers/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/de/developers/md5.tex b/docs/manuals/de/developers/md5.tex new file mode 100644 index 00000000..aed995b4 --- /dev/null +++ b/docs/manuals/de/developers/md5.tex @@ -0,0 +1,184 @@ +%% +%% + +\chapter{Bacula MD5 Algorithm} +\label{MD5Chapter} +\addcontentsline{toc}{section}{} + +\section{Command Line Message Digest Utility } +\index{Utility!Command Line Message Digest } +\index{Command Line Message Digest Utility } +\addcontentsline{toc}{subsection}{Command Line Message Digest Utility} + + +This page describes {\bf md5}, a command line utility usable on either Unix or +MS-DOS/Windows, which generates and verifies message digests (digital +signatures) using the MD5 algorithm. This program can be useful when +developing shell scripts or Perl programs for software installation, file +comparison, and detection of file corruption and tampering. + +\subsection{Name} +\index{Name} +\addcontentsline{toc}{subsubsection}{Name} + +{\bf md5} - generate / check MD5 message digest + +\subsection{Synopsis} +\index{Synopsis } +\addcontentsline{toc}{subsubsection}{Synopsis} + +{\bf md5} [ {\bf -c}{\it signature} ] [ {\bf -u} ] [ {\bf -d}{\it input\_text} +| {\it infile} ] [ {\it outfile} ] + +\subsection{Description} +\index{Description } +\addcontentsline{toc}{subsubsection}{Description} + +A {\it message digest} is a compact digital signature for an arbitrarily long +stream of binary data. An ideal message digest algorithm would never generate +the same signature for two different sets of input, but achieving such +theoretical perfection would require a message digest as long as the input +file. Practical message digest algorithms compromise in favour of a digital +signature of modest size created with an algorithm designed to make +preparation of input text with a given signature computationally infeasible. +Message digest algorithms have much in common with techniques used in +encryption, but to a different end; verification that data have not been +altered since the signature was published. + +Many older programs requiring digital signatures employed 16 or 32 bit {\it +cyclical redundancy codes} (CRC) originally developed to verify correct +transmission in data communication protocols, but these short codes, while +adequate to detect the kind of transmission errors for which they were +intended, are insufficiently secure for applications such as electronic +commerce and verification of security related software distributions. 
+ +The most commonly used present-day message digest algorithm is the 128 bit MD5 +algorithm, developed by Ron Rivest of the +\elink{MIT}{http://web.mit.edu/} +\elink{Laboratory for Computer Science}{http://www.lcs.mit.edu/} and +\elink{RSA Data Security, Inc.}{http://www.rsa.com/} The algorithm, with a +reference implementation, was published as Internet +\elink{RFC 1321}{http://www.fourmilab.ch/md5/rfc1321.html} in April 1992, and +was placed into the public domain at that time. Message digest algorithms such +as MD5 are not deemed ``encryption technology'' and are not subject to the +export controls some governments impose on other data security products. +(Obviously, the responsibility for obeying the laws in the jurisdiction in +which you reside is entirely your own, but many common Web and Mail utilities +use MD5, and I am unaware of any restrictions on their distribution and use.) + +The MD5 algorithm has been implemented in numerous computer languages +including C, +\elink{Perl}{http://www.perl.org/}, and +\elink{Java}{http://www.javasoft.com/}; if you're writing a program in such a +language, track down a suitable subroutine and incorporate it into your +program. The program described on this page is a {\it command line} +implementation of MD5, intended for use in shell scripts and Perl programs (it +is much faster than computing an MD5 signature directly in Perl). This {\bf +md5} program was originally developed as part of a suite of tools intended to +monitor large collections of files (for example, the contents of a Web site) +to detect corruption of files and inadvertent (or perhaps malicious) changes. +That task is now best accomplished with more comprehensive packages such as +\elink{Tripwire}{ftp://coast.cs.purdue.edu/pub/COAST/Tripwire/}, but the +command line {\bf md5} component continues to prove useful for verifying +correct delivery and installation of software packages, comparing the contents +of two different systems, and checking for changes in specific files. + +\subsection{Options} +\index{Options } +\addcontentsline{toc}{subsubsection}{Options} + +\begin{description} + +\item [{\bf -c}{\it signature} ] + \index{-csignature } + Computes the signature of the specified {\it infile} or the string supplied +by the {\bf -d} option and compares it against the specified {\it signature}. +If the two signatures match, the exit status will be zero, otherwise the exit +status will be 1. No signature is written to {\it outfile} or standard +output; only the exit status is set. The signature to be checked must be +specified as 32 hexadecimal digits. + +\item [{\bf -d}{\it input\_text} ] + \index{-dinput\_text } + A signature is computed for the given {\it input\_text} (which must be quoted +if it contains white space characters) instead of input from {\it infile} or +standard input. If input is specified with the {\bf -d} option, no {\it +infile} should be specified. + +\item [{\bf -u} ] + Print how-to-call information. + \end{description} + +\subsection{Files} +\index{Files } +\addcontentsline{toc}{subsubsection}{Files} + +If no {\it infile} or {\bf -d} option is specified or {\it infile} is a single +``-'', {\bf md5} reads from standard input; if no {\it outfile} is given, or +{\it outfile} is a single ``-'', output is sent to standard output. Input and +output are processed strictly serially; consequently {\bf md5} may be used in +pipelines. 
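
If you would rather incorporate the algorithm directly into a C program than
call the command line utility, a minimal sketch using the RFC 1321 reference
implementation follows (the md5.h header and the MD5Init/MD5Update/MD5Final
entry points are those of the reference sources; other MD5 libraries name
them differently):

\footnotesize
\begin{verbatim}
/* Illustrative sketch using the RFC 1321 reference implementation */
#include <stdio.h>
#include <string.h>
#include "md5.h"                  /* from the RFC 1321 reference sources */

int main(int argc, char *argv[])
{
   MD5_CTX ctx;
   unsigned char digest[16];
   const char *text = (argc > 1) ? argv[1] : "";
   int i;

   MD5Init(&ctx);
   MD5Update(&ctx, (unsigned char *)text, (unsigned int)strlen(text));
   MD5Final(digest, &ctx);

   for (i = 0; i < 16; i++) {
      printf("%02x", digest[i]);  /* print the 128 bit digest in hex */
   }
   printf("\n");
   return 0;
}
\end{verbatim}
\normalsize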
+ +\subsection{Bugs} +\index{Bugs } +\addcontentsline{toc}{subsubsection}{Bugs} + +The mechanism used to set standard input to binary mode may be specific to +Microsoft C; if you rebuild the DOS/Windows version of the program from source +using another compiler, be sure to verify binary files work properly when read +via redirection or a pipe. + +This program has not been tested on a machine on which {\tt int} and/or {\tt +long} are longer than 32 bits. + +\section{ +\elink{Download md5.zip}{http://www.fourmilab.ch/md5/md5.zip} (Zipped +archive)} +\index{Archive!Download md5.zip Zipped } +\index{Download md5.zip (Zipped archive) } +\addcontentsline{toc}{subsection}{Download md5.zip (Zipped archive)} + +The program is provided as +\elink{md5.zip}{http://www.fourmilab.ch/md5/md5.zip}, a +\elink{Zipped}{http://www.pkware.com/} archive containing an ready-to-run +Win32 command-line executable program, {\tt md5.exe} (compiled using Microsoft +Visual C++ 5.0), and in source code form along with a {\tt Makefile} to build +the program under Unix. + +\subsection{See Also} +\index{ALSO!SEE } +\index{See Also } +\addcontentsline{toc}{subsubsection}{SEE ALSO} + +{\bf sum}(1) + +\subsection{Exit Status} +\index{Status!Exit } +\index{Exit Status } +\addcontentsline{toc}{subsubsection}{Exit Status} + +{\bf md5} returns status 0 if processing was completed without errors, 1 if +the {\bf -c} option was specified and the given signature does not match that +of the input, and 2 if processing could not be performed at all due, for +example, to a nonexistent input file. + +\subsection{Copying} +\index{Copying } +\addcontentsline{toc}{subsubsection}{Copying} + +\begin{quote} +This software is in the public domain. Permission to use, copy, modify, and +distribute this software and its documentation for any purpose and without +fee is hereby granted, without any conditions or restrictions. This software +is provided ``as is'' without express or implied warranty. +\end{quote} + +\subsection{Acknowledgements} +\index{Acknowledgements } +\addcontentsline{toc}{subsubsection}{Acknowledgements} + +The MD5 algorithm was developed by Ron Rivest. The public domain C language +implementation used in this program was written by Colin Plumb in 1993. +{\it +\elink{by John Walker}{http://www.fourmilab.ch/} +January 6th, MIM } diff --git a/docs/manuals/de/developers/mediaformat.tex b/docs/manuals/de/developers/mediaformat.tex new file mode 100644 index 00000000..cc824f78 --- /dev/null +++ b/docs/manuals/de/developers/mediaformat.tex @@ -0,0 +1,1115 @@ +%% +%% + +\chapter{Storage Media Output Format} +\label{_ChapterStart9} +\index{Format!Storage Media Output} +\index{Storage Media Output Format} +\addcontentsline{toc}{section}{Storage Media Output Format} + +\section{General} +\index{General} +\addcontentsline{toc}{subsection}{General} + +This document describes the media format written by the Storage daemon. The +Storage daemon reads and writes in units of blocks. Blocks contain records. +Each block has a block header followed by records, and each record has a +record header followed by record data. + +This chapter is intended to be a technical discussion of the Media Format and +as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. 
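
To make the definitions that follow easier to picture, here is a rough C
sketch of the information carried by a version BB02 block header and by the
corresponding on-Volume record header (the struct names, field names, types,
and ordering are purely illustrative and are not Bacula's actual
declarations; the meaning of each field is as described in the Definitions
below):

\footnotesize
\begin{verbatim}
/* Illustrative sketch only -- not Bacula's actual declarations */
#include <stdint.h>

struct bb02_block_header {
   char     Id[4];            /* block identification: "BB02" */
   uint32_t BlockSize;        /* block length in bytes, typically 64,512 */
   uint32_t CheckSum;         /* checksum over the block */
   uint32_t BlockNumber;      /* sequential block number */
   uint32_t VolSessionId;     /* session (Job) id of the records in this block */
   uint32_t VolSessionTime;   /* Storage daemon start time as a 32 bit integer */
};

struct bb02_record_header {   /* VolSessionId/Time live in the block header */
   int32_t  FileIndex;        /* > 0 for files; negative values flag labels */
   int32_t  Stream;           /* which piece of the file: attributes, data, ... */
   uint32_t DataSize;         /* size of the data record that follows */
};
\end{verbatim}
\normalsize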
+
+\section{Definitions}
+\index{Definitions}
+\addcontentsline{toc}{subsection}{Definitions}
+
+\begin{description}
+
+\item [Block]
+ \index{Block}
+ A block represents the primitive unit of information that the Storage daemon
+reads and writes to a physical device. Normally, for a tape device, it will
+be the same as a tape block. The Storage daemon always reads and writes
+blocks. A block consists of block header information followed by records.
+Clients of the Storage daemon (the File daemon) normally never see blocks.
+However, some of the Storage tools (bls, bscan, bextract, ...) may use
+block header information. In older Bacula tape versions, a block could
+contain records (see record definition below) from multiple jobs. However,
+all blocks currently written by Bacula are block level BB02, and a given
+block contains records for only a single job. Different jobs simply have
+their own private blocks that are intermingled with the other blocks from
+other jobs on the Volume (previously the records were intermingled within
+the blocks). Having only records from a single job in any given block
+permitted moving the VolumeSessionId and VolumeSessionTime (see below) from
+each record header to the Block header. This has two advantages: 1. a block
+can be quickly rejected based on the contents of the header without reading
+all the records. 2. because there is on average more than one record per
+block, less data is written to the Volume for each job.
+
+\item [Record]
+ \index{Record}
+ A record consists of a Record Header, which is managed by the Storage daemon,
+and Record Data, which is the data received from the Client. A record is the
+primitive unit of information sent to and from the Storage daemon by the
+Client (File daemon) programs. The details are described below.
+
+\item [JobId]
+ \index{JobId}
+ A number assigned by the Director daemon for a particular job. This number
+will be unique for that particular Director (Catalog). The daemons use this
+number to keep track of individual jobs. Within the Storage daemon, the JobId
+may not be unique if several Directors are accessing the Storage daemon
+simultaneously.
+
+\item [Session]
+ \index{Session}
+ A Session is a concept used in the Storage daemon that corresponds one to one
+to a Job, with the exception that each session is uniquely identified within
+the Storage daemon by a unique SessionId/SessionTime pair (see below).
+
+\item [VolSessionId]
+ \index{VolSessionId}
+ A unique number assigned by the Storage daemon to a particular session (Job)
+it is having with a File daemon. This number by itself is not unique to the
+given Volume, but with the VolSessionTime, it is unique.
+
+\item [VolSessionTime]
+ \index{VolSessionTime}
+ A unique number assigned by the Storage daemon to a particular Storage daemon
+execution. It is actually the Unix time\_t value of when the Storage daemon
+began execution cast to a 32 bit unsigned integer. The combination of the
+{\bf VolSessionId} and the {\bf VolSessionTime} for a given Storage daemon is
+guaranteed to be unique for each Job (or session).
+
+\item [FileIndex]
+ \index{FileIndex}
+ A sequential number beginning at one assigned by the File daemon to the files
+within a job that are sent to the Storage daemon for backup. The Storage
+daemon ensures that this number is greater than zero and sequential. Note,
+the Storage daemon uses negative FileIndexes to flag Session Start and End
+Labels as well as End of Volume Labels.
Thus, the combination of +VolSessionId, VolSessionTime, and FileIndex uniquely identifies the records +for a single file written to a Volume. + +\item [Stream] + \index{Stream} + While writing the information for any particular file to the Volume, there +can be any number of distinct pieces of information about that file, e.g. the +attributes, the file data, ... The Stream indicates what piece of data it +is, and it is an arbitrary number assigned by the File daemon to the parts +(Unix attributes, Win32 attributes, data, compressed data,\ ...) of a file +that are sent to the Storage daemon. The Storage daemon has no knowledge of +the details of a Stream; it simply represents a numbered stream of bytes. The +data for a given stream may be passed to the Storage daemon in single record, +or in multiple records. + +\item [Block Header] + \index{Block Header} + A block header consists of a block identification (``BB02''), a block length +in bytes (typically 64,512) a checksum, and sequential block number. Each +block starts with a Block Header and is followed by Records. Current block +headers also contain the VolSessionId and VolSessionTime for the records +written to that block. + +\item [Record Header] + \index{Record Header} + A record header contains the Volume Session Id, the Volume Session Time, the +FileIndex, the Stream, and the size of the data record which follows. The +Record Header is always immediately followed by a Data Record if the size +given in the Header is greater than zero. Note, for Block headers of level +BB02 (version 1.27 and later), the Record header as written to tape does not +contain the Volume Session Id and the Volume Session Time as these two +fields are stored in the BB02 Block header. The in-memory record header does +have those fields for convenience. + +\item [Data Record] + \index{Data Record} + A data record consists of a binary stream of bytes and is always preceded by +a Record Header. The details of the meaning of the binary stream of bytes are +unknown to the Storage daemon, but the Client programs (File daemon) defines +and thus knows the details of each record type. + +\item [Volume Label] + \index{Volume Label} + A label placed by the Storage daemon at the beginning of each storage volume. +It contains general information about the volume. It is written in Record +format. The Storage daemon manages Volume Labels, and if the client wants, he +may also read them. + +\item [Begin Session Label] + \index{Begin Session Label} + The Begin Session Label is a special record placed by the Storage daemon on +the storage medium as the first record of an append session job with a File +daemon. This record is useful for finding the beginning of a particular +session (Job), since no records with the same VolSessionId and VolSessionTime +will precede this record. This record is not normally visible outside of the +Storage daemon. The Begin Session Label is similar to the Volume Label except +that it contains additional information pertaining to the Session. + +\item [End Session Label] + \index{End Session Label} + The End Session Label is a special record placed by the Storage daemon on the +storage medium as the last record of an append session job with a File +daemon. The End Session Record is distinguished by a FileIndex with a value +of minus two (-2). This record is useful for detecting the end of a +particular session since no records with the same VolSessionId and +VolSessionTime will follow this record. 
This record is not normally visible +outside of the Storage daemon. The End Session Label is similar to the Volume +Label except that it contains additional information pertaining to the +Session. +\end{description} + +\section{Storage Daemon File Output Format} +\index{Format!Storage Daemon File Output} +\index{Storage Daemon File Output Format} +\addcontentsline{toc}{subsection}{Storage Daemon File Output Format} + +The file storage and tape storage formats are identical except that tape +records are by default blocked into blocks of 64,512 bytes, except for the +last block, which is the actual number of bytes written rounded up to a +multiple of 1024 whereas the last record of file storage is not rounded up. +The default block size of 64,512 bytes may be overridden by the user (some +older tape drives only support block sizes of 32K). Each Session written to +tape is terminated with an End of File mark (this will be removed later). +Sessions written to file are simply appended to the end of the file. + +\section{Overall Format} +\index{Format!Overall} +\index{Overall Format} +\addcontentsline{toc}{subsection}{Overall Format} + +A Bacula output file consists of Blocks of data. Each block contains a block +header followed by records. Each record consists of a record header followed +by the record data. The first record on a tape will always be the Volume Label +Record. + +No Record Header will be split across Bacula blocks. However, Record Data may +be split across any number of Bacula blocks. Obviously this will not be the +case for the Volume Label which will always be smaller than the Bacula Block +size. + +To simplify reading tapes, the Start of Session (SOS) and End of Session (EOS) +records are never split across blocks. If this is about to happen, Bacula will +write a short block before writing the session record (actually, the SOS +record should always be the first record in a block, excepting perhaps the +Volume label). + +Due to hardware limitations, the last block written to the tape may not be +fully written. If your drive permits backspace record, Bacula will backup over +the last record written on the tape, re-read it and verify that it was +correctly written. + +When a new tape is mounted Bacula will write the full contents of the +partially written block to the new tape ensuring that there is no loss of +data. When reading a tape, Bacula will discard any block that is not totally +written, thus ensuring that there is no duplication of data. In addition, +since Bacula blocks are sequentially numbered within a Job, it is easy to +ensure that no block is missing or duplicated. + +\section{Serialization} +\index{Serialization} +\addcontentsline{toc}{subsection}{Serialization} + +All Block Headers, Record Headers, and Label Records are written using +Bacula's serialization routines. These routines guarantee that the data is +written to the output volume in a machine independent format. 
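+
+The following sketch illustrates the idea behind such serialization; it is
+not Bacula's actual code (see the serialization routines under src/lib in the
+source tree for the real implementation), and the helper names are purely
+illustrative. The point is that every multi-byte integer is stored in one
+fixed byte order, so a Volume written on one architecture can be read on
+another.
+
+\footnotesize
+\begin{verbatim}
+#include <stdint.h>
+#include <string.h>
+#include <arpa/inet.h>   /* htonl() / ntohl() */
+
+/* Hypothetical helper: append a uint32 in network byte order */
+static uint8_t *ser_uint32_example(uint8_t *p, uint32_t val)
+{
+   uint32_t v = htonl(val);
+   memcpy(p, &v, sizeof(v));
+   return p + sizeof(v);
+}
+
+/* Hypothetical helper: read it back, independent of host byte order */
+static const uint8_t *unser_uint32_example(const uint8_t *p, uint32_t *val)
+{
+   uint32_t v;
+   memcpy(&v, p, sizeof(v));
+   *val = ntohl(v);
+   return p + sizeof(v);
+}
+\end{verbatim}
+\normalsize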
+ +\section{Block Header} +\index{Header!Block} +\index{Block Header} +\addcontentsline{toc}{subsection}{Block Header} + +The format of the Block Header (version 1.27 and later) is: + +\footnotesize +\begin{verbatim} + uint32_t CheckSum; /* Block check sum */ + uint32_t BlockSize; /* Block byte size including the header */ + uint32_t BlockNumber; /* Block number */ + char ID[4] = "BB02"; /* Identification and block level */ + uint32_t VolSessionId; /* Session Id for Job */ + uint32_t VolSessionTime; /* Session Time for Job */ +\end{verbatim} +\normalsize + +The Block header is a fixed length and fixed format and is followed by Record +Headers and Record Data. The CheckSum field is a 32 bit checksum of the block +data and the block header but not including the CheckSum field. The Block +Header is always immediately followed by a Record Header. If the tape is +damaged, a Bacula utility will be able to recover as much information as +possible from the tape by recovering blocks which are valid. The Block header +is written using the Bacula serialization routines and thus is guaranteed to +be in machine independent format. See below for version 2 of the block header. + + +\section{Record Header} +\index{Header!Record} +\index{Record Header} +\addcontentsline{toc}{subsection}{Record Header} + +Each binary data record is preceded by a Record Header. The Record Header is +fixed length and fixed format, whereas the binary data record is of variable +length. The Record Header is written using the Bacula serialization routines +and thus is guaranteed to be in machine independent format. + +The format of the Record Header (version 1.27 or later) is: + +\footnotesize +\begin{verbatim} + int32_t FileIndex; /* File index supplied by File daemon */ + int32_t Stream; /* Stream number supplied by File daemon */ + uint32_t DataSize; /* size of following data record in bytes */ +\end{verbatim} +\normalsize + +This record is followed by the binary Stream data of DataSize bytes, followed +by another Record Header record and the binary stream data. For the definitive +definition of this record, see record.h in the src/stored directory. + +Additional notes on the above: + +\begin{description} + +\item [The {\bf VolSessionId} ] + \index{VolSessionId} + is a unique sequential number that is assigned by the Storage Daemon to a +particular Job. This number is sequential since the start of execution of the +daemon. + +\item [The {\bf VolSessionTime} ] + \index{VolSessionTime} + is the time/date that the current execution of the Storage Daemon started. It +assures that the combination of VolSessionId and VolSessionTime is unique for +every jobs written to the tape, even if there was a machine crash between two +writes. + +\item [The {\bf FileIndex} ] + \index{FileIndex} + is a sequential file number within a job. The Storage daemon requires this +index to be greater than zero and sequential. Note, however, that the File +daemon may send multiple Streams for the same FileIndex. In addition, the +Storage daemon uses negative FileIndices to hold the Begin Session Label, the +End Session Label, and the End of Volume Label. + +\item [The {\bf Stream} ] + \index{Stream} + is defined by the File daemon and is used to identify separate parts of the +data saved for each file (Unix attributes, Win32 attributes, file data, +compressed file data, sparse file data, ...). The Storage Daemon has no idea +of what a Stream is or what it contains except that the Stream is required to +be a positive integer. 
Negative Stream numbers are used internally by the +Storage daemon to indicate that the record is a continuation of the previous +record (the previous record would not entirely fit in the block). + +For Start Session and End Session Labels (where the FileIndex is negative), +the Storage daemon uses the Stream field to contain the JobId. The current +stream definitions are: + +\footnotesize +\begin{verbatim} +#define STREAM_UNIX_ATTRIBUTES 1 /* Generic Unix attributes */ +#define STREAM_FILE_DATA 2 /* Standard uncompressed data */ +#define STREAM_MD5_SIGNATURE 3 /* MD5 signature for the file */ +#define STREAM_GZIP_DATA 4 /* GZip compressed file data */ +/* Extended Unix attributes with Win32 Extended data. Deprecated. */ +#define STREAM_UNIX_ATTRIBUTES_EX 5 /* Extended Unix attr for Win32 EX */ +#define STREAM_SPARSE_DATA 6 /* Sparse data stream */ +#define STREAM_SPARSE_GZIP_DATA 7 +#define STREAM_PROGRAM_NAMES 8 /* program names for program data */ +#define STREAM_PROGRAM_DATA 9 /* Data needing program */ +#define STREAM_SHA1_SIGNATURE 10 /* SHA1 signature for the file */ +#define STREAM_WIN32_DATA 11 /* Win32 BackupRead data */ +#define STREAM_WIN32_GZIP_DATA 12 /* Gzipped Win32 BackupRead data */ +#define STREAM_MACOS_FORK_DATA 13 /* Mac resource fork */ +#define STREAM_HFSPLUS_ATTRIBUTES 14 /* Mac OS extra attributes */ +#define STREAM_UNIX_ATTRIBUTES_ACCESS_ACL 15 /* Standard ACL attributes on UNIX */ +#define STREAM_UNIX_ATTRIBUTES_DEFAULT_ACL 16 /* Default ACL attributes on UNIX */ +\end{verbatim} +\normalsize + +\item [The {\bf DataSize} ] + \index{DataSize} + is the size in bytes of the binary data record that follows the Session +Record header. The Storage Daemon has no idea of the actual contents of the +binary data record. For standard Unix files, the data record typically +contains the file attributes or the file data. For a sparse file the first +64 bits of the file data contains the storage address for the data block. +\end{description} + +The Record Header is never split across two blocks. If there is not enough +room in a block for the full Record Header, the block is padded to the end +with zeros and the Record Header begins in the next block. The data record, on +the other hand, may be split across multiple blocks and even multiple physical +volumes. When a data record is split, the second (and possibly subsequent) +piece of the data is preceded by a new Record Header. Thus each piece of data +is always immediately preceded by a Record Header. When reading a record, if +Bacula finds only part of the data in the first record, it will automatically +read the next record and concatenate the data record to form a full data +record. + +\section{Version BB02 Block Header} +\index{Version BB02 Block Header} +\index{Header!Version BB02 Block} +\addcontentsline{toc}{subsection}{Version BB02 Block Header} + +Each session or Job has its own private block. As a consequence, the SessionId +and SessionTime are written once in each Block Header and not in the Record +Header. 
So, the second and current version of the Block Header BB02 is:
+
+\footnotesize
+\begin{verbatim}
+   uint32_t CheckSum;        /* Block check sum */
+   uint32_t BlockSize;       /* Block byte size including the header */
+   uint32_t BlockNumber;     /* Block number */
+   char ID[4] = "BB02";      /* Identification and block level */
+   uint32_t VolSessionId;    /* Applies to all records */
+   uint32_t VolSessionTime;  /*   contained in this block */
+\end{verbatim}
+\normalsize
+
+As with the previous version, the BB02 Block header is a fixed length and
+fixed format and is followed by Record Headers and Record Data. The CheckSum
+field is a 32 bit CRC checksum of the block data and the block header but not
+including the CheckSum field. The Block Header is always immediately followed
+by a Record Header. If the tape is damaged, a Bacula utility will be able to
+recover as much information as possible from the tape by recovering blocks
+which are valid. The Block header is written using the Bacula serialization
+routines and thus is guaranteed to be in machine independent format.
+
+\section{Version 2 Record Header}
+\index{Version 2 Record Header}
+\index{Header!Version 2 Record}
+\addcontentsline{toc}{subsection}{Version 2 Record Header}
+
+The Version 2 Record Header is written to the medium when using Version BB02
+Block Headers. The memory representation of the record is identical to the old
+BB01 Record Header, but on the storage medium, the first two fields, namely
+VolSessionId and VolSessionTime, are not written. The Block Header is filled
+with these values when the first user record (i.e. non-label record) is
+written, so that when the block is written, it will have the current and
+unique VolSessionId and VolSessionTime. On reading each record from the Block,
+the VolSessionId and VolSessionTime are filled in the Record Header from the
+Block Header.
+
+\section{Volume Label Format}
+\index{Volume Label Format}
+\index{Format!Volume Label}
+\addcontentsline{toc}{subsection}{Volume Label Format}
+
+Tape volume labels are created by the Storage daemon in response to a {\bf
+label} command given to the Console program, or alternatively by the {\bf
+btape} program. Each volume is labeled with the following information
+using the Bacula serialization routines, which guarantee machine byte order
+independence.
+
+For Bacula versions 1.27 and later, the Volume Label Format is:
+
+\footnotesize
+\begin{verbatim}
+  char Id[32];              /* Bacula 1.0 Immortal\n */
+  uint32_t VerNum;          /* Label version number */
+  /* VerNum 11 and greater Bacula 1.27 and later */
+  btime_t label_btime;      /* Time/date tape labeled */
+  btime_t write_btime;      /* Time/date tape first written */
+  /* The following are 0 in VerNum 11 and greater */
+  float64_t write_date;     /* Date this label written */
+  float64_t write_time;     /* Time this label written */
+  char VolName[128];        /* Volume name */
+  char PrevVolName[128];    /* Previous Volume Name */
+  char PoolName[128];       /* Pool name */
+  char PoolType[128];       /* Pool type */
+  char MediaType[128];      /* Type of this media */
+  char HostName[128];       /* Host name of writing computer */
+  char LabelProg[32];       /* Label program name */
+  char ProgVersion[32];     /* Program version */
+  char ProgDate[32];        /* Program build date/time */
+\end{verbatim}
+\normalsize
+
+Note, the LabelType (Volume Label, Volume PreLabel, Session Start Label, ...)
+is stored in the record FileIndex field of the Record Header and does not
+appear in the data part of the record.
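+
+Since the label type travels in the FileIndex field, a reader can tell label
+records apart from ordinary data records simply by the sign and value of
+FileIndex. The following sketch is only an illustration (the classification
+helper is hypothetical); the label type values match those listed in the
+Overall Storage Format section below.
+
+\footnotesize
+\begin{verbatim}
+#include <stdint.h>
+
+#define PRE_LABEL  -1   /* Volume label on unwritten tape */
+#define VOL_LABEL  -2   /* Volume label after tape written */
+#define EOM_LABEL  -3   /* Label at end of medium */
+#define SOS_LABEL  -4   /* Start of Session label */
+#define EOS_LABEL  -5   /* End of Session label */
+
+/* Hypothetical helper: classify a record from its FileIndex */
+static const char *classify_record(int32_t FileIndex)
+{
+   if (FileIndex > 0) {
+      return "file attribute or data record";
+   }
+   switch (FileIndex) {
+   case PRE_LABEL: return "pre-label";
+   case VOL_LABEL: return "volume label";
+   case EOM_LABEL: return "end of medium label";
+   case SOS_LABEL: return "start of session label";
+   case EOS_LABEL: return "end of session label";
+   default:        return "unknown label type";
+   }
+}
+\end{verbatim}
+\normalsize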
+ +\section{Session Label} +\index{Label!Session} +\index{Session Label} +\addcontentsline{toc}{subsection}{Session Label} + +The Session Label is written at the beginning and end of each session as well +as the last record on the physical medium. It has the following binary format: + + +\footnotesize +\begin{verbatim} + char Id[32]; /* Bacula Immortal ... */ + uint32_t VerNum; /* Label version number */ + uint32_t JobId; /* Job id */ + uint32_t VolumeIndex; /* sequence no of vol */ + /* Prior to VerNum 11 */ + float64_t write_date; /* Date this label written */ + /* VerNum 11 and greater */ + btime_t write_btime; /* time/date record written */ + /* The following is zero VerNum 11 and greater */ + float64_t write_time; /* Time this label written */ + char PoolName[128]; /* Pool name */ + char PoolType[128]; /* Pool type */ + char JobName[128]; /* base Job name */ + char ClientName[128]; + /* Added in VerNum 10 */ + char Job[128]; /* Unique Job name */ + char FileSetName[128]; /* FileSet name */ + uint32_t JobType; + uint32_t JobLevel; +\end{verbatim} +\normalsize + +In addition, the EOS label contains: + +\footnotesize +\begin{verbatim} + /* The remainder are part of EOS label only */ + uint32_t JobFiles; + uint64_t JobBytes; + uint32_t start_block; + uint32_t end_block; + uint32_t start_file; + uint32_t end_file; + uint32_t JobErrors; +\end{verbatim} +\normalsize + +In addition, for VerNum greater than 10, the EOS label contains (in addition +to the above): + +\footnotesize +\begin{verbatim} + uint32_t JobStatus /* Job termination code */ +\end{verbatim} +\normalsize + +: Note, the LabelType (Volume Label, Volume PreLabel, Session Start Label, +...) is stored in the record FileIndex field and does not appear in the data +part of the record. Also, the Stream field of the Record Header contains the +JobId. This permits quick filtering without actually reading all the session +data in many cases. + +\section{Overall Storage Format} +\index{Format!Overall Storage} +\index{Overall Storage Format} +\addcontentsline{toc}{subsection}{Overall Storage Format} + +\footnotesize +\begin{verbatim} + Current Bacula Tape Format + 6 June 2001 + Version BB02 added 28 September 2002 + Version BB01 is the old deprecated format. + A Bacula tape is composed of tape Blocks. Each block + has a Block header followed by the block data. Block + Data consists of Records. Records consist of Record + Headers followed by Record Data. + :=======================================================: + | | + | Block Header (24 bytes) | + | | + |-------------------------------------------------------| + | | + | Record Header (12 bytes) | + | | + |-------------------------------------------------------| + | | + | Record Data | + | | + |-------------------------------------------------------| + | | + | Record Header (12 bytes) | + | | + |-------------------------------------------------------| + | | + | ... | + Block Header: the first item in each block. The format is + shown below. + Partial Data block: occurs if the data from a previous + block spills over to this block (the normal case except + for the first block on a tape). However, this partial + data block is always preceded by a record header. + Record Header: identifies the Volume Session, the Stream + and the following Record Data size. See below for format. + Record data: arbitrary binary data. 
+ Block Header Format BB02 + :=======================================================: + | CheckSum (uint32_t) | + |-------------------------------------------------------| + | BlockSize (uint32_t) | + |-------------------------------------------------------| + | BlockNumber (uint32_t) | + |-------------------------------------------------------| + | "BB02" (char [4]) | + |-------------------------------------------------------| + | VolSessionId (uint32_t) | + |-------------------------------------------------------| + | VolSessionTime (uint32_t) | + :=======================================================: + BBO2: Serves to identify the block as a + Bacula block and also servers as a block format identifier + should we ever need to change the format. + BlockSize: is the size in bytes of the block. When reading + back a block, if the BlockSize does not agree with the + actual size read, Bacula discards the block. + CheckSum: a checksum for the Block. + BlockNumber: is the sequential block number on the tape. + VolSessionId: a unique sequential number that is assigned + by the Storage Daemon to a particular Job. + This number is sequential since the start + of execution of the daemon. + VolSessionTime: the time/date that the current execution + of the Storage Daemon started. It assures + that the combination of VolSessionId and + VolSessionTime is unique for all jobs + written to the tape, even if there was a + machine crash between two writes. + Record Header Format BB02 + :=======================================================: + | FileIndex (int32_t) | + |-------------------------------------------------------| + | Stream (int32_t) | + |-------------------------------------------------------| + | DataSize (uint32_t) | + :=======================================================: + FileIndex: a sequential file number within a job. The + Storage daemon enforces this index to be + greater than zero and sequential. Note, + however, that the File daemon may send + multiple Streams for the same FileIndex. + The Storage Daemon uses negative FileIndices + to identify Session Start and End labels + as well as the End of Volume labels. + Stream: defined by the File daemon and is intended to be + used to identify separate parts of the data + saved for each file (attributes, file data, + ...). The Storage Daemon has no idea of + what a Stream is or what it contains. + DataSize: the size in bytes of the binary data record + that follows the Session Record header. + The Storage Daemon has no idea of the + actual contents of the binary data record. + For standard Unix files, the data record + typically contains the file attributes or + the file data. For a sparse file + the first 64 bits of the data contains + the storage address for the data block. 
+ Volume Label + :=======================================================: + | Id (32 bytes) | + |-------------------------------------------------------| + | VerNum (uint32_t) | + |-------------------------------------------------------| + | label_date (float64_t) | + | label_btime (btime_t VerNum 11 | + |-------------------------------------------------------| + | label_time (float64_t) | + | write_btime (btime_t VerNum 11 | + |-------------------------------------------------------| + | write_date (float64_t) | + | 0 (float64_t) VerNum 11 | + |-------------------------------------------------------| + | write_time (float64_t) | + | 0 (float64_t) VerNum 11 | + |-------------------------------------------------------| + | VolName (128 bytes) | + |-------------------------------------------------------| + | PrevVolName (128 bytes) | + |-------------------------------------------------------| + | PoolName (128 bytes) | + |-------------------------------------------------------| + | PoolType (128 bytes) | + |-------------------------------------------------------| + | MediaType (128 bytes) | + |-------------------------------------------------------| + | HostName (128 bytes) | + |-------------------------------------------------------| + | LabelProg (32 bytes) | + |-------------------------------------------------------| + | ProgVersion (32 bytes) | + |-------------------------------------------------------| + | ProgDate (32 bytes) | + |-------------------------------------------------------| + :=======================================================: + + Id: 32 byte Bacula identifier "Bacula 1.0 immortal\n" + (old version also recognized:) + Id: 32 byte Bacula identifier "Bacula 0.9 mortal\n" + LabelType (Saved in the FileIndex of the Header record). + PRE_LABEL -1 Volume label on unwritten tape + VOL_LABEL -2 Volume label after tape written + EOM_LABEL -3 Label at EOM (not currently implemented) + SOS_LABEL -4 Start of Session label (format given below) + EOS_LABEL -5 End of Session label (format given below) + VerNum: 11 + label_date: Julian day tape labeled + label_time: Julian time tape labeled + write_date: Julian date tape first used (data written) + write_time: Julian time tape first used (data written) + VolName: "Physical" Volume name + PrevVolName: The VolName of the previous tape (if this tape is + a continuation of the previous one). 
+ PoolName: Pool Name + PoolType: Pool Type + MediaType: Media Type + HostName: Name of host that is first writing the tape + LabelProg: Name of the program that labeled the tape + ProgVersion: Version of the label program + ProgDate: Date Label program built + Session Label + :=======================================================: + | Id (32 bytes) | + |-------------------------------------------------------| + | VerNum (uint32_t) | + |-------------------------------------------------------| + | JobId (uint32_t) | + |-------------------------------------------------------| + | write_btime (btime_t) VerNum 11 | + |-------------------------------------------------------| + | 0 (float64_t) VerNum 11 | + |-------------------------------------------------------| + | PoolName (128 bytes) | + |-------------------------------------------------------| + | PoolType (128 bytes) | + |-------------------------------------------------------| + | JobName (128 bytes) | + |-------------------------------------------------------| + | ClientName (128 bytes) | + |-------------------------------------------------------| + | Job (128 bytes) | + |-------------------------------------------------------| + | FileSetName (128 bytes) | + |-------------------------------------------------------| + | JobType (uint32_t) | + |-------------------------------------------------------| + | JobLevel (uint32_t) | + |-------------------------------------------------------| + | FileSetMD5 (50 bytes) VerNum 11 | + |-------------------------------------------------------| + Additional fields in End Of Session Label + |-------------------------------------------------------| + | JobFiles (uint32_t) | + |-------------------------------------------------------| + | JobBytes (uint32_t) | + |-------------------------------------------------------| + | start_block (uint32_t) | + |-------------------------------------------------------| + | end_block (uint32_t) | + |-------------------------------------------------------| + | start_file (uint32_t) | + |-------------------------------------------------------| + | end_file (uint32_t) | + |-------------------------------------------------------| + | JobErrors (uint32_t) | + |-------------------------------------------------------| + | JobStatus (uint32_t) VerNum 11 | + :=======================================================: + * => fields deprecated + Id: 32 byte Bacula Identifier "Bacula 1.0 immortal\n" + LabelType (in FileIndex field of Header): + EOM_LABEL -3 Label at EOM + SOS_LABEL -4 Start of Session label + EOS_LABEL -5 End of Session label + VerNum: 11 + JobId: JobId + write_btime: Bacula time/date this tape record written + write_date: Julian date tape this record written - deprecated + write_time: Julian time tape this record written - deprecated. + PoolName: Pool Name + PoolType: Pool Type + MediaType: Media Type + ClientName: Name of File daemon or Client writing this session + Not used for EOM_LABEL. +\end{verbatim} +\normalsize + +\section{Unix File Attributes} +\index{Unix File Attributes} +\index{Attributes!Unix File} +\addcontentsline{toc}{subsection}{Unix File Attributes} + +The Unix File Attributes packet consists of the following: + +\lt{}File-Index\gt{} \lt{}Type\gt{} +\lt{}Filename\gt{}@\lt{}File-Attributes\gt{}@\lt{}Link\gt{} +@\lt{}Extended-Attributes@\gt{} where + +\begin{description} + +\item [@] + represents a byte containing a binary zero. + +\item [FileIndex] + \index{FileIndex} + is the sequential file index starting from one assigned by the File daemon. 
+ +\item [Type] + \index{Type} + is one of the following: + +\footnotesize +\begin{verbatim} +#define FT_LNKSAVED 1 /* hard link to file already saved */ +#define FT_REGE 2 /* Regular file but empty */ +#define FT_REG 3 /* Regular file */ +#define FT_LNK 4 /* Soft Link */ +#define FT_DIR 5 /* Directory */ +#define FT_SPEC 6 /* Special file -- chr, blk, fifo, sock */ +#define FT_NOACCESS 7 /* Not able to access */ +#define FT_NOFOLLOW 8 /* Could not follow link */ +#define FT_NOSTAT 9 /* Could not stat file */ +#define FT_NOCHG 10 /* Incremental option, file not changed */ +#define FT_DIRNOCHG 11 /* Incremental option, directory not changed */ +#define FT_ISARCH 12 /* Trying to save archive file */ +#define FT_NORECURSE 13 /* No recursion into directory */ +#define FT_NOFSCHG 14 /* Different file system, prohibited */ +#define FT_NOOPEN 15 /* Could not open directory */ +#define FT_RAW 16 /* Raw block device */ +#define FT_FIFO 17 /* Raw fifo device */ +\end{verbatim} +\normalsize + +\item [Filename] + \index{Filename} + is the fully qualified filename. + +\item [File-Attributes] + \index{File-Attributes} + consists of the 13 fields of the stat() buffer in ASCII base64 format +separated by spaces. These fields and their meanings are shown below. This +stat() packet is in Unix format, and MUST be provided (constructed) for ALL +systems. + +\item [Link] + \index{Link} + when the FT code is FT\_LNK or FT\_LNKSAVED, the item in question is a Unix +link, and this field contains the fully qualified link name. When the FT code +is not FT\_LNK or FT\_LNKSAVED, this field is null. + +\item [Extended-Attributes] + \index{Extended-Attributes} + The exact format of this field is operating system dependent. It contains +additional or extended attributes of a system dependent nature. Currently, +this field is used only on WIN32 systems where it contains a ASCII base64 +representation of the WIN32\_FILE\_ATTRIBUTE\_DATA structure as defined by +Windows. The fields in the base64 representation of this structure are like +the File-Attributes separated by spaces. +\end{description} + +The File-attributes consist of the following: + +\addcontentsline{lot}{table}{File Attributes} +\begin{longtable}{|p{0.6in}|p{0.7in}|p{1in}|p{1in}|p{1.4in}|} + \hline +\multicolumn{1}{|c|}{\bf Field No. } & \multicolumn{1}{c|}{\bf Stat Name } +& \multicolumn{1}{c|}{\bf Unix } & \multicolumn{1}{c|}{\bf Win98/NT } & +\multicolumn{1}{c|}{\bf MacOS } \\ + \hline +\multicolumn{1}{|c|}{1 } & {st\_dev } & {Device number of filesystem } & +{Drive number } & {vRefNum } \\ + \hline +\multicolumn{1}{|c|}{2 } & {st\_ino } & {Inode number } & {Always 0 } & +{fileID/dirID } \\ + \hline +\multicolumn{1}{|c|}{3 } & {st\_mode } & {File mode } & {File mode } & +{777 dirs/apps; 666 docs; 444 locked docs } \\ + \hline +\multicolumn{1}{|c|}{4 } & {st\_nlink } & {Number of links to the file } & +{Number of link (only on NTFS) } & {Always 1 } \\ + \hline +\multicolumn{1}{|c|}{5 } & {st\_uid } & {Owner ID } & {Always 0 } & +{Always 0 } \\ + \hline +\multicolumn{1}{|c|}{6 } & {st\_gid } & {Group ID } & {Always 0 } & +{Always 0 } \\ + \hline +\multicolumn{1}{|c|}{7 } & {st\_rdev } & {Device ID for special files } & +{Drive No. 
} & {Always 0 } \\ + \hline +\multicolumn{1}{|c|}{8 } & {st\_size } & {File size in bytes } & {File +size in bytes } & {Data fork file size in bytes } \\ + \hline +\multicolumn{1}{|c|}{9 } & {st\_blksize } & {Preferred block size } & +{Always 0 } & {Preferred block size } \\ + \hline +\multicolumn{1}{|c|}{10 } & {st\_blocks } & {Number of blocks allocated } +& {Always 0 } & {Number of blocks allocated } \\ + \hline +\multicolumn{1}{|c|}{11 } & {st\_atime } & {Last access time since epoch } +& {Last access time since epoch } & {Last access time -66 years } \\ + \hline +\multicolumn{1}{|c|}{12 } & {st\_mtime } & {Last modify time since epoch } +& {Last modify time since epoch } & {Last access time -66 years } \\ + \hline +\multicolumn{1}{|c|}{13 } & {st\_ctime } & {Inode change time since epoch +} & {File create time since epoch } & {File create time -66 years} +\\ \hline + +\end{longtable} + +\section{Old Depreciated Tape Format} +\index{Old Depreciated Tape Format} +\index{Format!Old Depreciated Tape} +\addcontentsline{toc}{subsection}{Old Depreciated Tape Format} + +The format of the Block Header (version 1.26 and earlier) is: + +\footnotesize +\begin{verbatim} + uint32_t CheckSum; /* Block check sum */ + uint32_t BlockSize; /* Block byte size including the header */ + uint32_t BlockNumber; /* Block number */ + char ID[4] = "BB01"; /* Identification and block level */ +\end{verbatim} +\normalsize + +The format of the Record Header (version 1.26 or earlier) is: + +\footnotesize +\begin{verbatim} + uint32_t VolSessionId; /* Unique ID for this session */ + uint32_t VolSessionTime; /* Start time/date of session */ + int32_t FileIndex; /* File index supplied by File daemon */ + int32_t Stream; /* Stream number supplied by File daemon */ + uint32_t DataSize; /* size of following data record in bytes */ +\end{verbatim} +\normalsize + +\footnotesize +\begin{verbatim} + Current Bacula Tape Format + 6 June 2001 + Version BB01 is the old deprecated format. + A Bacula tape is composed of tape Blocks. Each block + has a Block header followed by the block data. Block + Data consists of Records. Records consist of Record + Headers followed by Record Data. + :=======================================================: + | | + | Block Header | + | (16 bytes version BB01) | + |-------------------------------------------------------| + | | + | Record Header | + | (20 bytes version BB01) | + |-------------------------------------------------------| + | | + | Record Data | + | | + |-------------------------------------------------------| + | | + | Record Header | + | (20 bytes version BB01) | + |-------------------------------------------------------| + | | + | ... | + Block Header: the first item in each block. The format is + shown below. + Partial Data block: occurs if the data from a previous + block spills over to this block (the normal case except + for the first block on a tape). However, this partial + data block is always preceded by a record header. + Record Header: identifies the Volume Session, the Stream + and the following Record Data size. See below for format. + Record data: arbitrary binary data. 
+ Block Header Format BB01 (deprecated) + :=======================================================: + | CheckSum (uint32_t) | + |-------------------------------------------------------| + | BlockSize (uint32_t) | + |-------------------------------------------------------| + | BlockNumber (uint32_t) | + |-------------------------------------------------------| + | "BB01" (char [4]) | + :=======================================================: + BBO1: Serves to identify the block as a + Bacula block and also servers as a block format identifier + should we ever need to change the format. + BlockSize: is the size in bytes of the block. When reading + back a block, if the BlockSize does not agree with the + actual size read, Bacula discards the block. + CheckSum: a checksum for the Block. + BlockNumber: is the sequential block number on the tape. + VolSessionId: a unique sequential number that is assigned + by the Storage Daemon to a particular Job. + This number is sequential since the start + of execution of the daemon. + VolSessionTime: the time/date that the current execution + of the Storage Daemon started. It assures + that the combination of VolSessionId and + VolSessionTime is unique for all jobs + written to the tape, even if there was a + machine crash between two writes. + Record Header Format BB01 (deprecated) + :=======================================================: + | VolSessionId (uint32_t) | + |-------------------------------------------------------| + | VolSessionTime (uint32_t) | + |-------------------------------------------------------| + | FileIndex (int32_t) | + |-------------------------------------------------------| + | Stream (int32_t) | + |-------------------------------------------------------| + | DataSize (uint32_t) | + :=======================================================: + VolSessionId: a unique sequential number that is assigned + by the Storage Daemon to a particular Job. + This number is sequential since the start + of execution of the daemon. + VolSessionTime: the time/date that the current execution + of the Storage Daemon started. It assures + that the combination of VolSessionId and + VolSessionTime is unique for all jobs + written to the tape, even if there was a + machine crash between two writes. + FileIndex: a sequential file number within a job. The + Storage daemon enforces this index to be + greater than zero and sequential. Note, + however, that the File daemon may send + multiple Streams for the same FileIndex. + The Storage Daemon uses negative FileIndices + to identify Session Start and End labels + as well as the End of Volume labels. + Stream: defined by the File daemon and is intended to be + used to identify separate parts of the data + saved for each file (attributes, file data, + ...). The Storage Daemon has no idea of + what a Stream is or what it contains. + DataSize: the size in bytes of the binary data record + that follows the Session Record header. + The Storage Daemon has no idea of the + actual contents of the binary data record. + For standard Unix files, the data record + typically contains the file attributes or + the file data. For a sparse file + the first 64 bits of the data contains + the storage address for the data block. 
+ Volume Label + :=======================================================: + | Id (32 bytes) | + |-------------------------------------------------------| + | VerNum (uint32_t) | + |-------------------------------------------------------| + | label_date (float64_t) | + |-------------------------------------------------------| + | label_time (float64_t) | + |-------------------------------------------------------| + | write_date (float64_t) | + |-------------------------------------------------------| + | write_time (float64_t) | + |-------------------------------------------------------| + | VolName (128 bytes) | + |-------------------------------------------------------| + | PrevVolName (128 bytes) | + |-------------------------------------------------------| + | PoolName (128 bytes) | + |-------------------------------------------------------| + | PoolType (128 bytes) | + |-------------------------------------------------------| + | MediaType (128 bytes) | + |-------------------------------------------------------| + | HostName (128 bytes) | + |-------------------------------------------------------| + | LabelProg (32 bytes) | + |-------------------------------------------------------| + | ProgVersion (32 bytes) | + |-------------------------------------------------------| + | ProgDate (32 bytes) | + |-------------------------------------------------------| + :=======================================================: + + Id: 32 byte Bacula identifier "Bacula 1.0 immortal\n" + (old version also recognized:) + Id: 32 byte Bacula identifier "Bacula 0.9 mortal\n" + LabelType (Saved in the FileIndex of the Header record). + PRE_LABEL -1 Volume label on unwritten tape + VOL_LABEL -2 Volume label after tape written + EOM_LABEL -3 Label at EOM (not currently implemented) + SOS_LABEL -4 Start of Session label (format given below) + EOS_LABEL -5 End of Session label (format given below) + label_date: Julian day tape labeled + label_time: Julian time tape labeled + write_date: Julian date tape first used (data written) + write_time: Julian time tape first used (data written) + VolName: "Physical" Volume name + PrevVolName: The VolName of the previous tape (if this tape is + a continuation of the previous one). 
+ PoolName: Pool Name + PoolType: Pool Type + MediaType: Media Type + HostName: Name of host that is first writing the tape + LabelProg: Name of the program that labeled the tape + ProgVersion: Version of the label program + ProgDate: Date Label program built + Session Label + :=======================================================: + | Id (32 bytes) | + |-------------------------------------------------------| + | VerNum (uint32_t) | + |-------------------------------------------------------| + | JobId (uint32_t) | + |-------------------------------------------------------| + | *write_date (float64_t) VerNum 10 | + |-------------------------------------------------------| + | *write_time (float64_t) VerNum 10 | + |-------------------------------------------------------| + | PoolName (128 bytes) | + |-------------------------------------------------------| + | PoolType (128 bytes) | + |-------------------------------------------------------| + | JobName (128 bytes) | + |-------------------------------------------------------| + | ClientName (128 bytes) | + |-------------------------------------------------------| + | Job (128 bytes) | + |-------------------------------------------------------| + | FileSetName (128 bytes) | + |-------------------------------------------------------| + | JobType (uint32_t) | + |-------------------------------------------------------| + | JobLevel (uint32_t) | + |-------------------------------------------------------| + | FileSetMD5 (50 bytes) VerNum 11 | + |-------------------------------------------------------| + Additional fields in End Of Session Label + |-------------------------------------------------------| + | JobFiles (uint32_t) | + |-------------------------------------------------------| + | JobBytes (uint32_t) | + |-------------------------------------------------------| + | start_block (uint32_t) | + |-------------------------------------------------------| + | end_block (uint32_t) | + |-------------------------------------------------------| + | start_file (uint32_t) | + |-------------------------------------------------------| + | end_file (uint32_t) | + |-------------------------------------------------------| + | JobErrors (uint32_t) | + |-------------------------------------------------------| + | JobStatus (uint32_t) VerNum 11 | + :=======================================================: + * => fields deprecated + Id: 32 byte Bacula Identifier "Bacula 1.0 immortal\n" + LabelType (in FileIndex field of Header): + EOM_LABEL -3 Label at EOM + SOS_LABEL -4 Start of Session label + EOS_LABEL -5 End of Session label + VerNum: 11 + JobId: JobId + write_btime: Bacula time/date this tape record written + write_date: Julian date tape this record written - deprecated + write_time: Julian time tape this record written - deprecated. + PoolName: Pool Name + PoolType: Pool Type + MediaType: Media Type + ClientName: Name of File daemon or Client writing this session + Not used for EOM_LABEL. 
+\end{verbatim} +\normalsize diff --git a/docs/manuals/de/developers/mempool.tex b/docs/manuals/de/developers/mempool.tex new file mode 100644 index 00000000..a8130200 --- /dev/null +++ b/docs/manuals/de/developers/mempool.tex @@ -0,0 +1,234 @@ +%% +%% + +\chapter{Bacula Memory Management} +\label{_ChapterStart7} +\index{Management!Bacula Memory} +\index{Bacula Memory Management} +\addcontentsline{toc}{section}{Bacula Memory Management} + +\section{General} +\index{General} +\addcontentsline{toc}{subsection}{General} + +This document describes the memory management routines that are used in Bacula +and is meant to be a technical discussion for developers rather than part of +the user manual. + +Since Bacula may be called upon to handle filenames of varying and more or +less arbitrary length, special attention needs to be used in the code to +ensure that memory buffers are sufficiently large. There are four +possibilities for memory usage within {\bf Bacula}. Each will be described in +turn. They are: + +\begin{itemize} +\item Statically allocated memory. +\item Dynamically allocated memory using malloc() and free(). +\item Non-pooled memory. +\item Pooled memory. + \end{itemize} + +\subsection{Statically Allocated Memory} +\index{Statically Allocated Memory} +\index{Memory!Statically Allocated} +\addcontentsline{toc}{subsubsection}{Statically Allocated Memory} + +Statically allocated memory is of the form: + +\footnotesize +\begin{verbatim} +char buffer[MAXSTRING]; +\end{verbatim} +\normalsize + +The use of this kind of memory is discouraged except when you are 100\% sure +that the strings to be used will be of a fixed length. One example of where +this is appropriate is for {\bf Bacula} resource names, which are currently +limited to 127 characters (MAX\_NAME\_LENGTH). Although this maximum size may +change, particularly to accommodate Unicode, it will remain a relatively small +value. + +\subsection{Dynamically Allocated Memory} +\index{Dynamically Allocated Memory} +\index{Memory!Dynamically Allocated} +\addcontentsline{toc}{subsubsection}{Dynamically Allocated Memory} + +Dynamically allocated memory is obtained using the standard malloc() routines. +As in: + +\footnotesize +\begin{verbatim} +char *buf; +buf = malloc(256); +\end{verbatim} +\normalsize + +This kind of memory can be released with: + +\footnotesize +\begin{verbatim} +free(buf); +\end{verbatim} +\normalsize + +It is recommended to use this kind of memory only when you are sure that you +know the memory size needed and the memory will be used for short periods of +time -- that is it would not be appropriate to use statically allocated +memory. An example might be to obtain a large memory buffer for reading and +writing files. When {\bf SmartAlloc} is enabled, the memory obtained by +malloc() will automatically be checked for buffer overwrite (overflow) during +the free() call, and all malloc'ed memory that is not released prior to +termination of the program will be reported as Orphaned memory. + +\subsection{Pooled and Non-pooled Memory} +\index{Memory!Pooled and Non-pooled} +\index{Pooled and Non-pooled Memory} +\addcontentsline{toc}{subsubsection}{Pooled and Non-pooled Memory} + +In order to facility the handling of arbitrary length filenames and to +efficiently handle a high volume of dynamic memory usage, we have implemented +routines between the C code and the malloc routines. 
The first is called +``Pooled'' memory, and is memory, which once allocated and then released, is +not returned to the system memory pool, but rather retained in a Bacula memory +pool. The next request to acquire pooled memory will return any free memory +block. In addition, each memory block has its current size associated with the +block allowing for easy checking if the buffer is of sufficient size. This +kind of memory would normally be used in high volume situations (lots of +malloc()s and free()s) where the buffer length may have to frequently change +to adapt to varying filename lengths. + +The non-pooled memory is handled by routines similar to those used for pooled +memory, allowing for easy size checking. However, non-pooled memory is +returned to the system rather than being saved in the Bacula pool. This kind +of memory would normally be used in low volume situations (few malloc()s and +free()s), but where the size of the buffer might have to be adjusted +frequently. + +\paragraph*{Types of Memory Pool:} + +Currently there are three memory pool types: + +\begin{itemize} +\item PM\_NOPOOL -- non-pooled memory. +\item PM\_FNAME -- a filename pool. +\item PM\_MESSAGE -- a message buffer pool. +\item PM\_EMSG -- error message buffer pool. + \end{itemize} + +\paragraph*{Getting Memory:} + +To get memory, one uses: + +\footnotesize +\begin{verbatim} +void *get_pool_memory(pool); +\end{verbatim} +\normalsize + +where {\bf pool} is one of the above mentioned pool names. The size of the +memory returned will be determined by the system to be most appropriate for +the application. + +If you wish non-pooled memory, you may alternatively call: + +\footnotesize +\begin{verbatim} +void *get_memory(size_t size); +\end{verbatim} +\normalsize + +The buffer length will be set to the size specified, and it will be assigned +to the PM\_NOPOOL pool (no pooling). + +\paragraph*{Releasing Memory:} + +To free memory acquired by either of the above two calls, use: + +\footnotesize +\begin{verbatim} +void free_pool_memory(void *buffer); +\end{verbatim} +\normalsize + +where buffer is the memory buffer returned when the memory was acquired. If +the memory was originally allocated as type PM\_NOPOOL, it will be released to +the system, otherwise, it will be placed on the appropriate Bacula memory pool +free chain to be used in a subsequent call for memory from that pool. + +\paragraph*{Determining the Memory Size:} + +To determine the memory buffer size, use: + +\footnotesize +\begin{verbatim} +size_t sizeof_pool_memory(void *buffer); +\end{verbatim} +\normalsize + +\paragraph*{Resizing Pool Memory:} + +To resize pool memory, use: + +\footnotesize +\begin{verbatim} +void *realloc_pool_memory(void *buffer); +\end{verbatim} +\normalsize + +The buffer will be reallocated, and the contents of the original buffer will +be preserved, but the address of the buffer may change. + +\paragraph*{Automatic Size Adjustment:} + +To have the system check and if necessary adjust the size of your pooled +memory buffer, use: + +\footnotesize +\begin{verbatim} +void *check_pool_memory_size(void *buffer, size_t new-size); +\end{verbatim} +\normalsize + +where {\bf new-size} is the buffer length needed. Note, if the buffer is +already equal to or larger than {\bf new-size} no buffer size change will +occur. However, if a buffer size change is needed, the original contents of +the buffer will be preserved, but the buffer address may change. 
Many of the +low level Bacula subroutines expect to be passed a pool memory buffer and use +this call to ensure the buffer they use is sufficiently large. + +\paragraph*{Releasing All Pooled Memory:} + +In order to avoid orphaned buffer error messages when terminating the program, +use: + +\footnotesize +\begin{verbatim} +void close_memory_pool(); +\end{verbatim} +\normalsize + +to free all unused memory retained in the Bacula memory pool. Note, any memory +not returned to the pool via free\_pool\_memory() will not be released by this +call. + +\paragraph*{Pooled Memory Statistics:} + +For debugging purposes and performance tuning, the following call will print +the current memory pool statistics: + +\footnotesize +\begin{verbatim} +void print_memory_pool_stats(); +\end{verbatim} +\normalsize + +an example output is: + +\footnotesize +\begin{verbatim} +Pool Maxsize Maxused Inuse + 0 256 0 0 + 1 256 1 0 + 2 256 1 0 +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/developers/netprotocol.tex b/docs/manuals/de/developers/netprotocol.tex new file mode 100644 index 00000000..45c2a8ed --- /dev/null +++ b/docs/manuals/de/developers/netprotocol.tex @@ -0,0 +1,224 @@ +%% +%% + +\chapter{TCP/IP Network Protocol} +\label{_ChapterStart5} +\index{TCP/IP Network Protocol} +\index{Protocol!TCP/IP Network} +\addcontentsline{toc}{section}{TCP/IP Network Protocol} + +\section{General} +\index{General} +\addcontentsline{toc}{subsection}{General} + +This document describes the TCP/IP protocol used by Bacula to communicate +between the various daemons and services. The definitive definition of the +protocol can be found in src/lib/bsock.h, src/lib/bnet.c and +src/lib/bnet\_server.c. + +Bacula's network protocol is basically a ``packet oriented'' protocol built on +a standard TCP/IP streams. At the lowest level all packet transfers are done +with read() and write() requests on system sockets. Pipes are not used as they +are considered unreliable for large serial data transfers between various +hosts. + +Using the routines described below (bnet\_open, bnet\_write, bnet\_recv, and +bnet\_close) guarantees that the number of bytes you write into the socket +will be received as a single record on the other end regardless of how many +low level write() and read() calls are needed. All data transferred are +considered to be binary data. + +\section{bnet and Threads} +\index{Threads!bnet and} +\index{Bnet and Threads} +\addcontentsline{toc}{subsection}{bnet and Threads} + +These bnet routines work fine in a threaded environment. However, they assume +that there is only one reader or writer on the socket at any time. It is +highly recommended that only a single thread access any BSOCK packet. The +exception to this rule is when the socket is first opened and it is waiting +for a job to start. The wait in the Storage daemon is done in one thread and +then passed to another thread for subsequent handling. + +If you envision having two threads using the same BSOCK, think twice, then you +must implement some locking mechanism. However, it probably would not be +appropriate to put locks inside the bnet subroutines for efficiency reasons. + +\section{bnet\_open} +\index{Bnet\_open} +\addcontentsline{toc}{subsection}{bnet\_open} + +To establish a connection to a server, use the subroutine: + +BSOCK *bnet\_open(void *jcr, char *host, char *service, int port, int *fatal) +bnet\_open(), if successful, returns the Bacula sock descriptor pointer to be +used in subsequent bnet\_send() and bnet\_read() requests. 
If not successful,
+bnet\_open() returns a NULL. If fatal is set on return, it means that a fatal
+error occurred and that you should not repeatedly call bnet\_open(). Any error
+message will generally be sent to the JCR.
+
+\section{bnet\_send}
+\index{Bnet\_send}
+\addcontentsline{toc}{subsection}{bnet\_send}
+
+To send a packet, one uses the subroutine:
+
+int bnet\_send(BSOCK *sock) This routine is equivalent to a write() except
+that it handles the low level details. The data to be sent is expected to be
+in sock-\gt{}msg and be sock-\gt{}msglen bytes. To send a packet, bnet\_send()
+first writes four bytes in network byte order that indicate the size of the
+following data packet. It returns:
+
+\footnotesize
+\begin{verbatim}
+ Returns 0 on failure
+ Returns 1 on success
+\end{verbatim}
+\normalsize
+
+In the case of a failure, an error message will be sent to the JCR contained
+within the bsock packet.
+
+\section{bnet\_fsend}
+\index{Bnet\_fsend}
+\addcontentsline{toc}{subsection}{bnet\_fsend}
+
+This form uses:
+
+int bnet\_fsend(BSOCK *sock, char *format, ...) and it allows you to send
+formatted messages somewhat like fprintf(). The return status is the same as
+bnet\_send.
+
+\section{Additional Error information}
+\index{Information!Additional Error}
+\index{Additional Error information}
+\addcontentsline{toc}{subsection}{Additional Error information}
+
+For additional error information, you can call {\bf is\_bnet\_error(BSOCK
+*bsock)} which will return 0 if there is no error or non-zero if there is an
+error on the last transmission. The {\bf is\_bnet\_stop(BSOCK *bsock)}
+function will return 0 if there are no errors and you can continue sending. It
+will return non-zero if there are errors or the line is closed (no more
+transmissions should be sent).
+
+\section{bnet\_recv}
+\index{Bnet\_recv}
+\addcontentsline{toc}{subsection}{bnet\_recv}
+
+To read a packet, one uses the subroutine:
+
+int bnet\_recv(BSOCK *sock) This routine is similar to a read() except that it
+handles the low level details. bnet\_recv() first reads the packet length that
+follows as four bytes in network byte order. The data is read into
+sock-\gt{}msg and is sock-\gt{}msglen bytes. If the sock-\gt{}msg is not large
+enough, bnet\_recv() will realloc() the buffer. It will return an error (-2) if
+maxbytes is less than the record size sent. It returns:
+
+\footnotesize
+\begin{verbatim}
+ * Returns number of bytes read
+ * Returns 0 on end of file
+ * Returns -1 on hard end of file (i.e. network connection close)
+ * Returns -2 on error
+\end{verbatim}
+\normalsize
+
+It should be noted that bnet\_recv() is a blocking read.
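+
+As a concrete illustration of the calls above, the following is a minimal,
+hypothetical sketch (it is not taken from the Bacula sources): a client sends
+one command as a single record with bnet\_fsend() and waits for the single
+acknowledgement record with bnet\_recv(). The routine name and the command
+string are invented for the example.
+
+\footnotesize
+\begin{verbatim}
+/* Hypothetical example -- send one command and wait for the ack */
+static bool send_one_command(BSOCK *dir)
+{
+   if (!bnet_fsend(dir, "status\n")) {   /* one record = one packet */
+      return false;
+   }
+   if (bnet_recv(dir) <= 0) {            /* blocking read of the ack record */
+      return false;
+   }
+   return !is_bnet_error(dir);           /* non-zero means an error occurred */
+}
+\end{verbatim}
+\normalsize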
+ +\section{bnet\_sig} +\index{Bnet\_sig} +\addcontentsline{toc}{subsection}{bnet\_sig} + +To send a ``signal'' from one daemon to another, one uses the subroutine: + +int bnet\_sig(BSOCK *sock, SIGNAL) where SIGNAL is one of the following: + +\begin{enumerate} +\item BNET\_EOF - deprecated use BNET\_EOD +\item BNET\_EOD - End of data stream, new data may follow +\item BNET\_EOD\_POLL - End of data and poll all in one +\item BNET\_STATUS - Request full status +\item BNET\_TERMINATE - Conversation terminated, doing close() +\item BNET\_POLL - Poll request, I'm hanging on a read +\item BNET\_HEARTBEAT - Heartbeat Response requested +\item BNET\_HB\_RESPONSE - Only response permitted to HB +\item BNET\_PROMPT - Prompt for UA + \end{enumerate} + +\section{bnet\_strerror} +\index{Bnet\_strerror} +\addcontentsline{toc}{subsection}{bnet\_strerror} + +Returns a formated string corresponding to the last error that occurred. + +\section{bnet\_close} +\index{Bnet\_close} +\addcontentsline{toc}{subsection}{bnet\_close} + +The connection with the server remains open until closed by the subroutine: + +void bnet\_close(BSOCK *sock) + +\section{Becoming a Server} +\index{Server!Becoming a} +\index{Becoming a Server} +\addcontentsline{toc}{subsection}{Becoming a Server} + +The bnet\_open() and bnet\_close() routines described above are used on the +client side to establish a connection and terminate a connection with the +server. To become a server (i.e. wait for a connection from a client), use the +routine {\bf bnet\_thread\_server}. The calling sequence is a bit complicated, +please refer to the code in bnet\_server.c and the code at the beginning of +each daemon as examples of how to call it. + +\section{Higher Level Conventions} +\index{Conventions!Higher Level} +\index{Higher Level Conventions} +\addcontentsline{toc}{subsection}{Higher Level Conventions} + +Within Bacula, we have established the convention that any time a single +record is passed, it is sent with bnet\_send() and read with bnet\_recv(). +Thus the normal exchange between the server (S) and the client (C) are: + +\footnotesize +\begin{verbatim} +S: wait for connection C: attempt connection +S: accept connection C: bnet_send() send request +S: bnet_recv() wait for request +S: act on request +S: bnet_send() send ack C: bnet_recv() wait for ack +\end{verbatim} +\normalsize + +Thus a single command is sent, acted upon by the server, and then +acknowledged. + +In certain cases, such as the transfer of the data for a file, all the +information or data cannot be sent in a single packet. In this case, the +convention is that the client will send a command to the server, who knows +that more than one packet will be returned. In this case, the server will +enter a loop: + +\footnotesize +\begin{verbatim} +while ((n=bnet_recv(bsock)) > 0) { + act on request +} +if (n < 0) + error +\end{verbatim} +\normalsize + +The client will perform the following: + +\footnotesize +\begin{verbatim} +bnet_send(bsock); +bnet_send(bsock); +... +bnet_sig(bsock, BNET_EOD); +\end{verbatim} +\normalsize + +Thus the client will send multiple packets and signal to the server when all +the packets have been sent by sending a zero length record. 
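+
+Putting the two fragments above together, a minimal, hypothetical server-side
+routine (not taken from the Bacula sources; the routine name is invented)
+that receives such a multi-packet stream with bnet\_recv() until the client
+signals BNET\_EOD might look like this:
+
+\footnotesize
+\begin{verbatim}
+/* Hypothetical example -- read records until the client signals BNET_EOD */
+static bool receive_data_stream(BSOCK *bsock)
+{
+   int32_t n;
+   while ((n = bnet_recv(bsock)) > 0) {
+      /* act on the record: bsock->msg holds bsock->msglen bytes */
+   }
+   if (n < 0) {
+      return false;            /* -1 = connection closed, -2 = error */
+   }
+   return true;                /* 0 = zero length record, i.e. BNET_EOD */
+}
+\end{verbatim}
+\normalsize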
diff --git a/docs/manuals/de/developers/platformsupport.tex b/docs/manuals/de/developers/platformsupport.tex new file mode 100644 index 00000000..a04e56f7 --- /dev/null +++ b/docs/manuals/de/developers/platformsupport.tex @@ -0,0 +1,107 @@ +%% +%% + +\chapter{Platform Support} +\label{_PlatformChapter} +\index{Support!Platform} +\index{Platform Support} +\addcontentsline{toc}{section}{Platform Support} + +\section{General} +\index{General } +\addcontentsline{toc}{subsection}{General} + +This chapter describes the requirements for having a +supported platform (Operating System). In general, Bacula is +quite portable. It supports 32 and 64 bit architectures as well +as bigendian and littleendian machines. For full +support, the platform (Operating System) must implement POSIX Unix +system calls. However, for File daemon support only, a small +compatibility library can be written to support almost any +architecture. + +Currently Linux, FreeBSD, and Solaris are fully supported +platforms, which means that the code has been tested on those +machines and passes a full set of regression tests. + +In addition, the Windows File daemon is supported on most versions +of Windows, and finally, there are a number of other platforms +where the File daemon (client) is known to run: NetBSD, OpenBSD, +Mac OSX, SGI, ... + +\section{Requirements to become a Supported Platform} +\index{Requirements!Platform} +\index{Platform Requirements} +\addcontentsline{toc}{subsection}{Platform Requirements} + +As mentioned above, in order to become a fully supported platform, it +must support POSIX Unix system calls. In addition, the following +requirements must be met: + +\begin{itemize} +\item The principal developer (currently Kern) must have + non-root ssh access to a test machine running the platform. +\item The ideal requirements and minimum requirements + for this machine are given below. +\item There must be a defined platform champion who is normally + a system administrator for the machine that is available. This + person need not be a developer/programmer but must be familiar + with system administration of the platform. +\item There must be at least one person designated who will + run regression tests prior to each release. Releases occur + approximately once every 6 months, but can be more frequent. + It takes at most a day's effort to setup the regression scripts + in the beginning, and after that, they can either be run daily + or on demand before a release. Running the regression scripts + involves only one or two command line commands and is fully + automated. +\item Ideally there are one or more persons who will package + each Bacula release. +\item Ideally there are one or more developers who can respond to + and fix platform specific bugs. +\end{itemize} + +Ideal requirements for a test machine: +\begin{itemize} +\item The principal developer will have non-root ssh access to + the test machine at all times. +\item The pricipal developer will have a root password. +\item The test machine will provide approximately 200 MB of + disk space for continual use. +\item The test machine will have approximately 500 MB of free + disk space for temporary use. +\item The test machine will run the most common version of the OS. +\item The test machine will have an autochanger of DDS-4 technology + or later having two or more tapes. +\item The test machine will have MySQL and/or PostgreSQL database + access for account "bacula" available. +\item The test machine will have sftp access. 
+\item The test machine will provide an SMTP server.
+\end{itemize}
+
+Minimum requirements for a test machine:
+\begin{itemize}
+\item The principal developer will have non-root ssh access to
+ the test machine when requested approximately once a month.
+\item The principal developer will not have root access.
+\item The test machine will provide approximately 80 MB of
+ disk space for continual use.
+\item The test machine will have approximately 300 MB of free
+ disk space for temporary use.
+\item The test machine will run the OS.
+\item The test machine will have a tape drive of DDS-4 technology
+ or later that can be scheduled for access.
+\item The test machine will not have MySQL and/or PostgreSQL database
+ access.
+\item The test machine will have no sftp access.
+\item The test machine will provide no email access.
+\end{itemize}
+
+Bare bones test machine requirements:
+\begin{itemize}
+\item The test machine is available only to a designated
+ test person (your own machine).
+\item The designated test person runs the regression
+ tests on demand.
+\item The test machine has a tape drive available.
+\end{itemize}
diff --git a/docs/manuals/de/developers/porting.tex b/docs/manuals/de/developers/porting.tex
new file mode 100644
index 00000000..278f0e5d
--- /dev/null
+++ b/docs/manuals/de/developers/porting.tex
@@ -0,0 +1,173 @@
+%%
+%%
+
+\chapter{Bacula Porting Notes}
+\label{_ChapterStart1}
+\index{Notes!Bacula Porting}
+\index{Bacula Porting Notes}
+\addcontentsline{toc}{section}{Bacula Porting Notes}
+
+This document is intended mostly for developers who wish to port Bacula to a
+system that is not {\bf officially} supported.
+
+It is hoped that Bacula clients will eventually run on every imaginable system
+that needs backing up (perhaps even a Palm). It is also hoped that the Bacula
+Director and Storage daemons will run on every system capable of supporting
+them.
+
+\section{Porting Requirements}
+\index{Requirements!Porting}
+\index{Porting Requirements}
+\addcontentsline{toc}{section}{Porting Requirements}
+
+In general, the following holds true:
+
+\begin{itemize}
+\item {\bf Bacula} has been compiled and run on Linux RedHat, FreeBSD, and
+ Solaris systems.
+\item In addition, clients exist on Win32 and Irix.
+\item It requires GNU C++ to compile. You can try with other compilers, but
+ you are on your own. The Irix client is built with the Irix compiler, but, in
+ general, you will need GNU.
+\item Your compiler must provide support for 64 bit signed and unsigned
+ integers.
+\item You will need a recent copy of the {\bf autoconf} tools loaded on your
+ system (version 2.13 or later). The {\bf autoconf} tools are used to build
+ the configuration program, but are not part of the Bacula source
+distribution.
+\item There are certain third party packages that Bacula needs. Except for
+ MySQL, they can all be found in the {\bf depkgs} and {\bf depkgs1} releases.
+\item To build the Win32 binaries, we use Microsoft VC++ standard
+ 2003. Please see the instructions in
+ bacula-source/src/win32/README.win32 for more details. If you
+ want to use VC++ Express, please see README.vc8. Our build is
+ done under the most recent version of Cygwin, but Cygwin is
+ not used in the Bacula binaries that are produced.
+ Unfortunately, we do not have the resources to help you build
+ your own version of the Win32 FD, so you are pretty much on
+ your own. You can ask the bacula-devel list for help, but
+ please don't expect much.
+\item {\bf Bacula} requires a good implementation of pthreads to work.
+\item The source code has been written with portability in mind and is mostly
+ POSIX compatible. Thus porting to any POSIX compatible operating system
+ should be relatively easy.
+\end{itemize}
+
+\section{Steps to Take for Porting}
+\index{Porting!Steps to Take for}
+\index{Steps to Take for Porting}
+\addcontentsline{toc}{section}{Steps to Take for Porting}
+
+\begin{itemize}
+\item The first step is to ensure that you have version 2.13 or later of the
+ {\bf autoconf} tools loaded. You can skip this step, but making changes to
+ the configuration program will be difficult or impossible.
+\item Then run a {\bf ./configure} command in the main source directory and
+ examine the output. It should look something like the following:
+
+\footnotesize
+\begin{verbatim}
+Configuration on Mon Oct 28 11:42:27 CET 2002:
+ Host: i686-pc-linux-gnu -- redhat 7.3
+ Bacula version: 1.27 (26 October 2002)
+ Source code location: .
+ Install binaries: /sbin
+ Install config files: /etc/bacula
+ C Compiler: gcc
+ C++ Compiler: c++
+ Compiler flags: -g -O2
+ Linker flags:
+ Libraries: -lpthread
+ Statically Linked Tools: no
+ Database found: no
+ Database type: Internal
+ Database lib:
+ Job Output Email: root@localhost
+ Traceback Email: root@localhost
+ SMTP Host Address: localhost
+ Director Port 9101
+ File daemon Port 9102
+ Storage daemon Port 9103
+ Working directory /etc/bacula/working
+ SQL binaries Directory
+ Large file support: yes
+ readline support: yes
+ cweb support: yes /home/kern/bacula/depkgs/cweb
+ TCP Wrappers support: no
+ ZLIB support: yes
+ enable-smartalloc: yes
+ enable-gnome: no
+ gmp support: yes
+\end{verbatim}
+\normalsize
+
+The details depend on your system. The first thing to check is that it
+properly identified your host on the {\bf Host:} line. The first part (added
+in version 1.27) is the GNU four part identification of your system. The part
+after the -- is your system and the system version. Generally, if your system
+is not yet supported, you must correct these.
+\item If the {\bf ./configure} does not function properly, you must determine
+ the cause and fix it. Generally, it will be because some required system
+ routine is not available on your machine.
+\item To correct problems with detection of your system type or with routines
+ and libraries, you must edit the file {\bf
+ \lt{}bacula-src\gt{}/autoconf/configure.in}. This is the ``source'' from
+which {\bf configure} is built. In general, most of the changes for your
+system will be made in {\bf autoconf/aclocal.m4} in the routine {\bf
+BA\_CHECK\_OPSYS} or in the routine {\bf BA\_CHECK\_OPSYS\_DISTNAME}. I have
+already added the necessary code for most systems, but if yours shows up as
+{\bf unknown} you will need to make changes. Then as mentioned above, you
+will need to set a number of system dependent items in {\bf configure.in} in
+the {\bf case} statement at approximately line 1050 (depending on the Bacula
+release).
+\item The items to set in the case statement that corresponds to your system are
+ the following:
+
+\begin{itemize}
+\item DISTVER -- set to the version of your operating system. Typically some
+ form of {\bf uname} obtains it.
+\item TAPEDRIVE -- the default tape drive. Not too important as the user can
+ set it as an option.
+\item PSCMD -- set to the {\bf ps} command that will provide the PID in the
+ first field and the program name in the second field.
If this is not set + properly, the {\bf bacula stop} script will most likely not be able to stop +Bacula in all cases. +\item hostname -- command to return the base host name (non-qualified) of + your system. This is generally the machine name. Not too important as the + user can correct this in his configuration file. +\item CFLAGS -- set any special compiler flags needed. Many systems need a + special flag to make pthreads work. See cygwin for an example. +\item LDFLAGS -- set any special loader flags. See cygwin for an example. +\item PTHREAD\_LIB -- set for any special pthreads flags needed during + linking. See freebsd as an example. +\item lld -- set so that a ``long long int'' will be properly edited in a + printf() call. +\item llu -- set so that a ``long long unsigned'' will be properly edited in + a printf() call. +\item PFILES -- set to add any files that you may define is your platform + subdirectory. These files are used for installation of automatic system + startup of Bacula daemons. +\end{itemize} + +\item To rebuild a new version of {\bf configure} from a changed {\bf + autoconf/configure.in} you enter {\bf make configure} in the top level Bacula + source directory. You must have done a ./configure prior to trying to rebuild + the configure script or it will get into an infinite loop. +\item If the {\bf make configure} gets into an infinite loop, ctl-c it, then + do {\bf ./configure} (no options are necessary) and retry the {\bf make + configure}, which should now work. +\item To rebuild {\bf configure} you will need to have {\bf autoconf} version + 2.57-3 or higher loaded. Older versions of autoconf will complain about + unknown or bad options, and won't work. +\item After you have a working {\bf configure} script, you may need to make a + few system dependent changes to the way Bacula works. Generally, these are + done in {\bf src/baconfig.h}. You can find a few examples of system dependent +changes toward the end of this file. For example, on Irix systems, there is +no definition for {\bf socklen\_t}, so it is made in this file. If your +system has structure alignment requirements, check the definition of BALIGN +in this file. Currently, all Bacula allocated memory is aligned on a {\bf +double} boundary. +\item If you are having problems with Bacula's type definitions, you might + look at {\bf src/bc\_types.h} where all the types such as {\bf uint32\_t}, + {\bf uint64\_t}, etc. that Bacula uses are defined. +\end{itemize} diff --git a/docs/manuals/de/developers/setup.sm b/docs/manuals/de/developers/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/de/developers/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. 
Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/de/developers/smartall.tex b/docs/manuals/de/developers/smartall.tex new file mode 100644 index 00000000..41f66f08 --- /dev/null +++ b/docs/manuals/de/developers/smartall.tex @@ -0,0 +1,432 @@ +%% +%% + +\addcontentsline{lof}{figure}{Smart Memory Allocation with Orphaned Buffer +Detection} +\includegraphics{./smartall.eps} + +\chapter{Smart Memory Allocation} +\label{_ChapterStart4} +\index{Detection!Smart Memory Allocation With Orphaned Buffer } +\index{Smart Memory Allocation With Orphaned Buffer Detection } +\addcontentsline{toc}{section}{Smart Memory Allocation With Orphaned Buffer +Detection} + +Few things are as embarrassing as a program that leaks, yet few errors are so +easy to commit or as difficult to track down in a large, complicated program +as failure to release allocated memory. SMARTALLOC replaces the standard C +library memory allocation functions with versions which keep track of buffer +allocations and releases and report all orphaned buffers at the end of program +execution. By including this package in your program during development and +testing, you can identify code that loses buffers right when it's added and +most easily fixed, rather than as part of a crisis debugging push when the +problem is identified much later in the testing cycle (or even worse, when the +code is in the hands of a customer). When program testing is complete, simply +recompiling with different flags removes SMARTALLOC from your program, +permitting it to run without speed or storage penalties. + +In addition to detecting orphaned buffers, SMARTALLOC also helps to find other +common problems in management of dynamic storage including storing before the +start or beyond the end of an allocated buffer, referencing data through a +pointer to a previously released buffer, attempting to release a buffer twice +or releasing storage not obtained from the allocator, and assuming the initial +contents of storage allocated by functions that do not guarantee a known +value. SMARTALLOC's checking does not usually add a large amount of overhead +to a program (except for programs which use {\tt realloc()} extensively; see +below). SMARTALLOC focuses on proper storage management rather than internal +consistency of the heap as checked by the malloc\_debug facility available on +some systems. SMARTALLOC does not conflict with malloc\_debug and both may be +used together, if you wish. SMARTALLOC makes no assumptions regarding the +internal structure of the heap and thus should be compatible with any C +language implementation of the standard memory allocation functions. + +\subsection{ Installing SMARTALLOC} +\index{SMARTALLOC!Installing } +\index{Installing SMARTALLOC } +\addcontentsline{toc}{subsection}{Installing SMARTALLOC} + +SMARTALLOC is provided as a Zipped archive, +\elink{smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip}; see the +download instructions below. 
+
+To install SMARTALLOC in your program, simply add the statement:
+
+{\tt \#include "smartall.h"}
+
+to every C program file which calls any of the memory allocation functions
+({\tt malloc}, {\tt calloc}, {\tt free}, etc.). SMARTALLOC must be used for
+all memory allocation within a program, so add this include to every program
+file, or put it in a common header file included by your entire program,
+if you have such a thing. Next, define the symbol SMARTALLOC in the
+compilation before the inclusion of smartall.h. I usually do this by having my
+Makefile add the ``{\tt -DSMARTALLOC}'' option to the C compiler for
+non-production builds. You can define the symbol manually, if you prefer, by
+adding the statement:
+
+{\tt \#define SMARTALLOC}
+
+At the point where your program is all done and ready to relinquish control to
+the operating system, add the call:
+
+{\tt \ \ \ \ \ \ \ \ sm\_dump(}{\it datadump}{\tt );}
+
+where {\it datadump} specifies whether the contents of orphaned buffers are to
+be dumped in addition to printing their size and place of allocation. The data
+are dumped only if {\it datadump} is nonzero, so most programs will normally
+use ``{\tt sm\_dump(0);}''. If a mysterious orphaned buffer appears that can't
+be identified from the information this prints about it, replace the statement
+with ``{\tt sm\_dump(1);}''. Usually the dump of the buffer's data will
+furnish the additional clues you need to excavate and extirpate the elusive
+error that left the buffer allocated.
+
+Finally, add the files ``smartall.h'' and ``smartall.c'' from this release to
+your source directory, make dependencies, and linker input. You needn't make
+inclusion of smartall.c in your link optional; if compiled with SMARTALLOC not
+defined it generates no code, so you may always include it knowing it will
+waste no storage in production builds. Now when you run your program, if it
+leaves any buffers around when it's done, each will be reported by {\tt
+sm\_dump()} on stderr as follows:
+
+\footnotesize
+\begin{verbatim}
+Orphaned buffer: 120 bytes allocated at line 50 of gutshot.c
+\end{verbatim}
+\normalsize
+
+\subsection{ Squelching a SMARTALLOC}
+\index{SMARTALLOC!Squelching a }
+\index{Squelching a SMARTALLOC }
+\addcontentsline{toc}{subsection}{Squelching a SMARTALLOC}
+
+Usually, when you first install SMARTALLOC in an existing program you'll find
+it nattering about lots of orphaned buffers. Some of these turn out to be
+legitimate errors, but some are storage allocated during program
+initialisation that, while dynamically allocated, is logically static storage
+not intended to be released. Of course, you can get rid of the complaints
+about these buffers by adding code to release them, but by doing so you're
+adding unnecessary complexity and code size to your program just to silence
+the nattering of a SMARTALLOC, so an escape hatch is provided to eliminate the
+need to release these buffers.
+
+Normally all storage allocated with the functions {\tt malloc()}, {\tt
+calloc()}, and {\tt realloc()} is monitored by SMARTALLOC. If you make the
+function call:
+
+\footnotesize
+\begin{verbatim}
+ sm_static(1);
+\end{verbatim}
+\normalsize
+
+you declare that subsequent storage allocated by {\tt malloc()}, {\tt
+calloc()}, and {\tt realloc()} should not be considered orphaned if found to
+be allocated when {\tt sm\_dump()} is called. I use a call on ``{\tt
+sm\_static(1);}'' before I allocate things like program configuration tables
+so I don't have to add code to release them at end of program time.
After +allocating unmonitored data this way, be sure to add a call to: + +\footnotesize +\begin{verbatim} + sm_static(0); +\end{verbatim} +\normalsize + +to resume normal monitoring of buffer allocations. Buffers allocated while +{\tt sm\_static(1}) is in effect are not checked for having been orphaned but +all the other safeguards provided by SMARTALLOC remain in effect. You may +release such buffers, if you like; but you don't have to. + +\subsection{ Living with Libraries} +\index{Libraries!Living with } +\index{Living with Libraries } +\addcontentsline{toc}{subsection}{Living with Libraries} + +Some library functions for which source code is unavailable may gratuitously +allocate and return buffers that contain their results, or require you to pass +them buffers which they subsequently release. If you have source code for the +library, by far the best approach is to simply install SMARTALLOC in it, +particularly since this kind of ill-structured dynamic storage management is +the source of so many storage leaks. Without source code, however, there's no +option but to provide a way to bypass SMARTALLOC for the buffers the library +allocates and/or releases with the standard system functions. + +For each function {\it xxx} redefined by SMARTALLOC, a corresponding routine +named ``{\tt actually}{\it xxx}'' is furnished which provides direct access to +the underlying system function, as follows: + +\begin{quote} + +\begin{longtable}{ll} +\multicolumn{1}{l }{\bf Standard function } & \multicolumn{1}{l }{\bf Direct +access function } \\ +{{\tt malloc(}{\it size}{\tt )} } & {{\tt actuallymalloc(}{\it size}{\tt )} +} \\ +{{\tt calloc(}{\it nelem}{\tt ,} {\it elsize}{\tt )} } & {{\tt +actuallycalloc(}{\it nelem}, {\it elsize}{\tt )} } \\ +{{\tt realloc(}{\it ptr}{\tt ,} {\it size}{\tt )} } & {{\tt +actuallyrealloc(}{\it ptr}, {\it size}{\tt )} } \\ +{{\tt free(}{\it ptr}{\tt )} } & {{\tt actuallyfree(}{\it ptr}{\tt )} } + +\end{longtable} + +\end{quote} + +For example, suppose there exists a system library function named ``{\tt +getimage()}'' which reads a raster image file and returns the address of a +buffer containing it. Since the library routine allocates the image directly +with {\tt malloc()}, you can't use SMARTALLOC's {\tt free()}, as that call +expects information placed in the buffer by SMARTALLOC's special version of +{\tt malloc()}, and hence would report an error. To release the buffer you +should call {\tt actuallyfree()}, as in this code fragment: + +\footnotesize +\begin{verbatim} + struct image *ibuf = getimage("ratpack.img"); + display_on_screen(ibuf); + actuallyfree(ibuf); +\end{verbatim} +\normalsize + +Conversely, suppose we are to call a library function, ``{\tt putimage()}'', +which writes an image buffer into a file and then releases the buffer with +{\tt free()}. Since the system {\tt free()} is being called, we can't pass a +buffer allocated by SMARTALLOC's allocation routines, as it contains special +information that the system {\tt free()} doesn't expect to be there. The +following code uses {\tt actuallymalloc()} to obtain the buffer passed to such +a routine. 
+ +\footnotesize +\begin{verbatim} + struct image *obuf = + (struct image *) actuallymalloc(sizeof(struct image)); + dump_screen_to_image(obuf); + putimage("scrdump.img", obuf); /* putimage() releases obuf */ +\end{verbatim} +\normalsize + +It's unlikely you'll need any of the ``actually'' calls except under very odd +circumstances (in four products and three years, I've only needed them once), +but they're there for the rare occasions that demand them. Don't use them to +subvert the error checking of SMARTALLOC; if you want to disable orphaned +buffer detection, use the {\tt sm\_static(1)} mechanism described above. That +way you don't forfeit all the other advantages of SMARTALLOC as you do when +using {\tt actuallymalloc()} and {\tt actuallyfree()}. + +\subsection{ SMARTALLOC Details} +\index{SMARTALLOC Details } +\index{Details!SMARTALLOC } +\addcontentsline{toc}{subsection}{SMARTALLOC Details} + +When you include ``smartall.h'' and define SMARTALLOC, the following standard +system library functions are redefined with the \#define mechanism to call +corresponding functions within smartall.c instead. (For details of the +redefinitions, please refer to smartall.h.) + +\footnotesize +\begin{verbatim} + void *malloc(size_t size) + void *calloc(size_t nelem, size_t elsize) + void *realloc(void *ptr, size_t size) + void free(void *ptr) + void cfree(void *ptr) +\end{verbatim} +\normalsize + +{\tt cfree()} is a historical artifact identical to {\tt free()}. + +In addition to allocating storage in the same way as the standard library +functions, the SMARTALLOC versions expand the buffers they allocate to include +information that identifies where each buffer was allocated and to chain all +allocated buffers together. When a buffer is released, it is removed from the +allocated buffer chain. A call on {\tt sm\_dump()} is able, by scanning the +chain of allocated buffers, to find all orphaned buffers. Buffers allocated +while {\tt sm\_static(1)} is in effect are specially flagged so that, despite +appearing on the allocated buffer chain, {\tt sm\_dump()} will not deem them +orphans. + +When a buffer is allocated by {\tt malloc()} or expanded with {\tt realloc()}, +all bytes of newly allocated storage are set to the hexadecimal value 0x55 +(alternating one and zero bits). Note that for {\tt realloc()} this applies +only to the bytes added at the end of buffer; the original contents of the +buffer are not modified. Initializing allocated storage to a distinctive +nonzero pattern is intended to catch code that erroneously assumes newly +allocated buffers are cleared to zero; in fact their contents are random. The +{\tt calloc()} function, defined as returning a buffer cleared to zero, +continues to zero its buffers under SMARTALLOC. + +Buffers obtained with the SMARTALLOC functions contain a special sentinel byte +at the end of the user data area. This byte is set to a special key value +based upon the buffer's memory address. When the buffer is released, the key +is tested and if it has been overwritten an assertion in the {\tt free} +function will fail. This catches incorrect program code that stores beyond the +storage allocated for the buffer. At {\tt free()} time the queue links are +also validated and an assertion failure will occur if the program has +destroyed them by storing before the start of the allocated storage. 
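+
+As a small, hypothetical illustration of the checks described above (this
+example is not part of the SMARTALLOC distribution), the following program
+would leave one orphaned buffer for {\tt sm\_dump()} to report, and the
+overrun buffer would trip the sentinel assertion when it is freed:
+
+\footnotesize
+\begin{verbatim}
+#include <stdlib.h>
+#include <string.h>
+#define SMARTALLOC
+#include "smartall.h"
+
+int main(void)
+{
+   char *leaked = (char *) malloc(120);  /* never freed: reported as orphaned */
+   char *over   = (char *) malloc(8);
+
+   strcpy(over, "123456789");            /* overruns the 8 byte buffer ...    */
+   free(over);                           /* ... sentinel check fails here     */
+
+   (void) leaked;
+   sm_dump(0);                           /* report size/location of orphans   */
+   return 0;
+}
+\end{verbatim}
+\normalsize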
+ +In addition, when a buffer is released with {\tt free()}, its contents are +immediately destroyed by overwriting them with the hexadecimal pattern 0xAA +(alternating bits, the one's complement of the initial value pattern). This +will usually trip up code that keeps a pointer to a buffer that's been freed +and later attempts to reference data within the released buffer. Incredibly, +this is {\it legal} in the standard Unix memory allocation package, which +permits programs to free() buffers, then raise them from the grave with {\tt +realloc()}. Such program ``logic'' should be fixed, not accommodated, and +SMARTALLOC brooks no such Lazarus buffer`` nonsense. + +Some C libraries allow a zero size argument in calls to {\tt malloc()}. Since +this is far more likely to indicate a program error than a defensible +programming stratagem, SMARTALLOC disallows it with an assertion. + +When the standard library {\tt realloc()} function is called to expand a +buffer, it attempts to expand the buffer in place if possible, moving it only +if necessary. Because SMARTALLOC must place its own private storage in the +buffer and also to aid in error detection, its version of {\tt realloc()} +always moves and copies the buffer except in the trivial case where the size +of the buffer is not being changed. By forcing the buffer to move on every +call and destroying the contents of the old buffer when it is released, +SMARTALLOC traps programs which keep pointers into a buffer across a call on +{\tt realloc()} which may move it. This strategy may prove very costly to +programs which make extensive use of {\tt realloc()}. If this proves to be a +problem, such programs may wish to use {\tt actuallymalloc()}, {\tt +actuallyrealloc()}, and {\tt actuallyfree()} for such frequently-adjusted +buffers, trading error detection for performance. Although not specified in +the System V Interface Definition, many C library implementations of {\tt +realloc()} permit an old buffer argument of NULL, causing {\tt realloc()} to +allocate a new buffer. The SMARTALLOC version permits this. + +\subsection{ When SMARTALLOC is Disabled} +\index{When SMARTALLOC is Disabled } +\index{Disabled!When SMARTALLOC is } +\addcontentsline{toc}{subsection}{When SMARTALLOC is Disabled} + +When SMARTALLOC is disabled by compiling a program with the symbol SMARTALLOC +not defined, calls on the functions otherwise redefined by SMARTALLOC go +directly to the system functions. In addition, compile-time definitions +translate calls on the ''{\tt actually}...{\tt ()}`` functions into the +corresponding library calls; ''{\tt actuallymalloc(100)}``, for example, +compiles into ''{\tt malloc(100)}``. The two special SMARTALLOC functions, +{\tt sm\_dump()} and {\tt sm\_static()}, are defined to generate no code +(hence the null statement). Finally, if SMARTALLOC is not defined, compilation +of the file smartall.c generates no code or data at all, effectively removing +it from the program even if named in the link instructions. + +Thus, except for unusual circumstances, a program that works with SMARTALLOC +defined for testing should require no changes when built without it for +production release. + +\subsection{ The {\tt alloc()} Function} +\index{Function!alloc } +\index{Alloc() Function } +\addcontentsline{toc}{subsection}{alloc() Function} + +Many programs I've worked on use very few direct calls to {\tt malloc()}, +using the identically declared {\tt alloc()} function instead. 
Alloc detects +out-of-memory conditions and aborts, removing the need for error checking on +every call of {\tt malloc()} (and the temptation to skip checking for +out-of-memory). + +As a convenience, SMARTALLOC supplies a compatible version of {\tt alloc()} in +the file alloc.c, with its definition in the file alloc.h. This version of +{\tt alloc()} is sensitive to the definition of SMARTALLOC and cooperates with +SMARTALLOC's orphaned buffer detection. In addition, when SMARTALLOC is +defined and {\tt alloc()} detects an out of memory condition, it takes +advantage of the SMARTALLOC diagnostic information to identify the file and +line number of the call on {\tt alloc()} that failed. + +\subsection{ Overlays and Underhandedness} +\index{Underhandedness!Overlays and } +\index{Overlays and Underhandedness } +\addcontentsline{toc}{subsection}{Overlays and Underhandedness} + +String constants in the C language are considered to be static arrays of +characters accessed through a pointer constant. The arrays are potentially +writable even though their pointer is a constant. SMARTALLOC uses the +compile-time definition {\tt ./smartall.wml} to obtain the name of the file in +which a call on buffer allocation was performed. Rather than reserve space in +a buffer to save this information, SMARTALLOC simply stores the pointer to the +compiled-in text of the file name. This works fine as long as the program does +not overlay its data among modules. If data are overlayed, the area of memory +which contained the file name at the time it was saved in the buffer may +contain something else entirely when {\tt sm\_dump()} gets around to using the +pointer to edit the file name which allocated the buffer. + +If you want to use SMARTALLOC in a program with overlayed data, you'll have to +modify smartall.c to either copy the file name to a fixed-length field added +to the {\tt abufhead} structure, or else allocate storage with {\tt malloc()}, +copy the file name there, and set the {\tt abfname} pointer to that buffer, +then remember to release the buffer in {\tt sm\_free}. Either of these +approaches are wasteful of storage and time, and should be considered only if +there is no alternative. Since most initial debugging is done in non-overlayed +environments, the restrictions on SMARTALLOC with data overlaying may never +prove a problem. Note that conventional overlaying of code, by far the most +common form of overlaying, poses no problems for SMARTALLOC; you need only be +concerned if you're using exotic tools for data overlaying on MS-DOS or other +address-space-challenged systems. + +Since a C language ''constant`` string can actually be written into, most C +compilers generate a unique copy of each string used in a module, even if the +same constant string appears many times. In modules that contain many calls on +allocation functions, this results in substantial wasted storage for the +strings that identify the file name. If your compiler permits optimization of +multiple occurrences of constant strings, enabling this mode will eliminate +the overhead for these strings. Of course, it's up to you to make sure +choosing this compiler mode won't wreak havoc on some other part of your +program. + +\subsection{ Test and Demonstration Program} +\index{Test and Demonstration Program } +\index{Program!Test and Demonstration } +\addcontentsline{toc}{subsection}{Test and Demonstration Program} + +A test and demonstration program, smtest.c, is supplied with SMARTALLOC. 
You +can build this program with the Makefile included. Please refer to the +comments in smtest.c and the Makefile for information on this program. If +you're attempting to use SMARTALLOC on a new machine or with a new compiler or +operating system, it's a wise first step to check it out with smtest first. + +\subsection{ Invitation to the Hack} +\index{Hack!Invitation to the } +\index{Invitation to the Hack } +\addcontentsline{toc}{subsection}{Invitation to the Hack} + +SMARTALLOC is not intended to be a panacea for storage management problems, +nor is it universally applicable or effective; it's another weapon in the +arsenal of the defensive professional programmer attempting to create reliable +products. It represents the current state of evolution of expedient debug code +which has been used in several commercial software products which have, +collectively, sold more than third of a million copies in the retail market, +and can be expected to continue to develop through time as it is applied to +ever more demanding projects. + +The version of SMARTALLOC here has been tested on a Sun SPARCStation, Silicon +Graphics Indigo2, and on MS-DOS using both Borland and Microsoft C. Moving +from compiler to compiler requires the usual small changes to resolve disputes +about prototyping of functions, whether the type returned by buffer allocation +is {\tt char\ *} or {\tt void\ *}, and so forth, but following those changes +it works in a variety of environments. I hope you'll find SMARTALLOC as useful +for your projects as I've found it in mine. + +\section{ +\elink{}{http://www.fourmilab.ch/smartall/smartall.zip} +\elink{Download smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip} +(Zipped archive)} +\index{Archive! Download smartall.zip Zipped } +\index{ Download smartall.zip (Zipped archive) } +\addcontentsline{toc}{section}{ Download smartall.zip (Zipped archive)} + +SMARTALLOC is provided as +\elink{smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip}, a +\elink{Zipped}{http://www.pkware.com/} archive containing source code, +documentation, and a {\tt Makefile} to build the software under Unix. + +\subsection{ Copying} +\index{Copying } +\addcontentsline{toc}{subsection}{Copying} + +\begin{quote} +SMARTALLOC is in the public domain. Permission to use, copy, modify, and +distribute this software and its documentation for any purpose and without fee +is hereby granted, without any conditions or restrictions. This software is +provided ''as is`` without express or implied warranty. +\end{quote} + +{\it +\elink{by John Walker}{http://www.fourmilab.ch} +October 30th, 1998 } diff --git a/docs/manuals/de/developers/storage.tex b/docs/manuals/de/developers/storage.tex new file mode 100644 index 00000000..e46f228c --- /dev/null +++ b/docs/manuals/de/developers/storage.tex @@ -0,0 +1,258 @@ +%% +%% + +\chapter{Storage Daemon Design} +\label{_ChapterStart3} +\index{Storage Daemon Design } +\index{Design!Storage Daemon } +\addcontentsline{toc}{section}{Storage Daemon Design} + +This chapter is intended to be a technical discussion of the Storage daemon +services and as such is not targeted at end users but rather at developers and +system administrators that want or need to know more of the working details of +{\bf Bacula}. + +This document is somewhat out of date. 
+ +\section{SD Design Introduction} +\index{Introduction!SD Design } +\index{SD Design Introduction } +\addcontentsline{toc}{section}{SD Design Introduction} + +The Bacula Storage daemon provides storage resources to a Bacula installation. +An individual Storage daemon is associated with a physical permanent storage +device (for example, a tape drive, CD writer, tape changer or jukebox, etc.), +and may employ auxiliary storage resources (such as space on a hard disk file +system) to increase performance and/or optimize use of the permanent storage +medium. + +Any number of storage daemons may be run on a given machine; each associated +with an individual storage device connected to it, and BACULA operations may +employ storage daemons on any number of hosts connected by a network, local or +remote. The ability to employ remote storage daemons (with appropriate +security measures) permits automatic off-site backup, possibly to publicly +available backup repositories. + +\section{SD Development Outline} +\index{Outline!SD Development } +\index{SD Development Outline } +\addcontentsline{toc}{section}{SD Development Outline} + +In order to provide a high performance backup and restore solution that scales +to very large capacity devices and networks, the storage daemon must be able +to extract as much performance from the storage device and network with which +it interacts. In order to accomplish this, storage daemons will eventually +have to sacrifice simplicity and painless portability in favor of techniques +which improve performance. My goal in designing the storage daemon protocol +and developing the initial prototype storage daemon is to provide for these +additions in the future, while implementing an initial storage daemon which is +very simple and portable to almost any POSIX-like environment. This original +storage daemon (and its evolved descendants) can serve as a portable solution +for non-demanding backup requirements (such as single servers of modest size, +individual machines, or small local networks), while serving as the starting +point for development of higher performance configurable derivatives which use +techniques such as POSIX threads, shared memory, asynchronous I/O, buffering +to high-speed intermediate media, and support for tape changers and jukeboxes. + + +\section{SD Connections and Sessions} +\index{Sessions!SD Connections and } +\index{SD Connections and Sessions } +\addcontentsline{toc}{section}{SD Connections and Sessions} + +A client connects to a storage server by initiating a conventional TCP +connection. The storage server accepts the connection unless its maximum +number of connections has been reached or the specified host is not granted +access to the storage server. Once a connection has been opened, the client +may make any number of Query requests, and/or initiate (if permitted), one or +more Append sessions (which transmit data to be stored by the storage daemon) +and/or Read sessions (which retrieve data from the storage daemon). + +Most requests and replies sent across the connection are simple ASCII strings, +with status replies prefixed by a four digit status code for easier parsing. +Binary data appear in blocks stored and retrieved from the storage. Any +request may result in a single-line status reply of ``{\tt 3201\ Notification\ +pending}'', which indicates the client must send a ``Query notification'' +request to retrieve one or more notifications posted to it. 
Once the +notifications have been returned, the client may then resubmit the request +which resulted in the 3201 status. + +The following descriptions omit common error codes, yet to be defined, which +can occur from most or many requests due to events like media errors, +restarting of the storage daemon, etc. These details will be filled in, along +with a comprehensive list of status codes along with which requests can +produce them in an update to this document. + +\subsection{SD Append Requests} +\index{Requests!SD Append } +\index{SD Append Requests } +\addcontentsline{toc}{subsection}{SD Append Requests} + +\begin{description} + +\item [{append open session = \lt{}JobId\gt{} [ \lt{}Password\gt{} ] }] + \index{SPAN class } + A data append session is opened with the Job ID given by {\it JobId} with +client password (if required) given by {\it Password}. If the session is +successfully opened, a status of {\tt 3000\ OK} is returned with a ``{\tt +ticket\ =\ }{\it number}'' reply used to identify subsequent messages in the +session. If too many sessions are open, or a conflicting session (for +example, a read in progress when simultaneous read and append sessions are +not permitted), a status of ``{\tt 3502\ Volume\ busy}'' is returned. If no +volume is mounted, or the volume mounted cannot be appended to, a status of +``{\tt 3503\ Volume\ not\ mounted}'' is returned. + +\item [append data = \lt{}ticket-number\gt{} ] + \index{SPAN class } + If the append data is accepted, a status of {\tt 3000\ OK data address = +\lt{}IPaddress\gt{} port = \lt{}port\gt{}} is returned, where the {\tt +IPaddress} and {\tt port} specify the IP address and port number of the data +channel. Error status codes are {\tt 3504\ Invalid\ ticket\ number} and {\tt +3505\ Session\ aborted}, the latter of which indicates the entire append +session has failed due to a daemon or media error. + +Once the File daemon has established the connection to the data channel +opened by the Storage daemon, it will transfer a header packet followed by +any number of data packets. The header packet is of the form: + +{\tt \lt{}file-index\gt{} \lt{}stream-id\gt{} \lt{}info\gt{}} + +The details are specified in the +\ilink{Daemon Protocol}{_ChapterStart2} section of this +document. + +\item [*append abort session = \lt{}ticket-number\gt{} ] + \index{SPAN class } + The open append session with ticket {\it ticket-number} is aborted; any blocks +not yet written to permanent media are discarded. Subsequent attempts to +append data to the session will receive an error status of {\tt 3505\ +Session\ aborted}. + +\item [append end session = \lt{}ticket-number\gt{} ] + \index{SPAN class } + The open append session with ticket {\it ticket-number} is marked complete; no +further blocks may be appended. The storage daemon will give priority to +saving any buffered blocks from this session to permanent media as soon as +possible. + +\item [append close session = \lt{}ticket-number\gt{} ] + \index{SPAN class } + The append session with ticket {\it ticket} is closed. 
This message does not +receive an {\tt 3000\ OK} reply until all of the content of the session are +stored on permanent media, at which time said reply is given, followed by a +list of volumes, from first to last, which contain blocks from the session, +along with the first and last file and block on each containing session data +and the volume session key identifying data from that session in lines with +the following format: + +{\tt {\tt Volume = }\lt{}Volume-id\gt{} \lt{}start-file\gt{} +\lt{}start-block\gt{} \lt{}end-file\gt{} \lt{}end-block\gt{} +\lt{}volume-session-id\gt{}}where {\it Volume-id} is the volume label, {\it +start-file} and {\it start-block} are the file and block containing the first +data from that session on the volume, {\it end-file} and {\it end-block} are +the file and block with the last data from the session on the volume and {\it +volume-session-id} is the volume session ID for blocks from the session +stored on that volume. +\end{description} + +\subsection{SD Read Requests} +\index{SD Read Requests } +\index{Requests!SD Read } +\addcontentsline{toc}{subsection}{SD Read Requests} + +\begin{description} + +\item [Read open session = \lt{}JobId\gt{} \lt{}Volume-id\gt{} + \lt{}start-file\gt{} \lt{}start-block\gt{} \lt{}end-file\gt{} + \lt{}end-block\gt{} \lt{}volume-session-id\gt{} \lt{}password\gt{} ] +\index{SPAN class } +where {\it Volume-id} is the volume label, {\it start-file} and {\it +start-block} are the file and block containing the first data from that +session on the volume, {\it end-file} and {\it end-block} are the file and +block with the last data from the session on the volume and {\it +volume-session-id} is the volume session ID for blocks from the session +stored on that volume. + +If the session is successfully opened, a status of + +{\tt {\tt 3100\ OK Ticket\ =\ }{\it number}``} + +is returned with a reply used to identify subsequent messages in the session. +If too many sessions are open, or a conflicting session (for example, an +append in progress when simultaneous read and append sessions are not +permitted), a status of ''{\tt 3502\ Volume\ busy}`` is returned. If no +volume is mounted, or the volume mounted cannot be appended to, a status of +''{\tt 3503\ Volume\ not\ mounted}`` is returned. If no block with the given +volume session ID and the correct client ID number appears in the given first +file and block for the volume, a status of ''{\tt 3505\ Session\ not\ +found}`` is returned. + +\item [Read data = \lt{}Ticket\gt{} \gt{} \lt{}Block\gt{} ] + \index{SPAN class } + The specified Block of data from open read session with the specified Ticket +number is returned, with a status of {\tt 3000\ OK} followed by a ''{\tt +Length\ =\ }{\it size}`` line giving the length in bytes of the block data +which immediately follows. Blocks must be retrieved in ascending order, but +blocks may be skipped. If a block number greater than the largest stored on +the volume is requested, a status of ''{\tt 3201\ End\ of\ volume}`` is +returned. If a block number greater than the largest in the file is +requested, a status of ''{\tt 3401\ End\ of\ file}`` is returned. + +\item [Read close session = \lt{}Ticket\gt{} ] + \index{SPAN class } + The read session with Ticket number is closed. A read session may be closed +at any time; you needn't read all its blocks before closing it. 
+\end{description}
+
+{\it by
+\elink{John Walker}{http://www.fourmilab.ch/}
+January 30th, MM }
+
+\section{SD Data Structures}
+\index{SD Data Structures}
+\addcontentsline{toc}{section}{SD Data Structures}
+
+In the Storage daemon, there is a Device resource (i.e. from the conf file)
+that describes each physical device. When the physical device is used it
+is controlled by the DEVICE structure (defined in dev.h), and typically
+referred to as dev in the C++ code. Anyone writing or reading a physical
+device must ultimately get a lock on the DEVICE structure -- this controls
+the device. However, multiple Jobs (defined by a JCR structure src/jcr.h)
+can be writing a physical DEVICE at the same time (of course they are
+sequenced by locking the DEVICE structure). There are a lot of job
+dependent "device" variables that may be different for each Job such as
+spooling (one job may spool and another may not, and when a job is
+spooling, it must have an i/o packet open, each job has its own record and
+block structures, ...), so there is a device control record or DCR that is
+the primary way of interfacing to the physical device. The DCR contains
+all the job specific data as well as a pointer to the Device resource
+(DEVRES structure) and the physical DEVICE structure.
+
+Now if a job is writing to two devices (it could be writing two separate
+streams to the same device), it must have two DCRs. Today, the code only
+permits one. This won't be hard to change, but it is new code.
+
+Today, with three jobs (threads) and two physical devices, where each job
+ writes to only one device:
+
+\begin{verbatim}
+ Job1 -> DCR1 -> DEVICE1
+ Job2 -> DCR2 -> DEVICE1
+ Job3 -> DCR3 -> DEVICE2
+\end{verbatim}
+
+To be implemented: three jobs, three physical devices, but
+ job1 is writing simultaneously to three devices:
+
+\begin{verbatim}
+ Job1 -> DCR1 -> DEVICE1
+      -> DCR4 -> DEVICE2
+      -> DCR5 -> DEVICE3
+ Job2 -> DCR2 -> DEVICE1
+ Job3 -> DCR3 -> DEVICE2
+
+ Job = job control record
+ DCR = Job control data for a specific device
+ DEVICE = Device only control data
+\end{verbatim}
+
diff --git a/docs/manuals/de/developers/tls-techdoc.tex b/docs/manuals/de/developers/tls-techdoc.tex
new file mode 100644
index 00000000..565869f1
--- /dev/null
+++ b/docs/manuals/de/developers/tls-techdoc.tex
@@ -0,0 +1,391 @@
+%%
+%%
+
+%\author{Landon Fuller}
+%\title{Bacula TLS Additions}
+
+\chapter{TLS}
+\label{_Chapter_TLS}
+\index{TLS}
+
+Written by Landon Fuller
+
+\section{Introduction to TLS}
+\index{TLS Introduction}
+\index{Introduction!TLS}
+\addcontentsline{toc}{section}{TLS Introduction}
+
+This patch includes all the back-end code necessary to add complete TLS
+data encryption support to Bacula. In addition, support for TLS in
+Console/Director communications has been added as a proof of concept.
+Adding support for the remaining daemons will be straightforward.
+Supported features of this patchset include:
+
+\begin{itemize}
+\item Client/Server TLS Requirement Negotiation
+\item TLSv1 Connections with Server and Client Certificate
+Validation
+\item Forward Secrecy Support via Diffie-Hellman Ephemeral Keying
+\end{itemize}
+
+This document will refer to both ``server'' and ``client'' contexts. These
+terms refer to the accepting and initiating peer, respectively.
+
+Diffie-Hellman anonymous ciphers are not supported by this patchset. The
+use of DH anonymous ciphers increases the code complexity and places
+explicit trust upon the two-way Cram-MD5 implementation.
Cram-MD5 is +subject to known plaintext attacks, and is should be considered +considerably less secure than PKI certificate-based authentication. + +Appropriate autoconf macros have been added to detect and use OpenSSL. Two +additional preprocessor defines have been added: \emph{HAVE\_TLS} and +\emph{HAVE\_OPENSSL}. All changes not specific to OpenSSL rely on +\emph{HAVE\_TLS}. OpenSSL-specific code is constrained to +\emph{src/lib/tls.c} to facilitate the support of alternative TLS +implementations. + +\section{New Configuration Directives} +\index{TLS Configuration Directives} +\index{Directives!TLS Configuration} +\addcontentsline{toc}{section}{New Configuration Directives} + +Additional configuration directives have been added to both the Console and +Director resources. These new directives are defined as follows: + +\begin{itemize} +\item \underline{TLS Enable} \emph{(yes/no)} +Enable TLS support. + +\item \underline{TLS Require} \emph{(yes/no)} +Require TLS connections. + +\item \underline{TLS Certificate} \emph{(path)} +Path to PEM encoded TLS certificate. Used as either a client or server +certificate. + +\item \underline{TLS Key} \emph{(path)} +Path to PEM encoded TLS private key. Must correspond with the TLS +certificate. + +\item \underline{TLS Verify Peer} \emph{(yes/no)} +Verify peer certificate. Instructs server to request and verify the +client's x509 certificate. Any client certificate signed by a known-CA +will be accepted unless the TLS Allowed CN configuration directive is used. +Not valid in a client context. + +\item \underline{TLS Allowed CN} \emph{(string list)} +Common name attribute of allowed peer certificates. If directive is +specified, all client certificates will be verified against this list. +This directive may be specified more than once. Not valid in a client +context. + +\item \underline{TLS CA Certificate File} \emph{(path)} +Path to PEM encoded TLS CA certificate(s). Multiple certificates are +permitted in the file. One of \emph{TLS CA Certificate File} or \emph{TLS +CA Certificate Dir} are required in a server context if \underline{TLS +Verify Peer} is also specified, and are always required in a client +context. + +\item \underline{TLS CA Certificate Dir} \emph{(path)} +Path to TLS CA certificate directory. In the current implementation, +certificates must be stored PEM encoded with OpenSSL-compatible hashes. +One of \emph{TLS CA Certificate File} or \emph{TLS CA Certificate Dir} are +required in a server context if \emph{TLS Verify Peer} is also specified, +and are always required in a client context. + +\item \underline{TLS DH File} \emph{(path)} +Path to PEM encoded Diffie-Hellman parameter file. If this directive is +specified, DH ephemeral keying will be enabled, allowing for forward +secrecy of communications. This directive is only valid within a server +context. To generate the parameter file, you may use openssl: +\footnotesize +\begin{verbatim} +openssl dhparam -out dh1024.pem -5 1024 +\end{verbatim} +\normalsize +\end{itemize} + +\section{TLS API Implementation} +\index{TLS API Implimentation} +\index{API Implimentation!TLS} +\addcontentsline{toc}{section}{TLS API Implementation} + +To facilitate the use of additional TLS libraries, all OpenSSL-specific +code has been implemented within \emph{src/lib/tls.c}. In turn, a generic +TLS API is exported. 
+
+\subsection{Library Initialization and Cleanup}
+\index{Library Initialization and Cleanup}
+\index{Initialization and Cleanup!Library}
+\addcontentsline{toc}{subsection}{Library Initialization and Cleanup}
+
+\footnotesize
+\begin{verbatim}
+int init_tls (void);
+\end{verbatim}
+\normalsize
+
+Performs TLS library initialization, including seeding of the PRNG. PRNG
+seeding has not yet been implemented for win32.
+
+\footnotesize
+\begin{verbatim}
+int cleanup_tls (void);
+\end{verbatim}
+\normalsize
+
+Performs TLS library cleanup.
+
+\subsection{Manipulating TLS Contexts}
+\index{TLS Context Manipulation}
+\index{Contexts!Manipulating TLS}
+\addcontentsline{toc}{subsection}{Manipulating TLS Contexts}
+
+\footnotesize
+\begin{verbatim}
+TLS_CONTEXT *new_tls_context (const char *ca_certfile,
+        const char *ca_certdir, const char *certfile,
+        const char *keyfile, const char *dhfile, bool verify_peer);
+\end{verbatim}
+\normalsize
+
+Allocates and initializes a new opaque \emph{TLS\_CONTEXT} structure. The
+\emph{TLS\_CONTEXT} structure maintains default TLS settings from which
+\emph{TLS\_CONNECTION} structures are instantiated. In the future the
+\emph{TLS\_CONTEXT} structure may be used to maintain the TLS session
+cache. The \emph{ca\_certfile} and \emph{ca\_certdir} arguments are used to
+initialize the CA verification stores. The \emph{certfile} and
+\emph{keyfile} arguments are used to initialize the local certificate and
+private key. If \emph{dhfile} is non-NULL, it is used to initialize
+Diffie-Hellman ephemeral keying. If \emph{verify\_peer} is \emph{true},
+client certificate validation is enabled.
+
+\footnotesize
+\begin{verbatim}
+void free_tls_context (TLS_CONTEXT *ctx);
+\end{verbatim}
+\normalsize
+
+Deallocates a previously allocated \emph{TLS\_CONTEXT} structure.
+
+\subsection{Performing Post-Connection Verification}
+\index{TLS Post-Connection Verification}
+\index{Verification!TLS Post-Connection}
+\addcontentsline{toc}{subsection}{Performing Post-Connection Verification}
+
+\footnotesize
+\begin{verbatim}
+bool tls_postconnect_verify_host (TLS_CONNECTION *tls, const char *host);
+\end{verbatim}
+\normalsize
+
+Performs post-connection verification of the peer-supplied x509
+certificate. Checks whether the \emph{subjectAltName} and
+\emph{commonName} attributes match the supplied \emph{host} string.
+Returns \emph{true} if there is a match, \emph{false} otherwise.
+
+\footnotesize
+\begin{verbatim}
+bool tls_postconnect_verify_cn (TLS_CONNECTION *tls, alist *verify_list);
+\end{verbatim}
+\normalsize
+
+Performs post-connection verification of the peer-supplied x509
+certificate. Checks whether the \emph{commonName} attribute matches any
+of the strings supplied via the \emph{verify\_list} parameter. Returns
+\emph{true} if there is a match, \emph{false} otherwise.
+
+\subsection{Manipulating TLS Connections}
+\index{TLS Connection Manipulation}
+\index{Connections!Manipulating TLS}
+\addcontentsline{toc}{subsection}{Manipulating TLS Connections}
+
+\footnotesize
+\begin{verbatim}
+TLS_CONNECTION *new_tls_connection (TLS_CONTEXT *ctx, int fd);
+\end{verbatim}
+\normalsize
+
+Allocates and initializes a new \emph{TLS\_CONNECTION} structure with
+context \emph{ctx} and file descriptor \emph{fd}.
+
+\footnotesize
+\begin{verbatim}
+void free_tls_connection (TLS_CONNECTION *tls);
+\end{verbatim}
+\normalsize
+
+Deallocates memory associated with the \emph{tls} structure.
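+
+Taken together, the context and connection calls above might be used
+roughly as follows. This is only a sketch: the certificate paths are
+placeholders, \emph{fd} is assumed to be an already-connected socket
+descriptor, and error handling as well as the actual handshake (driven
+through the \emph{tls\_bsock\_*} helpers described below) are omitted.
+
+\footnotesize
+\begin{verbatim}
+   TLS_CONTEXT    *ctx;
+   TLS_CONNECTION *tls;
+
+   /* Client-side context: CA bundle, no CA directory, our certificate
+    * and key, no DH parameter file, no peer verification requested. */
+   ctx = new_tls_context("/etc/bacula/ca.pem", NULL,
+                         "/etc/bacula/console.pem",
+                         "/etc/bacula/console.key",
+                         NULL, false);
+   if (!ctx) {
+      /* context initialization failed */
+   }
+
+   /* Attach a TLS connection to the already-connected descriptor fd. */
+   tls = new_tls_connection(ctx, fd);
+
+   /* ... negotiate and perform I/O via the tls_bsock_* helpers ... */
+
+   free_tls_connection(tls);
+   free_tls_context(ctx);
+\end{verbatim}
+\normalsize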
+
+\footnotesize
+\begin{verbatim}
+bool tls_bsock_connect (BSOCK *bsock);
+\end{verbatim}
+\normalsize
+
+Negotiates a TLS client connection via \emph{bsock}. Returns \emph{true}
+if successful, \emph{false} otherwise. Will fail if there is a TLS
+protocol error or an invalid certificate is presented.
+
+\footnotesize
+\begin{verbatim}
+bool tls_bsock_accept (BSOCK *bsock);
+\end{verbatim}
+\normalsize
+
+Accepts a TLS client connection via \emph{bsock}. Returns \emph{true} if
+successful, \emph{false} otherwise. Will fail if there is a TLS protocol
+error or an invalid certificate is presented.
+
+\footnotesize
+\begin{verbatim}
+bool tls_bsock_shutdown (BSOCK *bsock);
+\end{verbatim}
+\normalsize
+
+Issues a blocking TLS shutdown request to the peer via \emph{bsock}. This
+function may return without waiting for the peer's reply.
+
+\footnotesize
+\begin{verbatim}
+int tls_bsock_writen (BSOCK *bsock, char *ptr, int32_t nbytes);
+\end{verbatim}
+\normalsize
+
+Writes \emph{nbytes} from \emph{ptr} via the \emph{TLS\_CONNECTION}
+associated with \emph{bsock}. Due to OpenSSL's handling of \emph{EINTR},
+\emph{bsock} is set non-blocking at the start of the function, and restored
+to its original blocking state before the function returns. Fewer than
+\emph{nbytes} may be written if an error occurs. The actual number of
+bytes written will be returned.
+
+\footnotesize
+\begin{verbatim}
+int tls_bsock_readn (BSOCK *bsock, char *ptr, int32_t nbytes);
+\end{verbatim}
+\normalsize
+
+Reads \emph{nbytes} from the \emph{TLS\_CONNECTION} associated with
+\emph{bsock} and stores the result in \emph{ptr}. Due to OpenSSL's
+handling of \emph{EINTR}, \emph{bsock} is set non-blocking at the start of
+the function, and restored to its original blocking state before the
+function returns. Fewer than \emph{nbytes} may be read if an error occurs.
+The actual number of bytes read will be returned.
+
+\section{Bnet API Changes}
+\index{Bnet API Changes}
+\index{API Changes!Bnet}
+\addcontentsline{toc}{section}{Bnet API Changes}
+
+A minimal number of changes were required in the Bnet socket API. The BSOCK
+structure was expanded to include an associated TLS\_CONNECTION structure,
+as well as a flag to designate the current blocking state of the socket.
+The blocking state flag is required for win32, where it does not appear
+possible to discern the current blocking state of a socket.
+
+\subsection{Negotiating a TLS Connection}
+\index{Negotiating a TLS Connection}
+\index{TLS Connection!Negotiating}
+\addcontentsline{toc}{subsection}{Negotiating a TLS Connection}
+
+\emph{bnet\_tls\_server()} and \emph{bnet\_tls\_client()} were both
+implemented using the new TLS API as follows:
+
+\footnotesize
+\begin{verbatim}
+int bnet_tls_client(TLS_CONTEXT *ctx, BSOCK * bsock);
+\end{verbatim}
+\normalsize
+
+Negotiates a TLS session via \emph{bsock} using the settings from
+\emph{ctx}. Returns 1 if successful, 0 otherwise.
+
+\footnotesize
+\begin{verbatim}
+int bnet_tls_server(TLS_CONTEXT *ctx, BSOCK * bsock, alist *verify_list);
+\end{verbatim}
+\normalsize
+
+Accepts a TLS client session via \emph{bsock} using the settings from
+\emph{ctx}. If \emph{verify\_list} is non-NULL, it is passed to
+\emph{tls\_postconnect\_verify\_cn()} for client certificate verification.
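+
+The following fragment sketches how a daemon might call these two
+functions once a control connection has been established. It is
+illustrative only: \emph{tls\_ctx}, \emph{bsock} and \emph{verify\_list}
+are assumed to have been created elsewhere (for example, from the
+directives in the daemon's resource), and real code would report the
+failure through the normal Bacula messaging facilities.
+
+\footnotesize
+\begin{verbatim}
+   /* Accepting (server) side: optionally check the client CN list. */
+   if (!bnet_tls_server(tls_ctx, bsock, verify_list)) {
+      /* TLS negotiation failed -- refuse the connection */
+      return false;
+   }
+
+   /* Initiating (client) side is symmetrical: */
+   if (!bnet_tls_client(tls_ctx, bsock)) {
+      /* TLS negotiation failed -- give up */
+      return false;
+   }
+\end{verbatim}
+\normalsize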
+
+\subsection{Manipulating Socket Blocking State}
+\index{Manipulating Socket Blocking State}
+\index{Socket Blocking State!Manipulating}
+\index{Blocking State!Socket!Manipulating}
+\addcontentsline{toc}{subsection}{Manipulating Socket Blocking State}
+
+Three functions were added for manipulating the blocking state of a socket
+on both Win32 and Unix-like systems. The Win32 code was written according
+to the MSDN documentation, but has not been tested.
+
+These functions are prototyped as follows:
+
+\footnotesize
+\begin{verbatim}
+int bnet_set_nonblocking (BSOCK *bsock);
+\end{verbatim}
+\normalsize
+
+Enables non-blocking I/O on the socket associated with \emph{bsock}.
+Returns a copy of the socket flags prior to modification.
+
+\footnotesize
+\begin{verbatim}
+int bnet_set_blocking (BSOCK *bsock);
+\end{verbatim}
+\normalsize
+
+Enables blocking I/O on the socket associated with \emph{bsock}. Returns a
+copy of the socket flags prior to modification.
+
+\footnotesize
+\begin{verbatim}
+void bnet_restore_blocking (BSOCK *bsock, int flags);
+\end{verbatim}
+\normalsize
+
+Restores the blocking or non-blocking I/O setting on the socket associated
+with \emph{bsock}. The \emph{flags} argument must be the return value of
+either \emph{bnet\_set\_blocking()} or \emph{bnet\_set\_nonblocking()}.
+
+\pagebreak
+
+\section{Authentication Negotiation}
+\index{Authentication Negotiation}
+\index{Negotiation!TLS Authentication}
+\addcontentsline{toc}{section}{Authentication Negotiation}
+
+Backwards compatibility with the existing SSL negotiation hooks implemented
+in src/lib/cram-md5.c has been maintained. The
+\emph{cram\_md5\_get\_auth()} function has been modified to accept an
+integer pointer argument, \emph{tls\_remote\_need}. The TLS requirement
+advertised by the remote host is returned via this pointer.
+
+After exchanging cram-md5 authentication and TLS requirements, both the
+client and server independently decide whether to continue:
+
+\footnotesize
+\begin{verbatim}
+if (!cram_md5_get_auth(dir, password, &tls_remote_need) ||
+    !cram_md5_auth(dir, password, tls_local_need)) {
+[snip]
+/* Verify that the remote host is willing to meet our TLS requirements */
+if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK &&
+    tls_remote_need != BNET_TLS_OK) {
+   sendit(_("Authorization problem:"
+            " Remote server did not advertise required TLS support.\n"));
+   auth_success = false;
+   goto auth_done;
+}
+
+/* Verify that we are willing to meet the remote host's requirements */
+if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK &&
+    tls_remote_need != BNET_TLS_OK) {
+   sendit(_("Authorization problem:"
+            " Remote server requires TLS.\n"));
+   auth_success = false;
+   goto auth_done;
+}
+\end{verbatim}
+\normalsize
diff --git a/docs/manuals/de/developers/translate_images.pl b/docs/manuals/de/developers/translate_images.pl
new file mode 100755
index 00000000..c7225118
--- /dev/null
+++ b/docs/manuals/de/developers/translate_images.pl
@@ -0,0 +1,185 @@
+#!/usr/bin/perl -w
+#
+use strict;
+
+# Used to change the names of the image files generated by latex2html from imgxx.png
+# to meaningful names. Provision is made to go either from or to the meaningful names.
+# The meaningful names are obtained from a file called imagename_translations, which
+# is generated by extensions to latex2html in the make_image_file subroutine in
+# bacula.perl.
+
+# Opens the file imagename_translations and reads the contents into a hash.
+# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. +# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. + $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. + my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. +# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. 
+ open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. + if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/de/developers/update_version b/docs/manuals/de/developers/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/de/developers/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/developers/update_version.in b/docs/manuals/de/developers/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/de/developers/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/developers/version.tex b/docs/manuals/de/developers/version.tex new file mode 100644 index 00000000..82d910aa --- /dev/null +++ b/docs/manuals/de/developers/version.tex @@ -0,0 +1 @@ +2.3.6 (04 November 2007) diff --git a/docs/manuals/de/developers/version.tex.in b/docs/manuals/de/developers/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/de/developers/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/de/install/Makefile b/docs/manuals/de/install/Makefile new file mode 100644 index 00000000..0edc87f6 --- /dev/null +++ b/docs/manuals/de/install/Makefile @@ -0,0 +1,139 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=install + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + makeindex ${DOC}.ddx -o ${DOC}.dnd >/dev/null 2>/dev/null + makeindex ${DOC}.fdx -o ${DOC}.fnd >/dev/null 2>/dev/null + makeindex ${DOC}.sdx -o ${DOC}.snd >/dev/null 2>/dev/null + makeindex ${DOC}.cdx -o ${DOC}.cnd >/dev/null 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Installation and Configuration Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Instal_Config_Guide.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/install/Makefile.in b/docs/manuals/de/install/Makefile.in new file mode 100644 index 00000000..0edc87f6 --- /dev/null +++ b/docs/manuals/de/install/Makefile.in @@ -0,0 +1,139 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=install + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + makeindex ${DOC}.ddx -o ${DOC}.dnd >/dev/null 2>/dev/null + makeindex ${DOC}.fdx -o ${DOC}.fnd >/dev/null 2>/dev/null + makeindex ${DOC}.sdx -o ${DOC}.snd >/dev/null 2>/dev/null + makeindex ${DOC}.cdx -o ${DOC}.cnd >/dev/null 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . 
+ dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Installation and Configuration Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Instal_Config_Guide.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/install/Makefile.save b/docs/manuals/de/install/Makefile.save new file mode 100644 index 00000000..8a1708ab --- /dev/null +++ b/docs/manuals/de/install/Makefile.save @@ -0,0 +1,101 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# + +IMAGES=../../../images + +first_rule: bacula + +bacula: tex web html dvipdf + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @cp -fp ${IMAGES}/hires/*.eps . + touch install.idx installi-general.tex + -latex -interaction=batchmode install.tex + makeindex install.idx >/dev/null 2>/dev/null + -latex -interaction=batchmode install.tex + +pdf: + @echo "Making install pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf install.dvi install.pdf + @rm -f *.eps *.old + +dvipdf: + @echo "Making install pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 install.dvi + @rm -f *.eps *.old + +html: + @echo "Making install html" + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names install.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + install >/dev/null + ./translate_images.pl --to_meaningful_names install.html + @rm -f *.eps *.gif *.jpg *.old + +web: + @echo "Making install web" + @mkdir -p install + @rm -f install/* + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png install/ + @rm -f install/next.eps install/next.png install/prev.eps install/prev.png install/up.eps install/up.png + @(if [ -f install/imagename_translations ] ; then \ + ./translate_images.pl --to_meaningful_names install/Bacula_Users_Guide.html; \ + fi) + @rm -rf install/*.html + latex2html -split 3 -local_icons -t "Developer's Guide" \ + -long_titles 4 -contents_in_nav -toc_stars -white \ + -notransparent install >/dev/null + ./translate_images.pl --to_meaningful_names install/install_Guide.html + @cp -f install/install_Guide.html install/index.html + @rm -f *.eps *.gif *.jpg install/*.eps *.old + @rm -f install/idle.png + @rm -f install/win32-*.png install/wx-console*.png install/xp-*.png + @rm -f install/*.pl install/*.log install/*.aux install/*.idx + @rm -f install/*.out WARNINGS + +texcheck: + ./check_tex.pl install.tex + +main_configs: + pic2graph -density 100 main_configs.png + +clean: + @rm -f 1 2 3 + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.pdf *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f images.pl labels.pl internals.pl + @rm -rf install + @rm -f images.tex installi-general.tex + + +distclean: clean + @rm -f install.html install.pdf diff --git a/docs/manuals/de/install/autochangerres.tex b/docs/manuals/de/install/autochangerres.tex new file mode 100644 index 00000000..98563c77 --- /dev/null +++ b/docs/manuals/de/install/autochangerres.tex @@ -0,0 +1,107 @@ +%% +\chapter{Autochanger Resource} +\index[sd]{Autochanger Resource} +\index[sd]{Resource!Autochanger} + +The Autochanger resource supports single or multiple drive +autochangers by grouping one or more Device resources +into one unit called an autochanger in Bacula (often referred to +as a "tape library" by autochanger manufacturers). + +If you have an Autochanger, and you want it to function correctly, +you {\bf must} have an Autochanger resource in your Storage +conf file, and your Director's Storage directives that want to +use an Autochanger {\bf must} refer to the Autochanger resource name. +In previous versions of Bacula, the Director's Storage directives +referred directly to Device resources that were autochangers. +In version 1.38.0 and later, referring directly to Device resources +will not work for Autochangers. + +\begin{description} +\item [Name = \lt{}Autochanger-Name\gt{}] + \index[sd]{Name} + Specifies the Name of the Autochanger. This name is used in the + Director's Storage definition to refer to the autochanger. This + directive is required. + +\item [Device = \lt{}Device-name1, device-name2, ...\gt{}] + Specifies the names of the Device resource or resources that correspond + to the autochanger drive. 
If you have a multiple drive autochanger, you + must specify multiple Device names, each one referring to a separate + Device resource that contains a Drive Index specification that + corresponds to the drive number base zero. You may specify multiple + device names on a single line separated by commas, and/or you may + specify multiple Device directives. This directive is required. + +\item [Changer Device = {\it name-string}] + \index[sd]{Changer Device} + The specified {\bf name-string} gives the system file name of the autochanger + device name. If specified in this resource, the Changer Device name + is not needed in the Device resource. If it is specified in the Device + resource (see above), it will take precedence over one specified in + the Autochanger resource. + +\item [Changer Command = {\it name-string}] + \index[sd]{Changer Command } + The {\bf name-string} specifies an external program to be called that will + automatically change volumes as required by {\bf Bacula}. Most frequently, + you will specify the Bacula supplied {\bf mtx-changer} script as follows. + If it is specified here, it need not be specified in the Device + resource. If it is also specified in the Device resource, it will take + precedence over the one specified in the Autochanger resource. + +\end{description} + +The following is an example of a valid Autochanger resource definition: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "DDS-4-changer" + Device = DDS-4-1, DDS-4-2, DDS-4-3 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} +Device { + Name = "DDS-4-1" + Drive Index = 0 + Autochanger = yes + ... +} +Device { + Name = "DDS-4-2" + Drive Index = 1 + Autochanger = yes + ... +Device { + Name = "DDS-4-3" + Drive Index = 2 + Autochanger = yes + Autoselect = no + ... +} +\end{verbatim} +\normalsize + +Please note that it is important to include the {\bf Autochanger = yes} directive +in each Device definition that belongs to an Autochanger. A device definition +should not belong to more than one Autochanger resource. Also, your Device +directive in the Storage resource of the Director's conf file should have +the Autochanger's resource name rather than a name of one of the Devices. + +If you have a drive that physically belongs to an Autochanger but you don't want +to have it automatically used when Bacula references the Autochanger for backups, +for example, you want to reserve it for restores, you can add the directive: + +\footnotesize +\begin{verbatim} +Autoselect = no +\end{verbatim} +\normalsize + +to the Device resource for that drive. In that case, Bacula will not automatically +select that drive when accessing the Autochanger. You can, still use the drive +by referencing it by the Device name directly rather than the Autochanger name. An example +of such a definition is shown above for the Device DDS-4-3, which will not be +selected when the name DDS-4-changer is used in a Storage definition, but will +be used if DDS-4-3 is used. diff --git a/docs/manuals/de/install/check_tex.pl b/docs/manuals/de/install/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/de/install/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. 
It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. 
+ $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/de/install/configure.tex b/docs/manuals/de/install/configure.tex new file mode 100644 index 00000000..6f7376a3 --- /dev/null +++ b/docs/manuals/de/install/configure.tex @@ -0,0 +1,383 @@ +%% +%% + +\chapter{Anpassen der Konfigurations-Dateien} +\label{ConfigureChapter} +\index[general]{Dateien!Anpassen der Konfigurations } +\index[general]{Anpassen der Konfigurations-Dateien } + +Jedes einzelne der Bacula Programme liest beim Starten die angegebene Konfigurations-Datei ein, +falls keine angegeben wird benutzt Bacula jeweils die Standard-Konfigurations-Dateien {\bf bacula-dir.conf}, {\bf +bacula-fd.conf}, {\bf bacula-sd.conf}, oder {\bf console.conf} f\"{u}r den Director-Dienst, den Client-Dienst, +den Storage-Dienst und f\"{u}r das Console-Programm. + +Jeder Dienst (Director,Client, Storage und Console) hat seine eigene Konfigurations-Datei die eine Reihe von +Eintr\"{a}gen enth\"{a}lt. Die Eintr\"{a}ge sind sehr \"{a}hnlich, aber die angegebenen Parameter sind von +Dienst zu Dienst unterschiedlich. Zum Beispiel wird in der Director-Dienst-Konfiguration mit dem Eintrag +{\bf Director} der Name des Director-Dienstes, eine Reihe globaler Parameter, sowie das Director-Passwort festgelegt. +Der {\bf Director}-Eintrag im Client-Dienst gibt an, welcher Director-Dienst diesen Client kontaktieren darf. + +Bevor Sie Bacula zum ersten mal starten, m\"{u}ssen Sie die Konfigurations-Dateien f\"{u}r jeden Dienst anpassen. +Standard-Konfigurations-Dateien werden f\"{u}r jeden Dienst bei der Installation erzeugt, aber m\"{u}ssen Ihrem Computer +angepasst werden. Einen \"{U}berblick \"{u}ber die Konfigurations-Eintr\"{a}ge sehen Sie hier: + +\addcontentsline{lof}{figure}{Bacula Objects} +\includegraphics{./bacula-objects.eps} +\\ +(vielen Dank an Aristides Maniatis f\"{u}r diese Graphik) +\label{ResFormat} + +\section{Zeichens\"{a}tze} +\index[general]{Zeichens\"{a}tze} +Bacula wurde so entwickelt, dass es die meisten Zeichens\"{a}tze der Welt versteht, +US ASCII, deutsch, französich, chinesisch, ..... Allerdings tut es dies, indem es +alles in UTF-8 umwandelt und Bacula erwartet, dass alle Konfigurationsdateien +(auch die auf Win32-Computern) als UTF-8-Format vorliegen. Normalerweise ist UTF-8 +der Standard-Zeichensatz auf Linux-Computern, aber eventuell nicht auf anderen +Unix-Varianten oder auf Windows. Sie sollten also sicherstellen, dass die entsprechenden +Umgebungsvariablen richtig gesetzt sind, befor Sie Bacula starten. 
+ +Damit Bacula auch Konfigurations-Dateien mit fremden Zeichen korrekt lesen kann, +muss die Umgebungsvariable {bf LANG} mit {\bf .UTF-8} enden, zum Beispiel {\bf en\_US.UTF-8}. +Die Schreibweise kann bei verschiedenen Betriebssystemen variieren und ebenso kann auch die +Umgebungsvariable anders hei{\ss}en. Auf neueren Win32-Computern k\"{o}nnen Sie beim speichern +der Konfigurations-Dateien z.B. mit dem {\bf notepad} angeben, dass die Datei als UTF-8 +gespeichert werden soll. + +Bacula nimmt an, dass alle Dateinamen auf Linux und Unix im UTF-8-Format sind. +Bei Windows sind sie Unicode (UTF-16) und werden automatisch in UTF-8 umgewandelt. + +\section{Konfigurations-Parameter-Format} +\index[general]{Konfigurations-Parameter-Format } +\index[general]{Format!Konfigurations-Parameter } + +Auch wenn Sie nicht jedes Detail \"{u}ber alle Paramter wissen m\"{u}ssen, +ist ein grundlegendes Wissen des Konfigurations-Parameter-Formats erforderlich. +Jeder Konfigurations-Eintrag in einer Ressource (innerhalb der geschweiften Klammern) +ist zusammengesetzt aus dem Schl\"{u}sselwort gefolgt von einem Gleichheitszeichen, +dem dann ein oder mehrere Werte folgen. Das Schl\"{u}sselwort muss einem der +Bacula bekannten Konfigurations-Parameter entsprechen, wobei es gro{\ss}e oder kleine +Buchstaben enthalten darf, sowie auch Leerzeichen. + +Jede Ressource muss einen Paramter {\bf Name} beinhalten und kann zus\"{a}tzlich +eine optionale {\bf Description} enthalten. Der Name wird ben\"{o}tigt um die Ressource +eindeutig zu bezeichnen. Die Description wird verwendet wenn die Ressource angezeigt wird, +um eine leichtere Erkennung zu erm\"{o}glichen. +Ein Beispiel: + +\footnotesize +\begin{verbatim} +Director { + Name = "MeinDir" + Description = "Bacula Director" + WorkingDirectory = "$HOME/bacula/bin/working" +} +\end{verbatim} +\normalsize + +Diese Ressource definiert einen Director mit dem Namen "MeinDir" und dem Arbeitsverzeichnis +\$HOME/bacula/bin/working. Falls Sie Leerzeichen in einem Parameter verwenden wollen +(rechts vom Gleichheitszeichen) m\"{u}ssen Sie den Eintrag in doppelte Anf\"{u}hrungszeichen setzen. +Andernfalls sind Anf\"{u}hrungszeichen nicht n\"{o}tig. + +\label{Comments} +\subsection{Kommentare} +\index[general]{Kommentare} + +Wenn Bacula die Konfigurations-Dateien liest, werden leere Zeilen und alles hinter einem +Rautezeichen (\#) bis zum Zeilenende ignoriert. Ein Semikolon (;) wird als logisches +Zeilenende interprtiert und alles hinter dem Semikolon wird als n\"{a}chster +Konfigurations-Eintrag betrachtet. Wenn ein Eintrag in einer eigenen Zeile steht, +wird kein abschlie{\ss}endes Semikolon ben\"{o}tigt, in den Beispielen in diesem +Handbuch werden Sie daher kaum Semikolons finden. + + +\label{Case1} + +\subsection{Gro{\ss}/Kleinschreibung und Leerzeichen} +\index[general]{Leerzeichen!Gro{\ss}/Kleinschreibung} +\index[general]{Gro{\ss}/Kleinschreibung und Leerzeichen} + +Gro{\ss}/Kleinschreibung und Leerzeichen werden beim lesen der Schl\"{u}sselw\"{o}rter +(dem Teil vor dem Gleichheitszeichen) komplett ignoriert. + +Das bedeutet, dass die Schl\"{u}sselw\"{o}rter {\bf name}, {\bf Name} und {\bf N a m e} +alle identisch sind. + +Leerzeichen hinter dem Gleichheitszeichen, vor dem ersten Zeichen des Wertes +werden auch ignoriert. + +Generell werden Leerzeichen innerhalb eines Wertes nicht ignoriert, +wenn Leerzeichen im Wert vorhanden sind, muss der Wert in doppelte Anf\"{u}hrungszeichen +gesetzt werden. Namen d\"{u}rfen bis zu 127 Zeichen enthalten. 
Ein Name darf aus allen +ASCII-Zeichen bestehen. Innerhalb eine Zeichenkette die in doppelten Anf\"{u}hrungszeichen steht +kann man mit dem Backslash (umgekehrter Schr\"{a}gstrich \textbackslash{}) ein Zeichen maskieren, +damit es als es selbst dargestellt wird (praktisch um Anf\"{u}hrungszeichen und geschweifte Klammern +einzuf\"{u}gen). + +Bitte beachten Sie, dass Bacula Ressource-Namen, sowie bestimmte andere Namen (z.B. Volume-Namen), +nur aus Buchstaben, Zahlen und ein paar Sonderzeichen (Leerzeichen, Unterstrich,..) bestehen d\"{u}rfen. +Alle anderen Zeichen sind nicht erlaubt. + +\label{Includes} +\subsection{Einbinden anderer Konfigurations-Dateien} +\index[general]{Einbinden anderer Konfigurations-Dateien } +\index[general]{Dateien!Einbinden anderer Konfigurations } +\index[general]{Benutzung von @ zum einbinden anderer Dateien} +\index[general]{@{\bf Dateiname}} + +Falls Sie Ihre Konfiguration auf mehrere kleine Dateien aufteilen m\"{o}chten, +k\"{o}nnen Sie das tun, indem Sie andere Konfigurations-Dateien mit @{\bf Dateiname} einbinden. +Dabei muss @{\bf Dateiname} den absoluten Pfad und Dateinamen enthalten. Die Angabe @Dateiname +darf an jeder Stelle stehen, wo auch eine Konfigurationsangabe stehen kann. + +\label{DataTypes} +\subsection{grundlegende Datentypen} +\index[general]{Datentypen!grundlegende } +\index[general]{grundlegende Datentypen } + +Beim einlesen der Konfigurations-Parameter klassifiziert Bacula die Daten +gem\"{a}{\ss} den unten aufgelisteten Datentypen. Wenn Sie dass das erstemal lesen, +wird es Ihnen eventuell etwas kompliziert vorkommen, aber in Wahrheit ist es ganz einfach und logisch. + +\begin{description} + +\item [name] + \index[fd]{name} + Ein Schl\"{u}sselwort oder Name besteht aus alphanumerischen Zeichen, +einschlie{\ss}lich Bindestrich, Unterstrich und Dollar-Zeichen. Das erste Zeichen eines {\bf Name} +muss ein Buchstabe sein. Ein Name hat eine maximale L\"{a}nge von 127 Zeichen. Typischerweise stehen +Schl\"{u}sselw\"{o}rter auf der linken Seite des Gleichheitszeichens (d.h. es sind +Bacula-Schl\"{u}sselw\"{o}rter z.B. Konfigurations-Eintrag-Namen oder Parameter-Namen). +Schl\"{u}sselw\"{o}rter d\"{u}rfen nicht in Anf\"{u}hrungszeichen stehen. + +\item [name-string] + \index[fd]{name-string} + Ein Name-String ist \"{a}hnlich einem Namen, au{\ss}er das er in Anf\"{u}hrungszeichen stehen darf +und daher auch Lerrzeichen beinhalten kann. Ein Name-String darf 127 Zeichen lang sein und steht +typischerweise auf der rechten Seite des Gleichheitszeichens (d.h. es sind Werte die zum einem +Schl\"{u}sselwort geh\"{o}hren). + +\item [string] + \index[fd]{string} + Ein String ist eine Zeichenkette die, in Anf\"{u}hrungszeichen gestellt, jedes beliebige Zeichen enthalten darf. +Ein String hat keine L\"{a}ngenbegrenzung. Strings sind typischerweise Werte die Dateinamen, Verzeichnisnamen oder +Betriebssystem-Befehlen entsprechen. Ein Backslash (umgekehrter Schr\"{a}gstrich \textbackslash{}) maskiert das folgende +Zeichen als sich selbst, dadurch kann man Anf\"{u}hrungszeichen innerhalb des Strings verwenden, oder auch den Backslash selbst. + +\item [directory] + \index[dir]{directory} + A directory is either a quoted or non-quoted string. A directory will be +passed to your standard shell for expansion when it is scanned. Thus +constructs such as {\bf \$HOME} are interpreted to be their correct values. + +\item [password] + \index[dir]{password} + Ist ein Bacula-Passwort und wird intern als MD5-Hash gespeichert. 
+ +\item [integer] + \index[dir]{integer} + Eine 32-Bit Ganzzahl, positiv oder negativ. + +\item [positive integer] + \index[dir]{positive integer } + Eine positive 32Bit-Ganzzahl. + +\item [long integer] + \index[dir]{long integer} + Eine 64-Bit Ganzzahl. Typischerweise f\"{u}r Werte wie Bytes die \"{u}ber 4 Millionen +betragen k\"{o}nnen und daher 64-Bit erfordern. + +\item [yes|no] + \index[dir]{yes or no } + Entweder ein Ja: {\bf yes} oder ein Nein: {bf no}. + +\label{Size1} +\item [size] +\index[dir]{size} + Eine Gr\"{o}{\ss}e angegeben in Bytes. Typischerweise eine Flie{\ss}kommazahl in wissenschaftlicher Schreibweise, +gefolgt von einem Modifikator. Intern als 64-Bit Ganzzahl gespeichert. Wenn ein Modofikator angegeben wird, muss er +direkt und ohne Leerzeichen dem Wert folgen. +Die folgenden Modifikatoren sind erlaubt: + +\begin{description} +\item [k] + 1,024 (Kilobytes) + +\item [kb] + 1,000 (Kilobytes) + +\item [m] + 1,048,576 (Megabytes) + +\item [mb] + 1,000,000 (Megabytes) + +\item [g] + 1,073,741,824 (Gigabytes) + +\item [gb] + 1,000,000,000 (Gigabytes) +\end{description} + +\label{Time} +\item [time] +\index[dir]{time} + Eine Zeit oder ein Zeitraum in Sekunden. Intern als 64-Bit Ganzzahl gespeichert, +allerdings in zwei Teilen: ein Nummern-Teil und ein Modifikator-Teil. Die Nummer kann eine +Ganz- oder Flie{\ss}kommazahl sein. Wenn sie als Flie{\ss}kommazahl angegeben wird, wird auf +den n\"{a}chsten Ganzzahl-Wert gerundet. Der Modifikator ist zwingend erforderlich und mu{\ss} dem Nummern-teil folgen +(entweder durch Leerzeichen getrennt oder nicht). Die folgenden Modifikatoren sind erlaubt: + +\begin{description} + +\item [seconds] + \index[dir]{seconds} + Sekunden + +\item [minutes] + \index[dir]{minutes} + Minuten (60 Sekunden) + +\item [hours] + \index[dir]{hours } + Stunden (3600 Sekunden) + +\item [days] + \index[dir]{days} + Tage (3600*24 Sekunden) + +\item [weeks] + \index[dir]{weeks} + Wochen (3600*24*7 Sekunden) + +\item [months] + \index[dir]{months } + Monate (3600*24*30 Sekunden) + +\item [quarters] + \index[dir]{quarters } + Quartale (3600*24*91 Sekunden) + +\item [years] + \index[dir]{years } + Jahre (3600*24*365 Sekunden) +\end{description} + +Jede Abk\"{u}rzung dieser Modifikatoren ist erlaubt (d.h. {\bf Sekunden} k\"{o}nnen als +{\bf sec} oder {\bf s} angegeben werden). Ein {\bf m} wird als Monat angenommen. + +Die Angabe einer Zeit kann so viele Modifikatoren und Nummern enthalten, wie gew\"{u}nscht. +Ein Beispiel: + +\footnotesize +\begin{verbatim} +1 week 2 days 3 hours 10 mins +1 month 2 days 30 sec + +\end{verbatim} +\normalsize + +sind g\"{u}ltige Zeitangaben. + +\end{description} + +\label{ResTypes} +\section{Ressource Typen} +\index[general]{Typen!Ressource } +\index[general]{Ressource Typen } + +Die folgende Tabelle listet alle momentan von Bacula verwendeten Konfigurations-Eintr\"{a}ge auf. +Sie zeigt, welche Eintr\"{a}ge bei welchem Dienst vorhanden sein m\"{u}{\ss}en. Die Standard-Konfigurations-Dateien +beinhalten bereits mindestens ein Beispiel jedes ben\"{o}tigten Eintrags. Sie brauchen sich also keine Sorgen zu machen, +dass Sie diese Eintr\"{a}ge alle von Hand erstellen m\"{u}{\ss}en. 
+ +\addcontentsline{lot}{table}{Ressource Typen} +\begin{longtable}{|l|l|l|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Ressource } & \multicolumn{1}{c| }{\bf Director } & +\multicolumn{1}{c| }{\bf Client } & \multicolumn{1}{c| }{\bf Storage } & +\multicolumn{1}{c| }{\bf Console } \\ + \hline +{Autochanger } & {Nein } & {Nein } & {Ja } & {Nein } \\ +\hline +{Catalog } & {Ja } & {Nein } & {Nein } & {Nein } \\ + \hline +{Client } & {Ja } & {Ja } & {Nein } & {Nein } \\ + \hline +{Console } & {Ja } & {Nein } & {Nein } & {Ja } \\ + \hline +{Device } & {Nein } & {Nein } & {Ja } & {Nein } \\ + \hline +{Director } & {Ja } & {Ja } & {Ja } & {Ja } \\ + \hline +{FileSet } & {Ja } & {Nein } & {Nein } & {Nein } \\ + \hline +{Job } & {Ja } & {Nein } & {Nein } & {Nein } \\ + \hline +{JobDefs } & {Ja } & {Nein } & {Nein } & {Nein } \\ + \hline +{Message } & {Ja } & {Ja } & {Ja } & {Nein } \\ + \hline +{Pool } & {Ja } & {Nein } & {Nein } & {Nein } \\ + \hline +{Schedule } & {Ja } & {Nein } & {Nein } & {Nein } \\ + \hline +{Storage } & {Ja } & {Nein } & {Ja } & {Nein } +\\ \hline + +\end{longtable} + +\section{Namen, Passw\"{o}rter und Autorisation} +\label{Names} +\index[general]{Autorisation!Namen Passw\"{o}rter und } +\index[general]{Namen, Passw\"{o}rter und Autorisation } +\index[general]{Passw\"{o}rter} + +Damit ein Dienst mir einem anderen Kontakt aufnehmen darf, muss er sich mit einem Passwort autorisieren. +In den meisten F\"{a}llen geh\"{o}hrt ein Passwort zu einem bestimmten Namen, es muss also der Name und das Passwort +korrekt sein, um erfolgreich autorisiert zu werden. Passw\"{o}rter sind einfacher und beliebiger Text. Sie werden +nicht durch einen speziellen Prozess generiert; benutzen Sie einfach zuf\"{a}lligen Text. + +Die Standard-Konfigurations-Dateien enthalten automatisch erzeugte Passw\"{o}rter, die eine erfolgreiche Autorisierung +aller Dienste untereinander erlauben. Wenn Sie diese Passw\"{o}rter ver\"{a}ndern, m\"{u}{\ss}en Sie das auch auf der +entsprechenden Gegenseite tun. + +Hier ist ein Bild, worauf Sie sehen k\"{o}nnen, welche Namen und Passw\"{o}rter in welchen Dateien und Konfigurations-Eintr\"{a}gen +\"{u}bereinstimmen m\"{u}{\ss}en: + +\includegraphics{./Conf-Diagram.eps} + +Auf der linken Seite sehen Sie die Director-, Storage- und Client-Eintr\"{a}ge mit ihren Namen und Passw\"{o}rtern, +dieses steht alles in der Konfiguration des Director-Dienstes in der Datei {\bf bacula-dir.conf}. Auf der rechten Seite +sehen Sie die entsprechenden Eintr\"{a}ge in den Konfigurations-Dateien des Storage- und Client-Dienstes (SD und FD). + +Bitte beachten Sie, dass die Adresse {\bf fw-sd}, die in der Konfiguration des Storage-Dienstes steht, dem Client-Dienst +symbolisch \"{u}bergeben wird. Der Client-Dienst muss diesen Namen dann in eine g\"{u}ltigen IP-Adresse aufl\"{o}sen k\"{o}nnen. +Aus diesem Grund muss hier etweder eine IP-Adresse oder ein voll qualifizierter Rechnername stehen. Ein Name wie {\bf localhost} +ist nicht g\"{u}ltig und wird auf dem Client auf den Namen des localhost des Clients aufge\"{o}st. Das Passwort des Client-Dienstes +um sich am Storage-Dienst anzumelden ist tempor\"{a}r und wird dynamisch f\"{u}r jeden einzelnen Job erzeugt. Es steht also in keiner +der .conf-Dateien. 
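+
+Ein stark vereinfachtes Beispiel soll das Prinzip verdeutlichen
+(die Namen, Adressen und Passw\"{o}rter sind hier frei gew\"{a}hlt und
+dienen nur der Illustration): der Client-Eintrag in der {\bf bacula-dir.conf}
+und der Director-Eintrag in der {\bf bacula-fd.conf} des betreffenden
+Clients m\"{u}ssen dasselbe Passwort enthalten.
+
+\footnotesize
+\begin{verbatim}
+# Auszug aus bacula-dir.conf (Director-Dienst)
+Client {
+  Name = rufus-fd
+  Address = rufus.cats.com
+  Password = "geheimes-client-passwort"
+}
+
+# Auszug aus bacula-fd.conf (Client-Dienst auf rufus)
+Director {
+  Name = HeadMan
+  Password = "geheimes-client-passwort"   # identisch mit dem Client-Eintrag oben
+}
+\end{verbatim}
+\normalsize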
+ +\section{detailierte Information f\"{u}r jeden Dienst} +\index[general]{detailierte Information f\"{u}r jeden Dienst } +\index[general]{Dienst!detailierte Information f\"{u}r jeden } + +Die Details f\"{u}r jeden Konfigurations-Eintrag und die darin g\"{u}ltigen Parameter sind in den folgenden Kapiteln beschrieben. + +Die folgenden Konfigurations-Dateien m\"{u}{\ss}en definiert werden: + +\begin{itemize} +\item + \ilink{bconsole.conf}{ConsoleConfChapter} -- um die Konfiguration f\"{u}r das Console-Programm zu definieren +(die Benutzerschnittstelle zum Director-Dienst). Hier wird angegeben welche Director-Dienste verf\"{u}gbar sind, um mit ihnen zu arbeiten. + \item + \ilink{bacula-dir.conf}{DirectorChapter} -- um die Konfiguration des Director-Dienstes zu definieren. In dieser Datei geben Sie alle Storage- und +Client-Dienste an. +\item + \ilink{bacula-fd.conf}{FiledConfChapter} -- um die Konfiguration des Clients zu definieren. Diese Datei wird auf jedem Backup-Client ben\"{o}tigt. +\item + \ilink{bacula-sd.conf}{StoredConfChapter} -- um die Konfiguration des Storage-Dienstes zu definieren. Normalerweise werden Sie einen Storage-Dienst +haben, der Ihr Bandlaufwerk steuert. Wenn Sie mehrere Rechner mit angeschlossenen Bandlaufwerken haben, ben\"{o}tigen Sie nat\"{u}rlich einen Storage-Dienst +pro Rechner. +\end{itemize} diff --git a/docs/manuals/de/install/consoleconf.tex b/docs/manuals/de/install/consoleconf.tex new file mode 100644 index 00000000..768c1aec --- /dev/null +++ b/docs/manuals/de/install/consoleconf.tex @@ -0,0 +1,323 @@ +%% +%% + +\chapter{Console Konfiguration} +\label{ConsoleConfChapter} +\index[general]{Konfiguration!Console} +\index[general]{Console Konfiguration} + +\section{Allgemein} + +Die Console-Konfigurations-Datei ist die einfachste Konfigurations-Datei von allen. +Normalerweise m\"{u}{\ss}en Sie in dieser Datei nicht au{\ss}er dem Passwort \"{a}ndern. +Diese Datei enth\"{a}lt alle Informationen die n\"{o}tig sind, damit sich das Console-Programm +zu dem Director-Dienst verbinden kann und darf. + +F\"{u}r eine allgemeine \"{U}bersicht der Syntax der Konfigurations-Dateien, sowie der verschiedenen Eintr\"{a}ge, +einschlie{\ss}lich der Datentypen, sehen Sie sich bitte das Kapitel \ilink{Konfiguration}{ConfigureChapter} an. + +Die folgenden Console-Konfigurations-Parameter m\"{u}ssen definiert werden: + +\section{Der Director-Eintrag} +\label{DirectorResource3} +\index[general]{Director Eintrag} +\index[general]{Eintrag!Director} + +Der Director-Eintrag enth\"{a}lt die notwendigen Parameter, um \"{u}ber das Console-Programm +Zugriff auf den Director-Dienst zu haben. Sie k\"{o}nnen mehrere Director-Dienste in dieser Datei angeben, +in dem Fall werden Sie beim starten der Console gefragt, zu welchem Director-Dienst Sie sich verbinden wollen. + +\begin{description} +\item [Director] + \index[console]{Director} + Beginn des Director-Eintrags. + +\item [Name = \lt{}name\gt{}] + \index[console]{Name} + Der Name des Directors, wird nur zur Unterscheidung benutzt, wenn Sie mehrere Director-Dienste konfiguriert haben. + +\item [DIRPort = \lt{}port-number\gt{}] + \index[dir]{DIRPort} + gibt den Port an, auf dem der Director-Dienst l\"{a}uft. Wenn Sie die {\bf ./configure} Option + {\bf \verb:--:with-base-port} angegeben haben, wird dieser Wert schon entsprechend gesetzt sein. + Der Port mu{\ss} mit dem in der Director-Konfiguration angegebenen {\bf DIRport} identisch sein. 
+  Standardm\"{a}{\ss}ig wird der Port 9101 verwendet, so dass dieser Parameter normalerweise nicht gesetzt ist.
+
+\item [Address = \lt{}address\gt{}]
+  \index[dir]{Address}
+  die Adresse ist ein Rechnername, ein voll qualifizierter Domain-Name (FQDN) oder die IP-Adresse, auf der der Director-Dienst l\"{a}uft.
+
+\item [Password = \lt{}password\gt{}]
+  \index[dir]{Password}
+  das Passwort, das benutzt wird, um die Console beim Director-Dienst zu autorisieren.
+  Das Passwort muss mit dem in der \ilink{Director-Konfiguration}{DirectorChapter} gesetzten Passwort identisch sein.
+\end{description}
+
+Ein Beispiel f\"{u}r einen Director-Eintrag in der Console-Konfigurations-Datei:
+
+\footnotesize
+\begin{verbatim}
+Director {
+  Name = HeadMan
+  address = rufus.cats.com
+  password = xyz1erploit
+}
+\end{verbatim}
+\normalsize
+
+\section{Der ConsoleFont-Eintrag}
+\index[general]{Eintrag!ConsoleFont}
+\index[general]{ConsoleFont Eintrag}
+
+Der ConsoleFont-Konfigurations-Eintrag ist nur in der GNOME-Version des Console-Programms verf\"{u}gbar.
+Er erlaubt Ihnen, die im Hauptfenster verwendete Schriftart auszuw\"{a}hlen.
+
+\begin{description}
+
+\item [ConsoleFont]
+  \index[console]{ConsoleFont}
+  Beginn des ConsoleFont-Eintrags.
+
+\item [Name = \lt{}name\gt{}]
+  \index[console]{Name}
+  Der Name des ConsoleFont-Eintrags.
+
+\item [Font = \lt{}Pango Font Name\gt{}]
+  \index[console]{Font}
+  Dieser Wert gibt den Namen der Schriftart im Pango-Format an.
+  Ein Beispiel:
+
+\footnotesize
+\begin{verbatim}
+Font = "LucidaTypewriter 9"
+\end{verbatim}
+\normalsize
+
+\end{description}
+
+Vielen Dank an Phil Stracchino, der diese Funktion in Bacula implementiert hat.
+
+Hier noch ein zweites Beispiel:
+\footnotesize
+\begin{verbatim}
+ConsoleFont {
+  Name = Default
+  Font = "Monospace 10"
+}
+\end{verbatim}
+\normalsize
+
+\section{Der Console-Eintrag}
+\label{ConsoleResource}
+\index[general]{Console-Eintrag}
+\index[general]{Eintrag!Console}
+
+Seit der Bacula-Version 1.33 gibt es drei verschiedene Console-Typen, die der Administrator oder Benutzer
+zur Verwaltung des Director-Dienstes verwenden kann. Diese drei verschiedenen Typen umfassen drei unterschiedliche
+Sicherheitslevel.
+
+\begin{itemize}
+\item Der erste Consolen-Typ ist die {\bf anonymous} oder {\bf default} Console, die alle Rechte hat.
+  F\"{u}r diesen Typ ist kein spezieller Eintrag notwendig, da das Passwort in der Director-Konfiguration angegeben wird.
+  Dieser Consolen-Typ war der erste, der in Versionen vor 1.33 vorhanden war, und steht auch weiterhin zur Verf\"{u}gung.
+  Normalerweise wird diese Console von Administratoren benutzt.
+
+\item Der zweite Consolen-Typ, den es seit Version 1.33 gibt, ist die {\bf named} oder {\bf restricted} Console.
+  Dieser Typ muss sowohl in der Director-Konfiguration als auch in der Console-Konfigurations-Datei angegeben werden.
+  Name und Passwort m\"{u}ssen dabei in beiden Konfigurations-Dateien \"{u}bereinstimmen.
+
+  Dieser zweite Consolen-Typ hat absolut keine Rechte, au{\ss}er denen, die in der Director-Konfiguration
+  explizit zugewiesen werden. Die Director-Konfiguration legt also fest, was dieser Consolen-Typ darf.
+
+  Damit k\"{o}nnen Sie also in der Director-Konfiguration diverse Consolen-Eintr\"{a}ge anlegen,
+  die jeweils unterschiedliche Passw\"{o}rter und Namen haben, und diese dann verschiedenen Benutzern zuweisen,
+  die dann z.B. nur auf bestimmte Kommandos und Clients Zugriff haben.
Standardm\"{a}{\ss}ig darf diese Console + \"{u}berhaupt nichts -- keine Kommandos ausf\"{u}hren, absolut nichts. Sie m\"{u}ssen die Berechtigung f\"{u}r + bestimmte Kommandos und Ressourcen in der Zugriffskontrollliste innerhalb der Director-Konfiguration erteilen. + Dadurch hat der Administrator gezielte Kontrolle dar\"{u}ber, was er der Console bzw. dem Benutzer erlaubt. + +\item Der dritte Consolen-Typ ist \"{a}hnlich des zweiten, auch er ben\"{o}tigt eine Definition in der + Director- und Consolen-Konfiguration. Bei diesem Typ ist es so, dass wenn der Consolen-Name, der im + Parameter {\bf Name =} definiert ist, identisch mit dem Client-Namen ist, dem Benutzer erlaubt wird, + das {\bf SetIP}-Kommando zu benutzen. Dieses Kommando erm\"{o}glicht dem Client, dem Director-Dienst + mitzuteilen, unter welcher IP-Adresse der Client momentan zu erreichen ist. Dadurch k\"{o}nnen Rechner, + die ihr Netzwerk mittels DHCP dynamisch konfigurieren, dem Director-Dienst ihre aktuelle IP-Adresse melden. + +\end{itemize} + +Der Consolen-Konfigurations-Eintrag ist optional, wenn er angegeben wird, haben Sie allerdings die M\"{o}glichkeit, +Zugriffskontrolllisten anzulegen, um die entsprechende Console in ihren Rechten einzuschr\"{a}nken. +Damit k\"{o}nnen Sie z.B. dem Benutzer nur Zugriff auf die Backup-Jobs seines Clients erlauben. + +Sie k\"{o}nnen beliebig viele Console-Eintr\"{a}ge in Ihrer Consolen-Konfiguration anlegen. Im allgemeinen wird dann +immer der erste Eintrag verwendet. Wenn Sie allerdings mehrere Director-Dienste, mit entsprechenden Eintr\"{a}gen in +Ihrer Consolen-Konfiguration, haben, m\"{u}ssen Sie beim starten der Console einen der Director-Dienste ausw\"{a}hlen. +Lesen Sie bitte auch die Beschreibung des "Director"-Parameters in der Console-Konfiguration, der weiter unten +beschrieben wird. + +\begin{description} +\item [Console] + \index[console]{Console} + Beginn des Console-Eintrags. + +\item [Name = \lt{}name\gt{}] + \index[console]{Name} + Der Name der Console. Er wird benutzt um dieser Console in der Director-Konfiguration eine + Zugriffskontrollliste zuzuweisen. + +\item [Password = \lt{}password\gt{}] + \index[console]{Passwort} + Wenn Sie hier ein Passwort angegeben, wird das Passwort aus dem Director-Eintrag ignoriert. + Weiter unten finden Sie dazu Details. + +\item [Director = \lt{}director-resource-name\gt{}] + Falls dieser Parameter angegeben wird, kann dieser Consolen-Eintrag \"{u}ber ein Auswahlmen\"{u} + beim ersten starten der Console selektiert werden. Er bestimmt dann, mit welchen Namen und Passwort + sich das Console-Programm bei welchen Director-Dienst anmeldet. + +\item [Heartbeat Interval = \lt{}time-interval\gt{}] + \index[console]{Heartbeat Intervall} + \index[console]{Parameter!Heartbeat} + Dieser Parameter ist optional. Falls Sie ihn angeben, wird im Abstand des konfigurierten Intervalls (in Sekunden) + ein \elink{keepalive}{http://de.wikipedia.org/wiki/Keepalive} zum Director-Dienst geschickt. Es ist nur auf Betriebssystemen + implementiert, die die {\bf setsockopt} TCP\_KEEPIDLE unterst\"{u}tzen (Linux, ...). + Der Standardwert ist null, d.h. es werden keine keepalives gesendet. + +\end{description} + + +Ein Beispiel, wenn Sie folgendes in Ihrer Consolen-Konfigurations-Datei, bconsole.conf oder bwx-console.conf, +definieren: + +\footnotesize +\begin{verbatim} +Director { + Name = MyDirector + DIRport = 9101 + Address = myserver + Password = "XXXXXXXXXXX" # das dient hier nicht der Unkenntlichmachung. 
+}
+
+
+Console {
+   Name = restricted-user
+   Password = "UntrustedUser"
+}
+\end{verbatim}
+\normalsize
+
+wobei das Passwort im Director-Konfigurations-Eintrag bewusst falsch gesetzt ist
+und der Consolen-Eintrag einen Namen besitzt, hier {\bf restricted-user}. Danach erstellen Sie in der Konfiguration
+des Director-Dienstes, auf die der Benutzer keinen Zugriff hat, folgenden Consolen-Eintrag:
+
+\footnotesize
+\begin{verbatim}
+Console {
+  Name = restricted-user
+  Password = "UntrustedUser"
+  JobACL = "Restricted Client Save"
+  ClientACL = restricted-client
+  StorageACL = main-storage
+  ScheduleACL = *all*
+  PoolACL = *all*
+  FileSetACL = "Restricted Client's FileSet"
+  CatalogACL = DefaultCatalog
+  CommandACL = run
+}
+\end{verbatim}
+\normalsize
+
+dann wird der Benutzer beim Anmelden an den Director-Dienst als {\bf restricted-user} angemeldet.
+Der Benutzer wird nur Zugriff auf Jobs mit dem Namen {\bf Restricted Client Save},
+auf den Client {\bf restricted-client},
+auf den Storage {\bf main-storage},
+auf jeden Zeitplan (Schedule) und auf jeden Pool,
+auf ein FileSet namens {\bf Restricted Client's FileSet},
+auf den Katalog {\bf DefaultCatalog},
+sowie einzig und allein auf das Kommando {\bf run} haben.
+Mit anderen Worten, dieser Benutzer ist sehr eingeschr\"{a}nkt in dem, was er mit der Console
+sehen und tun kann.
+
+Das folgende Beispiel zeigt eine bconsole.conf-Datei, in der mehrere Director-Dienste
+sowie verschiedene, vom jeweiligen Director abh\"{a}ngige Consolen-Eintr\"{a}ge zu sehen sind:
+
+\footnotesize
+\begin{verbatim}
+Director {
+   Name = MyDirector
+   DIRport = 9101
+   Address = myserver
+   Password = "XXXXXXXXXXX"  # das dient hier nicht der Unkenntlichmachung.
+}
+
+Director {
+   Name = SecondDirector
+   DIRport = 9101
+   Address = secondserver
+   Password = "XXXXXXXXXXX"  # das dient hier nicht der Unkenntlichmachung.
+}
+
+Console {
+   Name = restricted-user
+   Password = "UntrustedUser"
+   Director = MyDirector
+}
+
+Console {
+   Name = restricted-user
+   Password = "A different UntrustedUser"
+   Director = SecondDirector
+}
+\end{verbatim}
+\normalsize
+
+Der zweite Director-Dienst, der auf dem Rechner {\bf secondserver} l\"{a}uft, k\"{o}nnte diese Konfiguration besitzen:
+
+\footnotesize
+\begin{verbatim}
+Console {
+  Name = restricted-user
+  Password = "A different UntrustedUser"
+  JobACL = "Restricted Client Save"
+  ClientACL = restricted-client
+  StorageACL = second-storage
+  ScheduleACL = *all*
+  PoolACL = *all*
+  FileSetACL = "Restricted Client's FileSet"
+  CatalogACL = RestrictedCatalog
+  CommandACL = run, restore
+  WhereACL = "/"
+}
+\end{verbatim}
+\normalsize
+
+Im Unterschied zum ersten Director-Dienst darf der Benutzer hier, neben einem anderen Storage, auch
+das Consolen-Kommando {\bf restore} ausf\"{u}hren (wobei er als {\bf Where} allerdings nur "/" angeben darf).
+
+
+\section{Console-Kommandos}
+\index[general]{Console-Kommandos}
+\index[general]{Kommandos!Console}
+
+F\"{u}r mehr Details zum Arbeiten mit der Console und ihren Kommandos
+lesen Sie bitte das Kapitel \ilink{Bacula Console}{_ConsoleChapter} in diesem Handbuch.
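+
+Ein kurzes, rein hypothetisches Beispiel (Pfad und Dateiname sind hier nur Annahmen): so k\"{o}nnte der oben
+definierte eingeschr\"{a}nkte Benutzer die Console mit einer eigenen Konfigurations-Datei starten und das einzige
+ihm erlaubte Kommando ausf\"{u}hren:
+
+\footnotesize
+\begin{verbatim}
+# Console mit expliziter Konfigurations-Datei starten
+./bconsole -c /etc/bacula/bconsole-restricted.conf
+
+# am Consolen-Prompt darf dieser Benutzer nur das run-Kommando benutzen
+*run job="Restricted Client Save"
+\end{verbatim}
+\normalsize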
+ +\section{Beispiel Console-Konfigurations-Datei} +\label{SampleConfiguration2} +\index[general]{Datei!Beispiel Console-Konfiguration} +\index[general]{Beispiel Console-Konfigurations-Datei} + +Dies k\"{o}nnte ein Beispiel f\"{u}r eine Console-Konfigurations-Datei sein: + +\footnotesize +\begin{verbatim} +# +# Bacula Console Configuration File +# +Director { + Name = HeadMan + address = "my_machine.my_domain.com" + Password = Console_password +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/install/critical.tex b/docs/manuals/de/install/critical.tex new file mode 100644 index 00000000..30462e39 --- /dev/null +++ b/docs/manuals/de/install/critical.tex @@ -0,0 +1,130 @@ +%% +%% + +\chapter{Critical Items to Implement Before Production} +\label{CriticalChapter} +\index[general]{Production!Critical Items to Implement Before } +\index[general]{Critical Items to Implement Before Production } + +We recommend you take your time before implementing a production a Bacula +backup system since Bacula is a rather complex program, and if you make a +mistake, you may suddenly find that you cannot restore your files in case +of a disaster. This is especially true if you have not previously used a +major backup product. + +If you follow the instructions in this chapter, you will have covered most of +the major problems that can occur. It goes without saying that if you ever +find that we have left out an important point, please inform us, so +that we can document it to the benefit of everyone. + +\label{Critical} +\section{Critical Items} +\index[general]{Critical Items } +\index[general]{Items!Critical } + +The following assumes that you have installed Bacula, you more or less +understand it, you have at least worked through the tutorial or have +equivalent experience, and that you have set up a basic production +configuration. If you haven't done the above, please do so and then come back +here. The following is a sort of checklist that points with perhaps a brief +explanation of why you should do it. In most cases, you will find the +details elsewhere in the manual. The order is more or less the order you +would use in setting up a production system (if you already are in +production, use the checklist anyway). + +\begin{itemize} +\item Test your tape drive for compatibility with Bacula by using the test + command in the \ilink{btape}{btape} program. +\item Better than doing the above is to walk through the nine steps in the + \ilink{Tape Testing}{TapeTestingChapter} chapter of the manual. It + may take you a bit of time, but it will eliminate surprises. +\item Test the end of tape handling of your tape drive by using the + fill command in the \ilink{btape}{btape} program. +\item If you are using a Linux 2.4 kernel, make sure that /lib/tls is disabled. Bacula + does not work with this library. See the second point under + \ilink{ Supported Operating Systems.}{SupportedOSes} +\item Do at least one restore of files. If you backup multiple OS types + (Linux, Solaris, HP, MacOS, FreeBSD, Win32, ...), + restore files from each system type. The + \ilink{Restoring Files}{RestoreChapter} chapter shows you how. +\item Write a bootstrap file to a separate system for each backup job. The + Write Bootstrap directive is described in the + \ilink{Director Configuration}{writebootstrap} chapter of the + manual, and more details are available in the + \ilink{Bootstrap File}{BootstrapChapter} chapter. Also, the default + bacula-dir.conf comes with a Write Bootstrap directive defined. 
+  This allows
+  you to recover the state of your system as of the last backup.
+\item Backup your catalog. An example of this is found in the default
+  bacula-dir.conf file. The backup script is installed by default and
+  should handle any database, though you may want to make your own local
+  modifications. See also \ilink{Backing Up Your Bacula Database -
+  Security Considerations }{BackingUpBaculaSecurityConsiderations} for more
+  information.
+\item Write a bootstrap file for the catalog. An example of this is found in
+  the default bacula-dir.conf file. This will allow you to quickly restore your
+  catalog in the event it is wiped out -- otherwise it is many excruciating
+  hours of work.
+\item Make a copy of the bacula-dir.conf, bacula-sd.conf, and
+  bacula-fd.conf files that you are using on your server. Put it in a safe
+  place (on another machine) as these files can be difficult to
+  reconstruct if your server dies.
+\item Make a Bacula Rescue CDROM! See the
+  \ilink{Disaster Recovery Using a Bacula Rescue
+  CDROM}{RescueChapter} chapter. It is trivial to make such a CDROM,
+  and it can make system recovery in the event of a lost hard disk infinitely
+  easier.
+\item Bacula assumes all filenames are in UTF-8 format. This is important
+  when saving the filenames to the catalog. For Win32 machines, Bacula will
+  automatically convert from Unicode to UTF-8, but on Unix, Linux, *BSD,
+  and MacOS X machines, you must explicitly ensure that your locale is set
+  properly. Typically this means that the {\bf LANG} environment variable
+  must end in {\bf .UTF-8}. A full example is {\bf en\_US.UTF-8}. The
+  exact syntax may vary a bit from OS to OS, and exactly how you define it
+  will also vary.
+
+  On most modern Win32 machines, you can edit the conf files with {\bf
+  notepad} and choose output encoding UTF-8.
+\end{itemize}
+
+\section{Recommended Items}
+\index[general]{Items!Recommended }
+\index[general]{Recommended Items }
+
+Although these items may not be critical, they are recommended and will help
+you avoid problems.
+
+\begin{itemize}
+\item Read the \ilink{Quick Start Guide to Bacula}{QuickStartChapter}.
+\item After installing and experimenting with Bacula, read and work carefully
+  through the examples in the
+  \ilink{Tutorial}{TutorialChapter} chapter of this manual.
+\item Learn what each of the \ilink{Bacula Utility Programs}{_UtilityChapter}
+  does.
+\item Set up reasonable retention periods so that your catalog does not grow
+  to be too big. See the following three chapters:\\
+  \ilink{Recycling your Volumes}{RecyclingChapter},\\
+  \ilink{Basic Volume Management}{DiskChapter},\\
+  \ilink{Using Pools to Manage Volumes}{PoolsChapter}.
+\item Perform a bare metal recovery using the Bacula Rescue CDROM. See the
+  \ilink{Disaster Recovery Using a Bacula Rescue CDROM}{RescueChapter}
+  chapter.
+\end{itemize}
+
+If you absolutely must implement a system where you write a different
+tape each night and take it offsite in the morning, we recommend that you do
+several things:
+\begin{itemize}
+\item Write a bootstrap file of your backed up data and a bootstrap file
+  of your catalog backup to a floppy disk or a CDROM, and take that with
+  the tape. If this is not possible, try to write those files to another
+  computer or offsite computer, or send them as email to a friend. If none
+  of that is possible, at least print the bootstrap files and take that
+  offsite with the tape. Having the bootstrap files will make recovery
+  much easier.
+\item It is better not to force Bacula to load a particular tape each day. + Instead, let Bacula choose the tape. If you need to know what tape to + mount, you can print a list of recycled and appendable tapes daily, and + select any tape from that list. Bacula may propose a particular tape + for use that it considers optimal, but it will accept any valid tape + from the correct pool. +\end{itemize} diff --git a/docs/manuals/de/install/dirdconf.tex b/docs/manuals/de/install/dirdconf.tex new file mode 100644 index 00000000..c823d640 --- /dev/null +++ b/docs/manuals/de/install/dirdconf.tex @@ -0,0 +1,3377 @@ +%% +%% + +\chapter{Configuring the Director} +\label{DirectorChapter} +\index[general]{Director!Configuring the} +\index[general]{Configuring the Director} + +Of all the configuration files needed to run {\bf Bacula}, the Director's is +the most complicated, and the one that you will need to modify the most often +as you add clients or modify the FileSets. + +For a general discussion of configuration files and resources including the +data types recognized by {\bf Bacula}. Please see the +\ilink{Configuration}{ConfigureChapter} chapter of this manual. + +\section{Director Resource Types} +\index[general]{Types!Director Resource} +\index[general]{Director Resource Types} + +Director resource type may be one of the following: + +Job, JobDefs, Client, Storage, Catalog, Schedule, FileSet, Pool, Director, or +Messages. We present them here in the most logical order for defining them: + +Note, everything revolves around a job and is tied to a job in one +way or another. + +\begin{itemize} +\item + \ilink{Director}{DirectorResource4} -- to define the Director's + name and its access password used for authenticating the Console program. + Only a single Director resource definition may appear in the Director's + configuration file. If you have either {\bf /dev/random} or {\bf bc} on your + machine, Bacula will generate a random password during the configuration + process, otherwise it will be left blank. +\item + \ilink{Job}{JobResource} -- to define the backup/restore Jobs + and to tie together the Client, FileSet and Schedule resources to be used + for each Job. Normally, you will Jobs of different names corresponding + to each client (i.e. one Job per client, but a different one with a different name + for each client). +\item + \ilink{JobDefs}{JobDefsResource} -- optional resource for + providing defaults for Job resources. +\item + \ilink{Schedule}{ScheduleResource} -- to define when a Job is to + be automatically run by {\bf Bacula's} internal scheduler. You + may have any number of Schedules, but each job will reference only + one. +\item + \ilink{FileSet}{FileSetResource} -- to define the set of files + to be backed up for each Client. You may have any number of + FileSets but each Job will reference only one. +\item + \ilink{Client}{ClientResource2} -- to define what Client is to be + backed up. You will generally have multiple Client definitions. Each + Job will reference only a single client. +\item + \ilink{Storage}{StorageResource2} -- to define on what physical + device the Volumes should be mounted. You may have one or + more Storage definitions. +\item + \ilink{Pool}{PoolResource} -- to define the pool of Volumes + that can be used for a particular Job. Most people use a + single default Pool. However, if you have a large number + of clients or volumes, you may want to have multiple Pools. 
+ Pools allow you to restrict a Job (or a Client) to use + only a particular set of Volumes. +\item + \ilink{Catalog}{CatalogResource} -- to define in what database to + keep the list of files and the Volume names where they are backed up. + Most people only use a single catalog. However, if you want to + scale the Director to many clients, multiple catalogs can be helpful. + Multiple catalogs require a bit more management because in general + you must know what catalog contains what data. Currently, all + Pools are defined in each catalog. This restriction will be removed + in a later release. +\item + \ilink{Messages}{MessagesChapter} -- to define where error and + information messages are to be sent or logged. You may define + multiple different message resources and hence direct particular + classes of messages to different users or locations (files, ...). +\end{itemize} + +\section{The Director Resource} +\label{DirectorResource4} +\index[general]{Director Resource} +\index[general]{Resource!Director} + +The Director resource defines the attributes of the Directors running on the +network. In the current implementation, there is only a single Director +resource, but the final design will contain multiple Directors to maintain +index and media database redundancy. + +\begin{description} + +\item [Director] + \index[dir]{Director} + Start of the Director resource. One and only one director resource must be +supplied. + +\item [Name = \lt{}name\gt{}] + \index[dir]{Name} + \index[dir]{Directive!Name} + The director name used by the system administrator. This directive is +required. + +\item [Description = \lt{}text\gt{}] + \index[dir]{Description} + \index[dir]{Directive!Description} + The text field contains a description of the Director that will be displayed +in the graphical user interface. This directive is optional. + +\item [Password = \lt{}UA-password\gt{}] + \index[dir]{Password} + \index[dir]{Directive!Password} + Specifies the password that must be supplied for the default Bacula + Console to be authorized. The same password must appear in the {\bf + Director} resource of the Console configuration file. For added + security, the password is never passed across the network but instead a + challenge response hash code created with the password. This directive + is required. If you have either {\bf /dev/random} or {\bf bc} on your + machine, Bacula will generate a random password during the configuration + process, otherwise it will be left blank and you must manually supply + it. + + The password is plain text. It is not generated through any special + process but as noted above, it is better to use random text for + security reasons. + +\item [Messages = \lt{}Messages-resource-name\gt{}] + \index[dir]{Messages} + \index[dir]{Directive!Messages} + The messages resource specifies where to deliver Director messages that are + not associated with a specific Job. Most messages are specific to a job and + will be directed to the Messages resource specified by the job. However, + there are a few messages that can occur when no job is running. This + directive is required. + +\item [Working Directory = \lt{}Directory\gt{}] + \index[dir]{Working Directory} + \index[dir]{Directive!Working Directory} + This directive is mandatory and specifies a directory in which the Director + may put its status files. This directory should be used only by Bacula but + may be shared by other Bacula daemons. 
However, please note, if this + directory is shared with other Bacula daemons (the File daemon and Storage + daemon), you must ensure that the {\bf Name} given to each daemon is + unique so that the temporary filenames used do not collide. By default + the Bacula configure process creates unique daemon names by postfixing them + with -dir, -fd, and -sd. Standard shell expansion of the {\bf + Directory} is done when the configuration file is read so that values such + as {\bf \$HOME} will be properly expanded. This directive is required. + The working directory specified must already exist and be + readable and writable by the Bacula daemon referencing it. + + If you have specified a Director user and/or a Director group on your + ./configure line with {\bf {-}{-}with-dir-user} and/or + {\bf {-}{-}with-dir-group} the Working Directory owner and group will + be set to those values. + +\item [Pid Directory = \lt{}Directory\gt{}] + \index[dir]{Pid Directory} + \index[dir]{Directive!Pid Directory} + This directive is mandatory and specifies a directory in which the Director + may put its process Id file. The process Id file is used to shutdown + Bacula and to prevent multiple copies of Bacula from running simultaneously. + Standard shell expansion of the {\bf Directory} is done when the + configuration file is read so that values such as {\bf \$HOME} will be + properly expanded. + + The PID directory specified must already exist and be + readable and writable by the Bacula daemon referencing it + + Typically on Linux systems, you will set this to: {\bf /var/run}. If you are + not installing Bacula in the system directories, you can use the {\bf Working + Directory} as defined above. This directive is required. + +\item [Scripts Directory = \lt{}Directory\gt{}] + \index[dir]{Scripts Directory} + \index[dir]{Directive!Scripts Directory} + This directive is optional and, if defined, specifies a directory in + which the Director will look for the Python startup script {\bf + DirStartup.py}. This directory may be shared by other Bacula daemons. + Standard shell expansion of the directory is done when the configuration + file is read so that values such as {\bf \$HOME} will be properly + expanded. + +\item [QueryFile = \lt{}Path\gt{}] + \index[dir]{QueryFile} + \index[dir]{Directive!QueryFile} + This directive is mandatory and specifies a directory and file in which + the Director can find the canned SQL statements for the {\bf Query} + command of the Console. Standard shell expansion of the {\bf Path} is + done when the configuration file is read so that values such as {\bf + \$HOME} will be properly expanded. This directive is required. + +\label{DirMaxConJobs} +\item [Maximum Concurrent Jobs = \lt{}number\gt{}] +\index[dir]{Maximum Concurrent Jobs} +\index[dir]{Directive!Maximum Concurrent Jobs} +\index[general]{Simultaneous Jobs} +\index[general]{Concurrent Jobs} + where \lt{}number\gt{} is the maximum number of total Director Jobs that + should run concurrently. The default is set to 1, but you may set it to a + larger number. + + Please note that the Volume format becomes much more complicated with + multiple simultaneous jobs, consequently, restores can take much longer if + Bacula must sort through interleaved volume blocks from multiple simultaneous + jobs. This can be avoided by having each simultaneously running job write to + a different volume or by using data spooling, which will first spool the data + to disk simultaneously, then write each spool file to the volume in + sequence. 
+ + There may also still be some cases where directives such as {\bf Maximum + Volume Jobs} are not properly synchronized with multiple simultaneous jobs + (subtle timing issues can arise), so careful testing is recommended. + + At the current time, there is no configuration parameter set to limit the + number of console connections. A maximum of five simultaneous console + connections are permitted. + +\item [FD Connect Timeout = \lt{}time\gt{}] + \index[dir]{FD Connect Timeout} + \index[dir]{Directive!FD Connect Timeout} + where {\bf time} is the time that the Director should continue + attempting to contact the File daemon to start a job, and after which + the Director will cancel the job. The default is 30 minutes. + +\item [SD Connect Timeout = \lt{}time\gt{}] + \index[dir]{SD Connect Timeout} + \index[dir]{Directive!SD Connect Timeout} + where {\bf time} is the time that the Director should continue + attempting to contact the Storage daemon to start a job, and after which + the Director will cancel the job. The default is 30 minutes. + +\item [DirAddresses = \lt{}IP-address-specification\gt{}] + \index[dir]{DirAddresses} + \index[dir]{Address} + \index[general]{Address} + \index[dir]{Directive!DirAddresses} + Specify the ports and addresses on which the Director daemon will listen + for Bacula Console connections. Probably the simplest way to explain + this is to show an example: + +\footnotesize +\begin{verbatim} + DirAddresses = { + ip = { addr = 1.2.3.4; port = 1205;} + ipv4 = { + addr = 1.2.3.4; port = http;} + ipv6 = { + addr = 1.2.3.4; + port = 1205; + } + ip = { + addr = 1.2.3.4 + port = 1205 + } + ip = { addr = 1.2.3.4 } + ip = { addr = 201:220:222::2 } + ip = { + addr = bluedot.thun.net + } +} +\end{verbatim} +\normalsize + +where ip, ip4, ip6, addr, and port are all keywords. Note, that the address +can be specified as either a dotted quadruple, or IPv6 colon notation, or as +a symbolic name (only in the ip specification). Also, port can be specified +as a number or as the mnemonic value from the /etc/services file. If a port +is not specified, the default will be used. If an ip section is specified, +the resolution can be made either by IPv4 or IPv6. If ip4 is specified, then +only IPv4 resolutions will be permitted, and likewise with ip6. + +Please note that if you use the DirAddresses directive, you must +not use either a DirPort or a DirAddress directive in the same +resource. + +\item [DirPort = \lt{}port-number\gt{}] + \index[dir]{DirPort} + \index[dir]{Directive!DirPort} + Specify the port (a positive integer) on which the Director daemon will + listen for Bacula Console connections. This same port number must be + specified in the Director resource of the Console configuration file. The + default is 9101, so normally this directive need not be specified. This + directive should not be used if you specify DirAddresses (not plural) + directive. + +\item [DirAddress = \lt{}IP-Address\gt{}] + \index[dir]{DirAddress} + \index[dir]{Directive!DirAddress} + This directive is optional, but if it is specified, it will cause the + Director server (for the Console program) to bind to the specified {\bf + IP-Address}, which is either a domain name or an IP address specified as a + dotted quadruple in string or quoted string format. If this directive is not + specified, the Director will bind to any available address (the default). + Note, unlike the DirAddresses specification noted above, this directive only + permits a single address to be specified. 
This directive should not be used if you + specify a DirAddresses (note plural) directive. + + + +\end{description} + +The following is an example of a valid Director resource definition: + +\footnotesize +\begin{verbatim} +Director { + Name = HeadMan + WorkingDirectory = "$HOME/bacula/bin/working" + Password = UA_password + PidDirectory = "$HOME/bacula/bin/working" + QueryFile = "$HOME/bacula/bin/query.sql" + Messages = Standard +} +\end{verbatim} +\normalsize + +\section{The Job Resource} +\label{JobResource} +\index[general]{Resource!Job} +\index[general]{Job Resource} + +The Job resource defines a Job (Backup, Restore, ...) that Bacula must +perform. Each Job resource definition contains the name of a Client and +a FileSet to backup, the Schedule for the Job, where the data +are to be stored, and what media Pool can be used. In effect, each Job +resource must specify What, Where, How, and When or FileSet, Storage, +Backup/Restore/Level, and Schedule respectively. Note, the FileSet must +be specified for a restore job for historical reasons, but it is no longer used. + +Only a single type ({\bf Backup}, {\bf Restore}, ...) can be specified for any +job. If you want to backup multiple FileSets on the same Client or multiple +Clients, you must define a Job for each one. + +Note, you define only a single Job to do the Full, Differential, and +Incremental backups since the different backup levels are tied together by +a unique Job name. Normally, you will have only one Job per Client, but +if a client has a really huge number of files (more than several million), +you might want to split it into to Jobs each with a different FileSet +covering only part of the total files. + + +\begin{description} + +\item [Job] + \index[dir]{Job} + \index[dir]{Directive!Job} + Start of the Job resource. At least one Job resource is required. + +\item [Name = \lt{}name\gt{}] + \index[dir]{Name} + \index[dir]{Directive!Name} + The Job name. This name can be specified on the {\bf Run} command in the + console program to start a job. If the name contains spaces, it must be + specified between quotes. It is generally a good idea to give your job the + same name as the Client that it will backup. This permits easy + identification of jobs. + + When the job actually runs, the unique Job Name will consist of the name you + specify here followed by the date and time the job was scheduled for + execution. This directive is required. + +\item [Enabled = \lt{}yes|no\gt{}] + \index[dir]{Enable} + \index[dir]{Directive!Enable} + This directive allows you to enable or disable automatic execution + via the scheduler of a Job. + +\item [Type = \lt{}job-type\gt{}] + \index[dir]{Type} + \index[dir]{Directive!Type} + The {\bf Type} directive specifies the Job type, which may be one of the + following: {\bf Backup}, {\bf Restore}, {\bf Verify}, or {\bf Admin}. This + directive is required. Within a particular Job Type, there are also Levels + as discussed in the next item. + +\begin{description} + +\item [Backup] + \index[dir]{Backup} + Run a backup Job. Normally you will have at least one Backup job for each + client you want to save. Normally, unless you turn off cataloging, most all + the important statistics and data concerning files backed up will be placed + in the catalog. + +\item [Restore] + \index[dir]{Restore} + Run a restore Job. Normally, you will specify only one Restore job + which acts as a sort of prototype that you will modify using the console + program in order to perform restores. 
Although certain basic + information from a Restore job is saved in the catalog, it is very + minimal compared to the information stored for a Backup job -- for + example, no File database entries are generated since no Files are + saved. + + {\bf Restore} jobs cannot be + automatically started by the scheduler as is the case for Backup, Verify + and Admin jobs. To restore files, you must use the {\bf restore} command + in the console. + + +\item [Verify] + \index[dir]{Verify} + Run a verify Job. In general, {\bf verify} jobs permit you to compare the + contents of the catalog to the file system, or to what was backed up. In + addition, to verifying that a tape that was written can be read, you can + also use {\bf verify} as a sort of tripwire intrusion detection. + +\item [Admin] + \index[dir]{Admin} + Run an admin Job. An {\bf Admin} job can be used to periodically run catalog + pruning, if you do not want to do it at the end of each {\bf Backup} Job. + Although an Admin job is recorded in the catalog, very little data is saved. +\end{description} + +\label{Level} + +\item [Level = \lt{}job-level\gt{}] +\index[dir]{Level} +\index[dir]{Directive!Level} + The Level directive specifies the default Job level to be run. Each + different Job Type (Backup, Restore, ...) has a different set of Levels + that can be specified. The Level is normally overridden by a different + value that is specified in the {\bf Schedule} resource. This directive + is not required, but must be specified either by a {\bf Level} directive + or as an override specified in the {\bf Schedule} resource. + +For a {\bf Backup} Job, the Level may be one of the following: + +\begin{description} + +\item [Full] +\index[dir]{Full} + When the Level is set to Full all files in the FileSet whether or not + they have changed will be backed up. + +\item [Incremental] + \index[dir]{Incremental} + When the Level is set to Incremental all files specified in the FileSet + that have changed since the last successful backup of the the same Job + using the same FileSet and Client, will be backed up. If the Director + cannot find a previous valid Full backup then the job will be upgraded + into a Full backup. When the Director looks for a valid backup record + in the catalog database, it looks for a previous Job with: + +\begin{itemize} +\item The same Job name. +\item The same Client name. +\item The same FileSet (any change to the definition of the FileSet such as + adding or deleting a file in the Include or Exclude sections constitutes a + different FileSet. +\item The Job was a Full, Differential, or Incremental backup. +\item The Job terminated normally (i.e. did not fail or was not canceled). +\end{itemize} + + If all the above conditions do not hold, the Director will upgrade the + Incremental to a Full save. Otherwise, the Incremental backup will be + performed as requested. + + The File daemon (Client) decides which files to backup for an + Incremental backup by comparing start time of the prior Job (Full, + Differential, or Incremental) against the time each file was last + "modified" (st\_mtime) and the time its attributes were last + "changed"(st\_ctime). If the file was modified or its attributes + changed on or after this start time, it will then be backed up. + + Some virus scanning software may change st\_ctime while + doing the scan. 
For example, if the virus scanning program attempts to + reset the access time (st\_atime), which Bacula does not use, it will + cause st\_ctime to change and hence Bacula will backup the file during + an Incremental or Differential backup. In the case of Sophos virus + scanning, you can prevent it from resetting the access time (st\_atime) + and hence changing st\_ctime by using the {\bf \verb:--:no-reset-atime} + option. For other software, please see their manual. + + When Bacula does an Incremental backup, all modified files that are + still on the system are backed up. However, any file that has been + deleted since the last Full backup remains in the Bacula catalog, which + means that if between a Full save and the time you do a restore, some + files are deleted, those deleted files will also be restored. The + deleted files will no longer appear in the catalog after doing another + Full save. However, to remove deleted files from the catalog during an + Incremental backup is quite a time consuming process and not currently + implemented in Bacula. + + In addition, if you move a directory rather than copy it, the files in + it do not have their modification time (st\_mtime) or their attribute + change time (st\_ctime) changed. As a consequence, those files will + probably not be backed up by an Incremental or Differential backup which + depend solely on these time stamps. If you move a directory, and wish + it to be properly backed up, it is generally preferable to copy it, then + delete the original. + +\item [Differential] + \index[dir]{Differential} + When the Level is set to Differential + all files specified in the FileSet that have changed since the last + successful Full backup of the same Job will be backed up. + If the Director cannot find a + valid previous Full backup for the same Job, FileSet, and Client, + backup, then the Differential job will be upgraded into a Full backup. + When the Director looks for a valid Full backup record in the catalog + database, it looks for a previous Job with: + +\begin{itemize} +\item The same Job name. +\item The same Client name. +\item The same FileSet (any change to the definition of the FileSet such as + adding or deleting a file in the Include or Exclude sections constitutes a + different FileSet. +\item The Job was a FULL backup. +\item The Job terminated normally (i.e. did not fail or was not canceled). +\end{itemize} + + If all the above conditions do not hold, the Director will upgrade the + Differential to a Full save. Otherwise, the Differential backup will be + performed as requested. + + The File daemon (Client) decides which files to backup for a + differential backup by comparing the start time of the prior Full backup + Job against the time each file was last "modified" (st\_mtime) and the + time its attributes were last "changed" (st\_ctime). If the file was + modified or its attributes were changed on or after this start time, it + will then be backed up. The start time used is displayed after the {\bf + Since} on the Job report. In rare cases, using the start time of the + prior backup may cause some files to be backed up twice, but it ensures + that no change is missed. As with the Incremental option, you should + ensure that the clocks on your server and client are synchronized or as + close as possible to avoid the possibility of a file being skipped. 
+ Note, on versions 1.33 or greater Bacula automatically makes the + necessary adjustments to the time between the server and the client so + that the times Bacula uses are synchronized. + + When Bacula does a Differential backup, all modified files that are + still on the system are backed up. However, any file that has been + deleted since the last Full backup remains in the Bacula catalog, which + means that if between a Full save and the time you do a restore, some + files are deleted, those deleted files will also be restored. The + deleted files will no longer appear in the catalog after doing another + Full save. However, to remove deleted files from the catalog during a + Differential backup is quite a time consuming process and not currently + implemented in Bacula. It is, however, a planned future feature. + + As noted above, if you move a directory rather than copy it, the + files in it do not have their modification time (st\_mtime) or + their attribute change time (st\_ctime) changed. As a + consequence, those files will probably not be backed up by an + Incremental or Differential backup which depend solely on these + time stamps. If you move a directory, and wish it to be + properly backed up, it is generally preferable to copy it, then + delete the original. Alternatively, you can move the directory, then + use the {\bf touch} program to update the timestamps. + + Every once and a while, someone asks why we need Differential + backups as long as Incremental backups pickup all changed files. + There are possibly many answers to this question, but the one + that is the most important for me is that a Differential backup + effectively merges + all the Incremental and Differential backups since the last Full backup + into a single Differential backup. This has two effects: 1. It gives + some redundancy since the old backups could be used if the merged backup + cannot be read. 2. More importantly, it reduces the number of Volumes + that are needed to do a restore effectively eliminating the need to read + all the volumes on which the preceding Incremental and Differential + backups since the last Full are done. + +\end{description} + +For a {\bf Restore} Job, no level needs to be specified. + +For a {\bf Verify} Job, the Level may be one of the following: + +\begin{description} + +\item [InitCatalog] +\index[dir]{InitCatalog} + does a scan of the specified {\bf FileSet} and stores the file + attributes in the Catalog database. Since no file data is saved, you + might ask why you would want to do this. It turns out to be a very + simple and easy way to have a {\bf Tripwire} like feature using {\bf + Bacula}. In other words, it allows you to save the state of a set of + files defined by the {\bf FileSet} and later check to see if those files + have been modified or deleted and if any new files have been added. + This can be used to detect system intrusion. Typically you would + specify a {\bf FileSet} that contains the set of system files that + should not change (e.g. /sbin, /boot, /lib, /bin, ...). Normally, you + run the {\bf InitCatalog} level verify one time when your system is + first setup, and then once again after each modification (upgrade) to + your system. Thereafter, when your want to check the state of your + system files, you use a {\bf Verify} {\bf level = Catalog}. This + compares the results of your {\bf InitCatalog} with the current state of + the files. 
+ +\item [Catalog] +\index[dir]{Catalog} + Compares the current state of the files against the state previously + saved during an {\bf InitCatalog}. Any discrepancies are reported. The + items reported are determined by the {\bf verify} options specified on + the {\bf Include} directive in the specified {\bf FileSet} (see the {\bf + FileSet} resource below for more details). Typically this command will + be run once a day (or night) to check for any changes to your system + files. + + Please note! If you run two Verify Catalog jobs on the same client at + the same time, the results will certainly be incorrect. This is because + Verify Catalog modifies the Catalog database while running in order to + track new files. + +\item [VolumeToCatalog] +\index[dir]{VolumeToCatalog} + This level causes Bacula to read the file attribute data written to the + Volume from the last Job. The file attribute data are compared to the + values saved in the Catalog database and any differences are reported. + This is similar to the {\bf Catalog} level except that instead of + comparing the disk file attributes to the catalog database, the + attribute data written to the Volume is read and compared to the catalog + database. Although the attribute data including the signatures (MD5 or + SHA1) are compared, the actual file data is not compared (it is not in + the catalog). + + Please note! If you run two Verify VolumeToCatalog jobs on the same + client at the same time, the results will certainly be incorrect. This + is because the Verify VolumeToCatalog modifies the Catalog database + while running. + +\item [DiskToCatalog] +\index[dir]{DiskToCatalog} + This level causes Bacula to read the files as they currently are on + disk, and to compare the current file attributes with the attributes + saved in the catalog from the last backup for the job specified on the + {\bf VerifyJob} directive. This level differs from the {\bf Catalog} + level described above by the fact that it doesn't compare against a + previous Verify job but against a previous backup. When you run this + level, you must supply the verify options on your Include statements. + Those options determine what attribute fields are compared. + + This command can be very useful if you have disk problems because it + will compare the current state of your disk against the last successful + backup, which may be several jobs. + + Note, the current implementation (1.32c) does not identify files that + have been deleted. +\end{description} + +\item [Verify Job = \lt{}Job-Resource-Name\gt{}] + \index[dir]{Verify Job} + \index[dir]{Directive!Verify Job} + If you run a verify job without this directive, the last job run will be + compared with the catalog, which means that you must immediately follow + a backup by a verify command. If you specify a {\bf Verify Job} Bacula + will find the last job with that name that ran. This permits you to run + all your backups, then run Verify jobs on those that you wish to be + verified (most often a {\bf VolumeToCatalog}) so that the tape just + written is re-read. + +\item [JobDefs = \lt{}JobDefs-Resource-Name\gt{}] +\index[dir]{JobDefs} +\index[dir]{Directive!JobDefs} + If a JobDefs-Resource-Name is specified, all the values contained in the + named JobDefs resource will be used as the defaults for the current Job. + Any value that you explicitly define in the current Job resource, will + override any defaults specified in the JobDefs resource. 
The use of + this directive permits writing much more compact Job resources where the + bulk of the directives are defined in one or more JobDefs. This is + particularly useful if you have many similar Jobs but with minor + variations such as different Clients. A simple example of the use of + JobDefs is provided in the default bacula-dir.conf file. + +\item [Bootstrap = \lt{}bootstrap-file\gt{}] +\index[dir]{Bootstrap} +\index[dir]{Directive!Bootstrap} + The Bootstrap directive specifies a bootstrap file that, if provided, + will be used during {\bf Restore} Jobs and is ignored in other Job + types. The {\bf bootstrap} file contains the list of tapes to be used + in a restore Job as well as which files are to be restored. + Specification of this directive is optional, and if specified, it is + used only for a restore job. In addition, when running a Restore job + from the console, this value can be changed. + + If you use the {\bf Restore} command in the Console program, to start a + restore job, the {\bf bootstrap} file will be created automatically from + the files you select to be restored. + + For additional details of the {\bf bootstrap} file, please see + \ilink{Restoring Files with the Bootstrap File}{BootstrapChapter} chapter + of this manual. + +\label{writebootstrap} +\item [Write Bootstrap = \lt{}bootstrap-file-specification\gt{}] +\index[dir]{Write Bootstrap} +\index[dir]{Directive!Write Bootstrap} + The {\bf writebootstrap} directive specifies a file name where Bacula + will write a {\bf bootstrap} file for each Backup job run. This + directive applies only to Backup Jobs. If the Backup job is a Full + save, Bacula will erase any current contents of the specified file + before writing the bootstrap records. If the Job is an Incremental + or Differential + save, Bacula will append the current bootstrap record to the end of the + file. + + Using this feature, permits you to constantly have a bootstrap file that + can recover the current state of your system. Normally, the file + specified should be a mounted drive on another machine, so that if your + hard disk is lost, you will immediately have a bootstrap record + available. Alternatively, you should copy the bootstrap file to another + machine after it is updated. Note, it is a good idea to write a separate + bootstrap file for each Job backed up including the job that backs up + your catalog database. + + If the {\bf bootstrap-file-specification} begins with a vertical bar + (|), Bacula will use the specification as the name of a program to which + it will pipe the bootstrap record. It could for example be a shell + script that emails you the bootstrap record. + + On versions 1.39.22 or greater, before opening the file or executing the + specified command, Bacula performs + \ilink{character substitution}{character substitution} like in RunScript + directive. To automatically manage your bootstrap files, you can use + this in your {\bf JobDefs} resources: +\begin{verbatim} +JobDefs { + Write Bootstrap = "%c_%n.bsr" + ... +} +\end{verbatim} + + For more details on using this file, please see the chapter entitled + \ilink{The Bootstrap File}{BootstrapChapter} of this manual. + +\item [Client = \lt{}client-resource-name\gt{}] +\index[dir]{Client} +\index[dir]{Directive!Client} + The Client directive specifies the Client (File daemon) that will be used in + the current Job. Only a single Client may be specified in any one Job. 
The + Client runs on the machine to be backed up, and sends the requested files to + the Storage daemon for backup, or receives them when restoring. For + additional details, see the + \ilink{Client Resource section}{ClientResource2} of this chapter. + This directive is required. + +\item [FileSet = \lt{}FileSet-resource-name\gt{}] +\index[dir]{FileSet} +\index[dir]{FileSet} + The FileSet directive specifies the FileSet that will be used in the + current Job. The FileSet specifies which directories (or files) are to + be backed up, and what options to use (e.g. compression, ...). Only a + single FileSet resource may be specified in any one Job. For additional + details, see the \ilink{FileSet Resource section}{FileSetResource} of + this chapter. This directive is required. + +\item [Messages = \lt{}messages-resource-name\gt{}] +\index[dir]{Messages} +\index[dir]{Directive!Messages} + The Messages directive defines what Messages resource should be used for + this job, and thus how and where the various messages are to be + delivered. For example, you can direct some messages to a log file, and + others can be sent by email. For additional details, see the + \ilink{Messages Resource}{MessagesChapter} Chapter of this manual. This + directive is required. + +\item [Pool = \lt{}pool-resource-name\gt{}] +\index[dir]{Pool} +\index[dir]{Directive!Pool} + The Pool directive defines the pool of Volumes where your data can be + backed up. Many Bacula installations will use only the {\bf Default} + pool. However, if you want to specify a different set of Volumes for + different Clients or different Jobs, you will probably want to use + Pools. For additional details, see the \ilink{Pool Resource + section}{PoolResource} of this chapter. This directive is required. + +\item [Full Backup Pool = \lt{}pool-resource-name\gt{}] +\index[dir]{Full Backup Pool} +\index[dir]{Directive!Full Backup Pool} + The {\it Full Backup Pool} specifies a Pool to be used for Full backups. + It will override any Pool specification during a Full backup. This + directive is optional. + +\item [Differential Backup Pool = \lt{}pool-resource-name\gt{}] +\index[dir]{Differential Backup Pool} +\index[dir]{Directive!Differential Backup Pool} + The {\it Differential Backup Pool} specifies a Pool to be used for + Differential backups. It will override any Pool specification during a + Differential backup. This directive is optional. + +\item [Incremental Backup Pool = \lt{}pool-resource-name\gt{}] +\index[dir]{Incremental Backup Pool} +\index[dir]{Directive!Incremental Backup Pool} + The {\it Incremental Backup Pool} specifies a Pool to be used for + Incremental backups. It will override any Pool specification during an + Incremental backup. This directive is optional. + +\item [Schedule = \lt{}schedule-name\gt{}] +\index[dir]{Schedule} +\index[dir]{Directive!Schedule} + The Schedule directive defines what schedule is to be used for the Job. + The schedule in turn determines when the Job will be automatically + started and what Job level (i.e. Full, Incremental, ...) is to be run. + This directive is optional, and if left out, the Job can only be started + manually using the Console program. Although you may specify only a + single Schedule resource for any one job, the Schedule resource may + contain multiple {\bf Run} directives, which allow you to run the Job at + many different times, and each {\bf run} directive permits overriding + the default Job Level Pool, Storage, and Messages resources. 
This gives + considerable flexibility in what can be done with a single Job. For + additional details, see the \ilink{Schedule Resource + Chapter}{ScheduleResource} of this manual. + + +\item [Storage = \lt{}storage-resource-name\gt{}] +\index[dir]{Storage} +\index[dir]{Directive!Storage} + The Storage directive defines the name of the storage services where you + want to backup the FileSet data. For additional details, see the + \ilink{Storage Resource Chapter}{StorageResource2} of this manual. + The Storage resource may also be specified in the Job's Pool resource, + in which case the value in the Pool resource overrides any value + in the Job. This Storage resource definition is not required by either + the Job resource or in the Pool, but it must be specified in + one or the other, if not an error will result. + +\item [Max Start Delay = \lt{}time\gt{}] +\index[dir]{Max Start Delay} +\index[dir]{Directive!Max Start Delay} + The time specifies the maximum delay between the scheduled time and the + actual start time for the Job. For example, a job can be scheduled to + run at 1:00am, but because other jobs are running, it may wait to run. + If the delay is set to 3600 (one hour) and the job has not begun to run + by 2:00am, the job will be canceled. This can be useful, for example, + to prevent jobs from running during day time hours. The default is 0 + which indicates no limit. + +\item [Max Run Time = \lt{}time\gt{}] +\index[dir]{Max Run Time} +\index[dir]{Directive!Max Run Time} + The time specifies the maximum allowed time that a job may run, counted + from when the job starts, ({\bf not} necessarily the same as when the + job was scheduled). This directive is implemented in version 1.33 and + later. + +\item [Max Wait Time = \lt{}time\gt{}] +\index[dir]{Max Wait Time} +\index[dir]{Directive!Max Wait Time} + The time specifies the maximum allowed time that a job may block waiting + for a resource (such as waiting for a tape to be mounted, or waiting for + the storage or file daemons to perform their duties), counted from the + when the job starts, ({\bf not} necessarily the same as when the job was + scheduled). This directive is implemented only in version 1.33 and + later. + +\item [Incremental Max Wait Time = \lt{}time\gt{}] +\index[dir]{Incremental Max Wait Time} +\index[dir]{Directive!Incremental Max Wait Time} + The time specifies the maximum allowed time that an Incremental backup + job may block waiting for a resource (such as waiting for a tape to be + mounted, or waiting for the storage or file daemons to perform their + duties), counted from the when the job starts, ({\bf not} necessarily + the same as when the job was scheduled). Please note that if there is a + {\bf Max Wait Time} it may also be applied to the job. + +\item [Differential Max Wait Time = \lt{}time\gt{}] +\index[dir]{Differential Max Wait Time} +\index[dir]{Directive!Differential Max Wait Time} + The time specifies the maximum allowed time that a Differential backup + job may block waiting for a resource (such as waiting for a tape to be + mounted, or waiting for the storage or file daemons to perform their + duties), counted from the when the job starts, ({\bf not} necessarily + the same as when the job was scheduled). Please note that if there is a + {\bf Max Wait Time} it may also be applied to the job. 
+ +\label{PreferMountedVolumes} +\item [Prefer Mounted Volumes = \lt{}yes|no\gt{}] +\index[dir]{Prefer Mounted Volumes} +\index[dir]{Directive!Prefer Mounted Volumes} + If the Prefer Mounted Volumes directive is set to {\bf yes} (default + yes), the Storage daemon is requested to select either an Autochanger or + a drive with a valid Volume already mounted in preference to a drive + that is not ready. This means that all jobs will attempt to append + to the same Volume (providing the Volume is appropriate -- right Pool, + ... for that job). If no drive with a suitable Volume is available, it + will select the first available drive. Note that any Volume that has + been requested to be mounted will be considered valid as a mounted + volume by another job. Thus if multiple jobs start at the same time + and they all prefer mounted volumes, the first job will request the + mount, and the other jobs will use the same volume. + + If the directive is set to {\bf no}, the Storage daemon will prefer + finding an unused drive; otherwise, each job started will append to the + same Volume (assuming the Pool is the same for all jobs). Setting + Prefer Mounted Volumes to no can be useful for those sites + with multiple-drive autochangers that prefer to maximize backup + throughput at the expense of using additional drives and Volumes. + This means that the job will prefer to use an unused drive rather + than use a drive that is already in use. + +\item [Prune Jobs = \lt{}yes|no\gt{}] +\index[dir]{Prune Jobs} +\index[dir]{Directive!Prune Jobs} + Normally, pruning of Jobs from the Catalog is specified on a Client by + Client basis in the Client resource with the {\bf AutoPrune} directive. + If this directive is specified (not normally) and the value is {\bf + yes}, it will override the value specified in the Client resource. The + default is {\bf no}. + + +\item [Prune Files = \lt{}yes|no\gt{}] +\index[dir]{Prune Files} +\index[dir]{Directive!Prune Files} + Normally, pruning of Files from the Catalog is specified on a Client by + Client basis in the Client resource with the {\bf AutoPrune} directive. + If this directive is specified (not normally) and the value is {\bf + yes}, it will override the value specified in the Client resource. The + default is {\bf no}. + +\item [Prune Volumes = \lt{}yes|no\gt{}] +\index[dir]{Prune Volumes} +\index[dir]{Directive!Prune Volumes} + Normally, pruning of Volumes from the Catalog is specified on a Client + by Client basis in the Client resource with the {\bf AutoPrune} + directive. If this directive is specified (not normally) and the value + is {\bf yes}, it will override the value specified in the Client + resource. The default is {\bf no}. + +\item [RunScript \{\lt{}body-of-runscript\gt{}\}] + \index[dir]{RunScript} + \index[dir]{Directive!Run Script} + + This directive is implemented in version 1.39.22 and later. + The RunScript directive behaves like a resource in that it + requires opening and closing braces around a number of directives + that make up the body of the runscript. + + The specified {\bf Command} (see below for details) is run as an + external program prior to or after the current Job. This is optional.
+ + The following options may be specified in the body + of the runscript:\\ + +\begin{tabular}{|c|c|c|l} +Options & Value & Default & Information \\ +\hline +\hline +Runs On Success & Yes/No & {\it Yes} & Run command if JobStatus is successful\\ +\hline +Runs On Failure & Yes/No & {\it No} & Run command if JobStatus isn't successful\\ +\hline +Runs On Client & Yes/No & {\it Yes} & Run command on client\\ +\hline +Runs When & Before|After|Always & {\it Never} & When to run commands\\ +\hline +Fail Job On Error & Yes/No & {\it Yes} & Fail job if script returns + a non-zero value \\ +\hline +Command & & & Path to your script\\ +\hline +\end{tabular} + \\ + + Any output sent by the command to standard output will be included in the + Bacula job report. The command string must be a valid program name or name + of a shell script. + + In addition, the command string is parsed then fed to the OS, + which means that the path will be searched to execute your specified + command, but there is no shell interpretation; as a consequence, if you + invoke complicated commands or want any shell features such as redirection + or piping, you must call a shell script and do it inside that script. + + Before submitting the specified command to the operating system, Bacula + performs character substitution of the following characters: + +\label{character substitution} +\footnotesize +\begin{verbatim} + %% = % + %c = Client's name + %d = Director's name + %e = Job Exit Status + %i = JobId + %j = Unique Job id + %l = Job Level + %n = Job name + %s = Since time + %t = Job type (Backup, ...) + %v = Volume name + +\end{verbatim} +\normalsize + +The Job Exit Status code \%e expands to one of the following values: + +\index[dir]{Exit Status} +\begin{itemize} +\item OK +\item Error +\item Fatal Error +\item Canceled +\item Differences +\item Unknown term code +\end{itemize} + + Thus if you use it on a command line, you will need to enclose + it within some sort of quotes. + + +You can use the following shortcuts:\\ + +\begin{tabular}{|c|c|c|c|c|c} +Keyword & RunsOnSuccess & RunsOnFailure & FailJobOnError & Runs On Client & RunsWhen \\ +\hline +Run Before Job & & & Yes & No & Before \\ +\hline +Run After Job & Yes & No & & No & After \\ +\hline +Run After Failed Job & No & Yes & & No & After \\ +\hline +Client Run Before Job & & & Yes & Yes & Before \\ +\hline +Client Run After Job & Yes & No & & Yes & After \\ +\end{tabular} + +Examples: +\begin{verbatim} +RunScript { + RunsWhen = Before + FailJobOnError = No + Command = "/etc/init.d/apache stop" +} + +RunScript { + RunsWhen = After + RunsOnFailure = yes + Command = "/etc/init.d/apache start" +} +\end{verbatim} + + {\bf Special Windows Considerations} + + In addition, for a Windows client on version 1.33 and above, please take + note that you must ensure a correct path to your script. The script or + program can be a .com, .exe or a .bat file. If you just put the program + name in, then Bacula will search using the same rules that cmd.exe uses + (current directory, Bacula bin directory, and PATH). It will even try the + different extensions in the same order as cmd.exe. + The command can be anything that cmd.exe or command.com will recognize + as an executable file. + + However, if you have slashes in the program name, then Bacula assumes you + are fully specifying the name, so you must also explicitly add the + three-character extension.
+ + The command is run in a Win32 environment, so Unix-like commands will not + work unless you have installed and properly configured Cygwin in addition + to and separately from Bacula. + + The System \%Path\% will be searched for the command. (Under the + environment variable dialog you have both System Environment and + User Environment; we believe that only the System environment will be + available to bacula-fd if it is running as a service.) + + System environment variables can be referenced with \%var\% and + used as either part of the command name or arguments. + + So if you have a script in the Bacula\\bin directory, then the following lines + should work fine: + +\footnotesize +\begin{verbatim} + Client Run Before Job = systemstate +or + Client Run Before Job = systemstate.bat +or + Client Run Before Job = "systemstate" +or + Client Run Before Job = "systemstate.bat" +or + ClientRunBeforeJob = "\"C:/Program Files/Bacula/systemstate.bat\"" +\end{verbatim} +\normalsize + +The outer set of quotes is removed when the configuration file is parsed. +You need to escape the inner quotes so that they are there when the code +that parses the command line for execution runs, so it can tell what the +program name is. + + +\footnotesize +\begin{verbatim} +ClientRunBeforeJob = "\"C:/Program Files/Software + Vendor/Executable\" /arg1 /arg2 \"foo bar\"" +\end{verbatim} +\normalsize + + The special characters +\begin{verbatim} +&<>()@^| +\end{verbatim} + will need to be quoted + if they are part of a filename or argument. + + If someone is logged in, a blank "command" window running the commands + will be present during the execution of the command. + + Some suggestions from Phil Stracchino for running on Win32 machines with + the native Win32 File daemon: + + \begin{enumerate} + \item You might want the ClientRunBeforeJob directive to specify a .bat + file which runs the actual client-side commands, rather than trying + to run (for example) regedit /e directly. + \item The batch file should explicitly 'exit 0' on successful completion. + \item The path to the batch file should be specified in Unix form: + + ClientRunBeforeJob = "c:/bacula/bin/systemstate.bat" + + rather than DOS/Windows form: + + ClientRunBeforeJob = + +"c:\textbackslash{}bacula\textbackslash{}bin\textbackslash{}systemstate.bat" + INCORRECT + \end{enumerate} + +For Win32, please note that there are certain limitations: + +ClientRunBeforeJob = "C:/Program Files/Bacula/bin/pre-exec.bat" + +Lines like the above do not work because of limitations in +cmd.exe, which is used to execute the command. +Bacula prefixes the string you supply with {\bf cmd.exe /c }. To test that +your command works, you should type {\bf cmd /c "C:/Program Files/test.exe"} at a +cmd prompt and see what happens. Once the command is correct, insert a +backslash (\textbackslash{}) before each double quote ("), and +then put quotes around the whole thing when putting it in +the director's .conf file. You either need to have only one set of quotes +or else use the short name and don't put quotes around the command path. + +Below is the output from cmd's help as it relates to the command line +passed to the /c option.
+ + + If /C or /K is specified, then the remainder of the command line after + the switch is processed as a command line, where the following logic is + used to process quote (") characters: + +\begin{enumerate} +\item + If all of the following conditions are met, then quote characters + on the command line are preserved: + \begin{itemize} + \item no /S switch. + \item exactly two quote characters. + \item no special characters between the two quote characters, + where special is one of: +\begin{verbatim} +&<>()@^| +\end{verbatim} + \item there are one or more whitespace characters between the + two quote characters. + \item the string between the two quote characters is the name + of an executable file. + \end{itemize} + +\item Otherwise, old behavior is to see if the first character is + a quote character and if so, strip the leading character and + remove the last quote character on the command line, preserving + any text after the last quote character. + +\end{enumerate} + + +The following example of the use of the Client Run Before Job directive was +submitted by a user:\\ +You could write a shell script to back up a DB2 database to a FIFO. The shell +script is: + +\footnotesize +\begin{verbatim} + #!/bin/sh + # ===== backupdb.sh + DIR=/u01/mercuryd + + mkfifo $DIR/dbpipe + db2 BACKUP DATABASE mercuryd TO $DIR/dbpipe WITHOUT PROMPTING & + sleep 1 +\end{verbatim} +\normalsize + +The following line is then added to the Job resource in the bacula-dir.conf file: +\footnotesize +\begin{verbatim} + Client Run Before Job = "su - mercuryd -c \"/u01/mercuryd/backupdb.sh '%t' +'%l'\"" +\end{verbatim} +\normalsize + +When the job is run, you will get messages from the output of the script +stating that the backup has started. Even though the command being run is +backgrounded with \&, the job will block until the "db2 BACKUP DATABASE" +command completes, and thus the backup stalls. + +To remedy this situation, the "db2 BACKUP DATABASE" line should be changed to +the following: + +\footnotesize +\begin{verbatim} + db2 BACKUP DATABASE mercuryd TO $DIR/dbpipe WITHOUT PROMPTING > $DIR/backup.log +2>&1 < /dev/null & +\end{verbatim} +\normalsize + +It is important to redirect the input and output of a backgrounded command to +/dev/null to prevent the script from blocking. + +\item [Run Before Job = \lt{}command\gt{}] +\index[dir]{Run Before Job} +\index[dir]{Directive!Run Before Job} +The specified {\bf command} is run as an external program prior to running the +current Job. This directive is not required, but if it is defined, and if the +exit code of the program run is non-zero, the current Bacula job will be +canceled. + +\begin{verbatim} +Run Before Job = "echo test" +\end{verbatim} + It is equivalent to: +\begin{verbatim} +RunScript { + Command = "echo test" + RunsOnClient = No + RunsWhen = Before +} +\end{verbatim} + + Lutz Kittler has pointed out that using the RunBeforeJob directive can be a + simple way to modify your schedules during a holiday. For example, suppose + that you normally do Full backups on Fridays, but Thursday and Friday are + holidays. To avoid having to change tapes between Thursday and Friday when + no one is in the office, you can create a RunBeforeJob that returns a + non-zero status on Thursday and zero on all other days. That way, the + Thursday job will not run, and on Friday the tape you inserted on Wednesday + before leaving will be used.
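+ A minimal sketch of such a holiday script (the path and file name are hypothetical, and the abbreviated day name printed by date +\%a depends on the locale) might look like this:
+
+\footnotesize
+\begin{verbatim}
+#!/bin/sh
+# skip-thursday.sh -- hypothetical RunBeforeJob helper: return a
+# non-zero status on Thursday so that Bacula cancels the job that day.
+# Note: the day abbreviation from "date +%a" depends on the locale.
+if [ "`date +%a`" = "Thu" ]; then
+   exit 1
+fi
+exit 0
+\end{verbatim}
+\normalsize
+
+ The Job resource would then reference it with, for example, Run Before Job = "/usr/local/bin/skip-thursday.sh" (hypothetical path).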
+ +\item [Run After Job = \lt{}command\gt{}] +\index[dir]{Run After Job} +\index[dir]{Directive!Run After Job} + The specified {\bf command} is run as an external program if the current + job terminates normally (without error and without being canceled). This + directive is not required. If the exit code of the program run is + non-zero, Bacula will print a warning message. Before submitting the + specified command to the operating system, Bacula performs character + substitution as described above for the {\bf RunScript} directive. + + An example of the use of this directive is given in the + \ilink{Tips Chapter}{JobNotification} of this manual. + + See the {\bf Run After Failed Job} directive if you + want to run a script after the job has terminated with any + non-normal status. + +\item [Run After Failed Job = \lt{}command\gt{}] +\index[dir]{Run After Failed Job} +\index[dir]{Directive!Run After Failed Job} + The specified {\bf command} is run as an external program after the current + job terminates with any error status. This directive is not required. The + command string must be a valid program name or name of a shell script. If + the exit code of the program run is non-zero, Bacula will print a + warning message. Before submitting the specified command to the + operating system, Bacula performs character substitution as described above + for the {\bf RunScript} directive. Note: if you want your script + to run regardless of the exit status of the Job, you can use this: +\begin{verbatim} +RunScript { + Command = "echo test" + RunsWhen = After + RunsOnFailure = yes + RunsOnClient = no + RunsOnSuccess = yes # default, you can drop this line +} +\end{verbatim} + + An example of the use of this directive is given in the + \ilink{Tips Chapter}{JobNotification} of this manual. + + +\item [Client Run Before Job = \lt{}command\gt{}] +\index[dir]{Client Run Before Job} +\index[dir]{Directive!Client Run Before Job} + This directive is the same as {\bf Run Before Job} except that the + program is run on the client machine. The same restrictions apply to + Unix systems as noted above for the {\bf RunScript} directive. + +\item [Client Run After Job = \lt{}command\gt{}] + \index[dir]{Client Run After Job} + \index[dir]{Directive!Client Run After Job} + The specified {\bf command} is run on the client machine as soon + as data spooling is complete, in order to allow restarting applications + on the client as soon as possible. + + Please see the notes above in {\bf RunScript} + concerning Windows clients. + +\item [Rerun Failed Levels = \lt{}yes|no\gt{}] + \index[dir]{Rerun Failed Levels} + \index[dir]{Directive!Rerun Failed Levels} + If this directive is set to {\bf yes} (default no), and Bacula detects that + a previous job at a higher level (i.e. Full or Differential) has failed, + the current job level will be upgraded to the higher level. This is + particularly useful for laptops, which may often be unreachable, and if + a prior Full save has failed, you wish the very next backup to be a Full + save rather than whatever level it is started as. + + There are several points that must be taken into account when using this + directive: first, a failed job is defined as one that has not terminated + normally, which includes any running job of the same name (you need to + ensure that two jobs of the same name do not run simultaneously); + secondly, the {\bf Ignore FileSet Changes} directive is not considered + when checking for failed levels, which means that any FileSet change will + trigger a rerun.
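+ As a sketch of how this directive might be used for such a laptop (the job name is hypothetical and the other required Job directives are omitted):
+
+\footnotesize
+\begin{verbatim}
+# Hypothetical sketch: if the previous Full or Differential backup of
+# this laptop failed, upgrade the next run to that failed level.
+Job {
+  Name = "laptop-backup"        # hypothetical name
+  ...                           # other required directives omitted
+  Rerun Failed Levels = yes
+}
+\end{verbatim}
+\normalsize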
+ +\item [Spool Data = \lt{}yes|no\gt{}] + \index[dir]{Spool Data} + \index[dir]{Directive!Spool Data} + + If this directive is set to {\bf yes} (default no), the Storage daemon will + be requested to spool the data for this Job to disk rather than write it + directly to tape. Once all the data arrives or the spool files' maximum sizes + are reached, the data will be despooled and written to tape. Spooling data + prevents tape shoe-shine (start and stop) during + Incremental saves. If you are writing to a disk file, using this option + will probably just slow down the backup jobs. + + NOTE: When this directive is set to yes, Spool Attributes is also + automatically set to yes. + +\item [Spool Attributes = \lt{}yes|no\gt{}] + \index[dir]{Spool Attributes} + \index[dir]{Directive!Spool Attributes} + \index[dir]{slow} + \index[general]{slow} + \index[dir]{Backups!slow} + \index[general]{Backups!slow} + The default is set to {\bf no}, which means that the File attributes are + sent by the Storage daemon to the Director as they are stored on tape. + However, if you want to avoid the possibility that database updates will + slow down writing to the tape, you may want to set the value to {\bf + yes}, in which case the Storage daemon will buffer the File attributes + and Storage coordinates to a temporary file in the Working Directory; + then, when writing the Job data to the tape is completed, the attributes + and storage coordinates will be sent to the Director. + + NOTE: When Spool Data is set to yes, Spool Attributes is also + automatically set to yes. + +\item [Where = \lt{}directory\gt{}] + \index[dir]{Where} + \index[dir]{Directive!Where} + This directive applies only to a Restore job and specifies a prefix to + the directory name of all files being restored. This permits files to + be restored in a different location from which they were saved. If {\bf + Where} is not specified or is set to a forward slash ({\bf /}), the files will + be restored to their original location. By default, we have set {\bf + Where} in the example configuration files to be {\bf + /tmp/bacula-restores}. This is to prevent accidental overwriting of + your files. + +\item [Add Prefix = \lt{}directory\gt{}] + \label{confaddprefix} + \index[dir]{AddPrefix} + \index[dir]{Directive!AddPrefix} + This directive applies only to a Restore job and specifies a prefix to the + directory name of all files being restored. This will use the \ilink{File + Relocation}{filerelocation} feature implemented in Bacula 2.1.8 or later. + +\item [Add Suffix = \lt{}extension\gt{}] + \index[dir]{AddSuffix} + \index[dir]{Directive!AddSuffix} + This directive applies only to a Restore job and specifies a suffix to all + files being restored. This will use the \ilink{File Relocation}{filerelocation} + feature implemented in Bacula 2.1.8 or later. + + Using \texttt{Add Suffix=.old}, \texttt{/etc/passwd} will be restored to + \texttt{/etc/passwd.old}. + +\item [Strip Prefix = \lt{}directory\gt{}] + \index[dir]{StripPrefix} + \index[dir]{Directive!StripPrefix} + This directive applies only to a Restore job and specifies a prefix to remove + from the directory name of all files being restored. This will use the + \ilink{File Relocation}{filerelocation} feature implemented in Bacula 2.1.8 + or later.
+ + Using \texttt{Strip Prefix=/etc}, \texttt{/etc/passwd} will be restored to + \texttt{/passwd}. + + Under Windows, if you want to restore \texttt{c:/files} to \texttt{d:/files}, + you can use: + +\begin{verbatim} + Strip Prefix = c: + Add Prefix = d: +\end{verbatim} + +\item [RegexWhere = \lt{}expressions\gt{}] + \index[dir]{RegexWhere} + \index[dir]{Directive!RegexWhere} + This directive applies only to a Restore job and specifies a regex filename + manipulation of all files being restored. This will use the \ilink{File + Relocation}{filerelocation} feature implemented in Bacula 2.1.8 or later. + + For more information about how to use this option, see + \ilink{this section}{useregexwhere}. + +\item [Replace = \lt{}replace-option\gt{}] + \index[dir]{Replace} + \index[dir]{Directive!Replace} + This directive applies only to a Restore job and specifies what happens + when Bacula wants to restore a file or directory that already exists. + You have the following options for {\bf replace-option}: + +\begin{description} + +\item [always] + \index[dir]{always} + when the file to be restored already exists, it is deleted and then + replaced by the copy that was backed up. + +\item [ifnewer] +\index[dir]{ifnewer} + if the backed up file (on tape) is newer than the existing file, the + existing file is deleted and replaced by the backup. + +\item [ifolder] + \index[dir]{ifolder} + if the backed up file (on tape) is older than the existing file, the + existing file is deleted and replaced by the backup. + +\item [never] + \index[dir]{never} + if the backed up file already exists, Bacula skips restoring this file. +\end{description} + +\item [Prefix Links=\lt{}yes|no\gt{}] + \index[dir]{Prefix Links} + \index[dir]{Directive!Prefix Links} + If a {\bf Where} path prefix is specified for a recovery job, apply it + to absolute links as well. The default is {\bf No}. When set to {\bf + Yes}, then while restoring files to an alternate directory, any absolute + soft links will also be modified to point to the new alternate + directory. Normally this is what is desired -- i.e. everything is self + consistent. However, if you wish to later move the files to their + original locations, all files linked with absolute names will be broken. + +\item [Maximum Concurrent Jobs = \lt{}number\gt{}] + \index[dir]{Maximum Concurrent Jobs} + \index[dir]{Directive!Maximum Concurrent Jobs} + where \lt{}number\gt{} is the maximum number of Jobs from the current + Job resource that can run concurrently. Note, this directive limits + only Jobs with the same name as the resource in which it appears. Any + other restrictions on the maximum concurrent jobs such as in the + Director, Client, or Storage resources will also apply in addition to + the limit specified here. The default is set to 1, but you may set it + to a larger number. We strongly recommend that you read the WARNING + documented under \ilink{Maximum Concurrent Jobs}{DirMaxConJobs} in the + Director's resource. + +\item [Reschedule On Error = \lt{}yes|no\gt{}] + \index[dir]{Reschedule On Error} + \index[dir]{Directive!Reschedule On Error} + If this directive is enabled, and the job terminates in error, the job + will be rescheduled as determined by the {\bf Reschedule Interval} and + {\bf Reschedule Times} directives. If you cancel the job, it will not + be rescheduled. The default is {\bf no} (i.e. the job will not be + rescheduled). + + This specification can be useful for portables, laptops, or other + machines that are not always connected to the network or switched on.
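+ A minimal sketch combining this directive with the {\bf Reschedule Interval} and {\bf Reschedule Times} directives described next (the job name and values are hypothetical, and the other required Job directives are omitted):
+
+\footnotesize
+\begin{verbatim}
+# Hypothetical sketch: retry a failed backup once per hour, at most
+# five times, which suits machines that are not always reachable.
+Job {
+  Name = "roaming-laptop"       # hypothetical name
+  ...                           # other required directives omitted
+  Reschedule On Error = yes
+  Reschedule Interval = 1 hour
+  Reschedule Times = 5
+}
+\end{verbatim}
+\normalsize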
+ +\item [Reschedule Interval = \lt{}time-specification\gt{}] + \index[dir]{Reschedule Interval} + \index[dir]{Directive!Reschedule Interval} + If you have specified {\bf Reschedule On Error = yes} and the job + terminates in error, it will be rescheduled after the interval of time + specified by {\bf time-specification}. See \ilink{the time + specification formats}{Time} in the Configure chapter for details of + time specifications. If no interval is specified, the job will not be + rescheduled on error. + +\item [Reschedule Times = \lt{}count\gt{}] + \index[dir]{Reschedule Times} + \index[dir]{Directive!Reschedule Times} + This directive specifies the maximum number of times to reschedule the + job. If it is set to zero (the default) the job will be rescheduled an + indefinite number of times. + +\item [Run = \lt{}job-name\gt{}] + \index[dir]{Run} + \index[dir]{Directive!Run} + \index[dir]{Clone a Job} + The Run directive (not to be confused with the Run option in a + Schedule) allows you to start other jobs or to clone jobs. By using the + cloning keywords (see below), you can backup + the same data (or almost the same data) to two or more drives + at the same time. The {\bf job-name} is normally the same name + as the current Job resource (thus creating a clone). However, it + may be any Job name, so one job may start other related jobs. + + The part after the equal sign must be enclosed in double quotes, + and can contain any string or set of options (overrides) that you + can specify when entering the Run command from the console. For + example {\bf storage=DDS-4 ...}. In addition, there are two special + keywords that permit you to clone the current job. They are {\bf level=\%l} + and {\bf since=\%s}. The \%l in the level keyword permits + entering the actual level of the current job and the \%s in the since + keyword permits putting the same time for comparison as used on the + current job. Note, in the case of the since keyword, the \%s must be + enclosed in double quotes, and thus they must be preceded by a backslash + since they are already inside quotes. For example: + +\begin{verbatim} + run = "Nightly-backup level=%l since=\"%s\" storage=DDS-4" +\end{verbatim} + + A cloned job will not start additional clones, so it is not + possible to recurse. + + Please note that all cloned jobs, as specified in the Run directives are + submitted for running before the original job is run (while it is being + initialized). This means that any clone job will actually start before + the original job, and may even block the original job from starting + until the original job finishes unless you allow multiple simultaneous + jobs. Even if you set a lower priority on the clone job, if no other + jobs are running, it will start before the original job. + + If you are trying to prioritize jobs by using the clone feature (Run + directive), you will find it much easier to do using a RunScript + resource, or a RunBeforeJob directive. + +\label{Priority} +\item [Priority = \lt{}number\gt{}] + \index[dir]{Priority} + \index[dir]{Directive!Priority} + This directive permits you to control the order in which your jobs will + be run by specifying a positive non-zero number. The higher the number, + the lower the job priority. Assuming you are not running concurrent jobs, + all queued jobs of priority 1 will run before queued jobs of priority 2 + and so on, regardless of the original scheduling order. + + The priority only affects waiting jobs that are queued to run, not jobs + that are already running. 
If one or more jobs of priority 2 are already + running, and a new job is scheduled with priority 1, the currently + running priority 2 jobs must complete before the priority 1 job is run. + + The default priority is 10. + + If you want to run concurrent jobs, you should + keep these points in mind: + +\begin{itemize} +\item See \ilink{Running Concurrent Jobs}{ConcurrentJobs} on how to set up + concurrent jobs. + +\item Bacula concurrently runs jobs of only one priority at a time. It + will not simultaneously run a priority 1 and a priority 2 job. + +\item If Bacula is running a priority 2 job and a new priority 1 job is + scheduled, it will wait until the running priority 2 job terminates even + if the Maximum Concurrent Jobs settings would otherwise allow two jobs + to run simultaneously. + +\item Suppose that Bacula is running a priority 2 job and a new priority 1 + job is scheduled and queued waiting for the running priority 2 job to + terminate. If you then start a second priority 2 job, the waiting + priority 1 job will prevent the new priority 2 job from running + concurrently with the running priority 2 job. That is: as long as there + is a higher priority job waiting to run, no new lower priority jobs will + start even if the Maximum Concurrent Jobs settings would normally allow + them to run. This ensures that higher priority jobs will be run as soon + as possible. +\end{itemize} + +If you have several jobs of different priority, it may not be best to start +them at exactly the same time, because Bacula must examine them one at a +time. If Bacula starts a lower priority job first, then it will run +before your high priority jobs. If you experience this problem, you may +avoid it by starting any higher priority jobs a few seconds before lower +priority ones. This ensures that Bacula will examine the jobs in the +correct order, and that your priority scheme will be respected. + +\label{WritePartAfterJob} +\item [Write Part After Job = \lt{}yes|no\gt{}] +\index[dir]{Write Part After Job} +\index[dir]{Directive!Write Part After Job} + This directive is only implemented in version 1.37 and later. + If this directive is set to {\bf yes} (default {\bf no}), a new part file + will be created after the job is finished. + + It should be set to {\bf yes} when writing to devices that require a mount + (for example DVD), so you are sure that the current part, containing + this job's data, is written to the device, and that no data is left in + the temporary file on the hard disk. However, on some media, like DVD+R + and DVD-R, a lot of space (about 10 MB) is lost every time a part is + written. So, if you run several jobs one after another, you could set + this directive to {\bf no} for all jobs, except the last one, to avoid + wasting too much space, but to ensure that the data is written to the + medium when all jobs are finished. + + This directive is ignored with tape and FIFO devices. + +\item [Heartbeat Interval = \lt{}time-interval\gt{}] + \index[dir]{Heartbeat Interval} + \index[dir]{Directive!Heartbeat} + This directive is optional and if specified will cause the Director to + set a keepalive interval (heartbeat) in seconds on each of the sockets + it opens for the Client resource. This value will override any + specified at the Director level. It is implemented only on systems + (Linux, ...) that provide the {\bf setsockopt} TCP\_KEEPIDLE function. + The default value is zero, which means no change is made to the socket.
+ +\end{description} + +The following is an example of a valid Job resource definition: + +\footnotesize +\begin{verbatim} +Job { + Name = "Minou" + Type = Backup + Level = Incremental # default + Client = Minou + FileSet="Minou Full Set" + Storage = DLTDrive + Pool = Default + Schedule = "MinouWeeklyCycle" + Messages = Standard +} +\end{verbatim} +\normalsize + +\section{The JobDefs Resource} +\label{JobDefsResource} +\index[general]{JobDefs Resource} +\index[general]{Resource!JobDefs} + +The JobDefs resource permits all the same directives that can appear in a Job +resource. However, a JobDefs resource does not create a Job, rather it can be +referenced within a Job to provide defaults for that Job. This permits you to +concisely define several nearly identical Jobs, each one referencing a JobDefs +resource which contains the defaults. Only the changes from the defaults need to +be mentioned in each Job. + +\section{The Schedule Resource} +\label{ScheduleResource} +\index[general]{Resource!Schedule} +\index[general]{Schedule Resource} + +The Schedule resource provides a means of automatically scheduling a Job as +well as the ability to override the default Level, Pool, Storage and Messages +resources. If a Schedule resource is not referenced in a Job, the Job can only +be run manually. In general, you specify an action to be taken and when. + +\begin{description} + +\item [Schedule] +\index[dir]{Schedule} +\index[dir]{Directive!Schedule} + Start of the Schedule directives. No {\bf Schedule} resource is + required, but you will need at least one if you want Jobs to be + automatically started. + +\item [Name = \lt{}name\gt{}] + \index[dir]{Name} + \index[dir]{Directive!Name} + The name of the schedule being defined. The Name directive is required. + +\item [Run = \lt{}Job-overrides\gt{} \lt{}Date-time-specification\gt{}] + \index[dir]{Run} + \index[dir]{Directive!Run} + The Run directive defines when a Job is to be run, and what overrides if + any to apply. You may specify multiple {\bf run} directives within a + {\bf Schedule} resource. If you do, they will all be applied (i.e. + multiple schedules). If you have two {\bf Run} directives that start at + the same time, two Jobs will start at the same time (well, within one + second of each other). + + The {\bf Job-overrides} permit overriding the Level, the Storage, the + Messages, and the Pool specifications provided in the Job resource. In + addition, the FullPool, the IncrementalPool, and the DifferentialPool + specifications permit overriding the Pool specification according to + what backup Job Level is in effect. + + By the use of overrides, you may customize a particular Job. For + example, you may specify a Messages override for your Incremental + backups that outputs messages to a log file, but for your weekly or + monthly Full backups, you may send the output by email by using a + different Messages override. + + {\bf Job-overrides} are specified as: {\bf keyword=value} where the + keyword is Level, Storage, Messages, Pool, FullPool, DifferentialPool, + or IncrementalPool, and the {\bf value} is as defined on the respective + directive formats for the Job resource. You may specify multiple {\bf + Job-overrides} on one {\bf Run} directive by separating them with one or + more spaces or by separating them with a trailing comma. For example: + +\begin{description} + +\item [Level=Full] + \index[dir]{Level} + \index[dir]{Directive!Level} + is all files in the FileSet whether or not they have changed. 
+ +\item [Level=Incremental] + \index[dir]{Level} + \index[dir]{Directive!Level} + is all files that have changed since the last backup. + +\item [Pool=Weekly] + \index[dir]{Pool} + \index[dir]{Directive!Pool} + specifies to use the Pool named {\bf Weekly}. + +\item [Storage=DLT\_Drive] + \index[dir]{Storage} + \index[dir]{Directive!Storage} + specifies to use {\bf DLT\_Drive} for the storage device. + +\item [Messages=Verbose] + \index[dir]{Messages} + \index[dir]{Directive!Messages} + specifies to use the {\bf Verbose} message resource for the Job. + +\item [FullPool=Full] + \index[dir]{FullPool} + \index[dir]{Directive!FullPool} + specifies to use the Pool named {\bf Full} if the job is a full backup, or +is +upgraded from another type to a full backup. + +\item [DifferentialPool=Differential] + \index[dir]{DifferentialPool} + \index[dir]{Directive!DifferentialPool} + specifies to use the Pool named {\bf Differential} if the job is a + differential backup. + +\item [IncrementalPool=Incremental] + \index[dir]{IncrementalPool} + \index[dir]{Directive!IncrementalPool} + specifies to use the Pool named {\bf Incremental} if the job is an +incremental backup. + +\item [SpoolData=yes|no] + \index[dir]{SpoolData} + \index[dir]{Directive!SpoolData} + tells Bacula to request the Storage daemon to spool data to a disk file + before writing it to the Volume (normally a tape). Thus the data is + written in large blocks to the Volume rather than small blocks. This + directive is particularly useful when running multiple simultaneous + backups to tape. It prevents interleaving of the job data and reduces + or eliminates tape drive stop and start commonly known as "shoe-shine". + +\item [SpoolSize={\it bytes}] + \index[dir]{SpoolSize} + \index[dir]{Directive!SpoolSize} + where the bytes specify the maximum spool size for this job. + The default is take from Device Maximum Spool Size limit. + This directive is available only in Bacula version 2.3.5 or + later. + +\item [WritePartAfterJob=yes|no] + \index[dir]{WritePartAfterJob} + \index[dir]{Directive!WritePartAfterJob} + tells Bacula to request the Storage daemon to write the current part + file to the device when the job is finished (see \ilink{Write Part After + Job directive in the Job resource}{WritePartAfterJob}). Please note, + this directive is implemented only in version 1.37 and later. The + default is yes. We strongly recommend that you keep this set to yes + otherwise, when the last job has finished one part will remain in the + spool file and restore may or may not work. + +\end{description} + +{\bf Date-time-specification} determines when the Job is to be run. The +specification is a repetition, and as a default Bacula is set to run a job at +the beginning of the hour of every hour of every day of every week of every +month of every year. This is not normally what you want, so you must specify +or limit when you want the job to run. Any specification given is assumed to +be repetitive in nature and will serve to override or limit the default +repetition. This is done by specifying masks or times for the hour, day of the +month, day of the week, week of the month, week of the year, and month when +you want the job to run. By specifying one or more of the above, you can +define a schedule to repeat at almost any frequency you want. + +Basically, you must supply a {\bf month}, {\bf day}, {\bf hour}, and {\bf +minute} the Job is to be run. 
Of these four items to be specified, {\bf day} +is special in that you may either specify a day of the month such as 1, 2, +... 31, or you may specify a day of the week such as Monday, Tuesday, ... +Sunday. Finally, you may also specify a week qualifier to restrict the +schedule to the first, second, third, fourth, or fifth week of the month. + +For example, if you specify only a day of the week, such as {\bf Tuesday}, the +Job will be run every hour of every Tuesday of every month. That is, the {\bf +month} and {\bf hour} remain set to the defaults of every month and all +hours. + +Note that by default, with no other specification, your job will run at the +beginning of every hour. If you wish your job to run more than once in any +given hour, you will need to specify multiple {\bf run} specifications, each +with a different minute. + +The date/time to run the Job can be specified in the following way in +pseudo-BNF: + +\footnotesize +\begin{verbatim}
+<void-keyword>    = on
+<at-keyword>      = at
+<week-keyword>    = 1st | 2nd | 3rd | 4th | 5th | first |
+                    second | third | fourth | fifth
+<wday-keyword>    = sun | mon | tue | wed | thu | fri | sat |
+                    sunday | monday | tuesday | wednesday |
+                    thursday | friday | saturday
+<week-of-year-keyword> = w00 | w01 | ... w52 | w53
+<month-keyword>   = jan | feb | mar | apr | may | jun | jul |
+                    aug | sep | oct | nov | dec | january |
+                    february | ... | december
+<daily-keyword>   = daily
+<weekly-keyword>  = weekly
+<monthly-keyword> = monthly
+<hourly-keyword>  = hourly
+<digit>           = 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 0
+<number>          = <digit> | <digit><number>
+<12hour>          = 0 | 1 | 2 | ... 12
+<hour>            = 0 | 1 | 2 | ... 23
+<minute>          = 0 | 1 | 2 | ... 59
+<day>             = 1 | 2 | ... 31
+
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. 
In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/de/install/install.css b/docs/manuals/de/install/install.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/de/install/install.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/de/install/install.tex b/docs/manuals/de/install/install.tex new file mode 100644 index 00000000..3b325fe3 --- /dev/null +++ b/docs/manuals/de/install/install.tex @@ -0,0 +1,95 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{dir}{ddx}{dnd}{Director Index} +\newindex{fd}{fdx}{fnd}{File Daemon Index} +\newindex{sd}{sdx}{snd}{Storage Daemon Index} +\newindex{console}{cdx}{cnd}{Console Index} +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Installation and Configuration Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". 
+} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{quickstart} +\include{installation} +\include{critical} +\include{configure} +\include{dirdconf} +\include{filedconf} +\include{storedconf} +\include{messagesres} +\include{consoleconf} +\include{monitorconf} +\include{security} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] +\printindex[dir] +\printindex[fd] +\printindex[sd] +\printindex[console] + +\end{document} diff --git a/docs/manuals/de/install/installation.tex b/docs/manuals/de/install/installation.tex new file mode 100644 index 00000000..bc17c4dd --- /dev/null +++ b/docs/manuals/de/install/installation.tex @@ -0,0 +1,850 @@ +%% +%% + +\chapter{Bacula installieren} +\label{InstallChapter} +\index[general]{Installation!von Bacula } +\index[general]{Bacula installieren } + +Normalerweise ben\"{o}tigen Sie ein Release mit Baculas Quellcode und, wenn ein Windows-Client benutzt werden soll, ein ausf\"{u}hrbares Release von Bacula f\"{u}r Windows. Entsprechend Ihrer Konfigurationsoptionen ben\"{o}tigt Bacula bestimmte Pakete von Drittanbietern (wie z.B. {\bf SQLite}, {\bf MySQL} oder {\bf PostgreSQL}) zur Kompilierung. Um Ihnen die Arbeit zu erleichtern, haben wir einige dieser Softwarepakete als zwei {\bf depkgs}-Releases ver\"{o}ffentlicht (Dependency Packages). Dies kann Ihr Leben ungemein erleichtern, da Sie so mit allen notwendigen Paketen versorgt sind, anstatt gezwungen zu sein, sie selbst einzeln im Internet zu finden und zu installieren. + +\section{Source Release Files} +\index[general]{Source Files} +\index[general]{Release Files} + +Seit Baculas Version 1.38.0 ist der Quellcode in vier einzelne Tar-Dateien aufgeteilt, die jeweils einem Modul in Baculas CVS entsprechen. Im einzelnen sind dies: + +\begin{description} +\item [bacula-1.38.0.tar.gz] +Dies ist Baculas Quellcode. Mit jedem Release erh\"{o}ht sich die Versionsnummer. + +\item [bacula-docs-1.38.0.tar.gz] +Diese Datei enth\"{a}lt eine Kopie des Verzeichnisses der Dokumente im CVS. Einige Dokumente sind vorkompiliert. F\"{u}r Englisch existiert ein HTML-Verzeichnis, ein einzelnes HTML-File und eine PDF-Datei. Die franz\"{o}sische und die deutsche Übersetzung sind in Arbeit, aber nicht kompiliert. + +\item [bacula-gui-1.38.0.tar.gz] +Diese Datei enth\"{a}lt grafische Benutzeroberfl\"{a}chen, die nicht Bestandteil des Hauptprogrammes sind. Momentan sind dies ``bacula-web'' zur Generierung von Verwaltungsansichten Ihrer Bacula-Jobs innerhalb eines Web-Browsers und ``bimagemgr'', ein Dateibrowser, der verwendet wird, um aus Bacula-Volumes CD-Images zu brennen. + +\item [bacula-rescue-1.8.1.tar.gz] +Dies ist der Code f\"{u}r die Bacula Rettungs-CD. Die Versionsnummer dieses Paketes ist nicht an die Versionsnummer von Bacula gebunden und wird sich daher unterscheiden. Mit diesem Code k\"{o}nnen Sie eine CD brennen, die unter anderem eine Beschreibung Ihrer Systemkonfiguration und die statisch gelinkte Version des File-D\"{a}mons enth\"{a}lt. Damit k\"{o}nnen Sie im Falle eines Festplattenausfalles mit Hilfe von Bacula Ihre Festplatten neu partitionieren, formatieren und Ihr System auf einfache Art wiederherstellen.
+ +\end{description} + + +\label{upgrading1} + +\section{Bacula upgraden} +\index[general]{Bacula!als Upgrade } +\index[general]{Bacula upgraden } +\index[general]{Upgrade } + +Wenn Sie Bacula von einer Version auf die n\"{a}chste upgraden, sollten Sie erst die ReleaseNotes aller Versionen zwischen Ihrer laufenden und jener, auf die sie upgraden wollen, sorgf\"{a}ltig lesen. Wenn die Bacula Catalog-Datenbank upgegraded wurde, m\"{u}ssen Sie entweder ganz von vorne anfangen und Ihre Datenbank neu initialisieren oder diese als ASCII-Datei sichern und dann mit dem Upgrade fortfahren. +Dies geschieht normalerweise nachdem Bacula kompiliert und installiert ist durch Eingabe von: + +\begin{verbatim} +cd (default /etc/bacula) +./update_bacula_tables +\end{verbatim} + +Dieses Update-Skript finden Sie auch in Baculas Quellcode im Verzeichnis ``src/cats'': + +Gab es zwischen Ihrer Version und der aktuellen mehrere Datenbank-Upgrades, werden Sie jedes einzelne Datenbank Upgradeskript ausf\"{u}hren m\"{u}ssen. Um Ihnen dies zu erleichtern, sind alle alten Upgrade-Skripte im Verzeichnis {\bf upgradedb} des Quellcodes. Sie werden diese Skripte den Gegebenheiten Ihrer Systemkonfiguration anpassen m\"{u}ssen. + +Das letzte Upgrade-Skript (wenn vorhanden) wird dann so ausgef\"{u}hrt, wie es oben beschrieben ist. + +Wenn Sie von einer Hauptversion auf die n\"{a}chste upgraden, m\"{u}ssen alle Komponenten gleichzeitig ersetzt werden, da sich in der Regel das Übertragungs-Protokoll zwischen den D\"{a}monen \"{a}ndert. Innerhalb eines bestimmten Release (z.B. Version 1.32.x) wird sich das D\"{a}mon-Protokoll jedoch nicht \"{a}ndern solange nicht ein Bug oder ein Versehen zu beheben ist. Wenn das alles f\"{u}r Sie verwirrend ist, lesen Sie einfach die ReleaseNotes sehr sorgf\"{a}ltig. Es wird hier stehen, wenn alle D\"{a}monen gleichzeitig upgegraded werden m\"{u}ssen. + +Beachten Sie schlie{\ss}lich, dass es in der Regel nicht notwendig ist, vor dem Upgrade ein {\bf make uninstall} auszuf\"{u}hren. Tats\"{a}chlich werden Sie so sehr wahrscheinlich alle ihre Konfigurationsdateien zerst\"{o}ren, was verheerend sein k\"{o}nnte. +Die normale Upgrade-Prozedur besteht einfach in der Eingabe von {\bf make install}. +Im allgemeinen werden dabei keine Ihrer ``.conf''- oder ``.sql''-Dateien \"{u}berschrieben. + + Weiteres zum Upgraden lesen sie im Abschnitt \ilink{Upgrading Bacula Versions}{upgrading} im Kapitel ``Tips'' in diesem Handbuch. + +\section{Dependency-Packages} +\label{Dependency} +\index[general]{Dependency-Packages } +\index[general]{Packages!Dependency } + +Wie oben erw\"{a}hnt, haben wir einige Pakete von Drittanbietern, die Bacula m\"{o}glicherweise ben\"{o}tigt, in den Releases {\bf depkgs} und {\bf depkgs1} zusammengefasst. Nat\"{u}rlich k\"{o}nnen Sie sich auch die neuesten Versionen von den Original-Autoren besorgen. Die Quellen der einzelnen Pakete stehen in der README-Datei jedes einzelnen Paketes. Beachten Sie jedoch, dass die Pakete der \textbf{depkgs}-Dateien von uns auf ihre Kompatibilit\"{a}t zu Bacula getestet wurden. + +Typischerweise hei{\ss}en die Dependency-Packages {\bf depkgs-ddMMMyy.tar.gz} und +{\bf depkgs1-ddMMyy.tar.gz} wobei {\bf dd} der Tag, {\bf MMM} der Monat in abgek\"{u}rzter Form (z.B. ``Jan'') und {\bf yy} das Jahr ist, an dem es herausgegeben wurde. Ein aktuelles Beispiel ist: {\bf depkgs-07Apr02.tar.gz}. 
Um es zu installieren und zu kompilieren (wenn es ben\"{o}tigt wird), gehen Sie wie folgt vor: + +\begin{enumerate} +\item Erstellen Sie ein {\bf bacula}-Verzeichnis, in das Sie sowohl die Bacula-Quelldateien als auch die Dependency-Packages legen. +\item Entpacken Sie das {\bf depkg} mit ``detar'' in das {\bf bacula}-Verzeichnis. +\item cd bacula/depkgs +\item make + \end{enumerate} + +Die genaue Zusammensetzung der Dependency-Packages wird sich von Zeit zu Zeit \"{a}ndern. Momentan sehen sie so aus: + +\addcontentsline{lot}{table}{Dependency-Packages} +\begin{longtable}{|l|l|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Drittanbieterpaket } & \multicolumn{1}{c| }{\bf +depkgs } & \multicolumn{1}{c| }{\bf depkgs1 } & \multicolumn{1}{c| }{\bf +depkgs-win32 } \\ + \hline {SQLite } & \multicolumn{1}{c| }{X } & \multicolumn{1}{c| }{- } & +\multicolumn{1}{c| }{- } \\ + \hline {mtx } & \multicolumn{1}{c| }{X } & \multicolumn{1}{c| }{- } & +\multicolumn{1}{c| }{- } \\ + \hline {readline } & \multicolumn{1}{c| }{- } & \multicolumn{1}{c| }{X } & +\multicolumn{1}{c| }{- } \\ + \hline {pthreads } & \multicolumn{1}{c| }{- } & \multicolumn{1}{c| }{- } & +\multicolumn{1}{c| }{X } \\ + \hline {zlib } & \multicolumn{1}{c| }{- } & \multicolumn{1}{c| }{- } & +\multicolumn{1}{c| }{X } \\ + \hline {wxWidgets } & \multicolumn{1}{c| }{- } & \multicolumn{1}{c| }{- } & +\multicolumn{1}{c| }{X } +\\ \hline + +\end{longtable} + + +Beachten Sie, dass einige dieser Pakete recht umfangreich sind, so dass ihre Kompilierung einige Zeit beanspruchen kann. Mit den obigen Anweisungen werden alle Pakete im entsprechenden Verzeichnis kompiliert. Bacula wird allerdings bei seiner Kompilierung nur jene Teile verwenden, die es tats\"{a}chlich ben\"{o}tigt. + +Alternativ k\"{o}nnen Sie nur jene Pakete kompilieren, die Sie tats\"{a}chlich ben\"{o}tigen. Beispielsweise wird + +\footnotesize +\begin{verbatim} +cd bacula/depkgs +make sqlite +\end{verbatim} +\normalsize + +nur das ``SQLite''-Paket konfigurieren und kompilieren. + +Sie sollten die ben\"{o}tigten Pakete aus {\bf depkgs} und/oder {\bf depkgs1} kompilieren, bevor Sie Bacula konfigurieren und kompilieren, da Bacula diese w\"{a}hrend seiner eigenen Kompilierung ben\"{o}tigt. + +Auch wenn Sie SQLite nicht verwenden, k\"{o}nnte es sich f\"{u}r Sie lohnen, {\bf mtx} zu kompilieren, da das enthaltene {\bf tapeinfo}-Programm oft wertvolle Informationen \"{u}ber Ihr SCSI-Bandlaufwerk (z.B. Kompression, min./max. Blockgr\"{o}{\ss}e...) liefern kann. + +Das {\bf depkgs-win32}-Paket enth\"{a}lt den Quellcode der ``Pthreads''-, ``wxWidgets''- und ``zlib''-Bibliotheken, die das Win32-Clientprogramm verwendet. Man ben\"{o}tigt diese nur, wenn Sie das Win32-Programm selbst kompilieren wollen. + +\section{Unterst\"{u}tzte Betriebssysteme} +\label{Systems} +\index[general]{Betriebssysteme!Unterst\"{u}tzte } +\index[general]{Unterst\"{u}tzte Betriebssysteme } + +Lesen Sie bitte den Abschnitt +\ilink{Unterst\"{u}tzte Betriebssysteme}{SupportedOSes} im Kapitel +``QuickStart'' dieses Handbuches. + +\section{Bacula aus dem Quellcode kompilieren} +\label{Building} +\index[general]{Quellcode!Kompilation von Bacula aus dem } +\index[general]{Bacula aus dem Quellcode kompilieren} + +Die Grundinstallation ist ziemlich einfach. + +\begin{enumerate} +\item Installieren und kompilieren Sie alle ben\"{o}tigten {\bf depkgs} wie oben beschrieben.
+\item Konfigurieren und installieren Sie ``MySQL'' oder ``PostgreSQL'' (wenn gew\"{u}nscht) +\ilink{Installation und Konfiguration von MySQL Phase I}{_ChapterStart} oder +\ilink{Installation und Konfiguration von PostgreSQL Phase I}{_ChapterStart10}. Wenn Sie f\"{u}r die Installation von ``MySQL'' ein RPM verwenden, m\"{u}ssen Sie auch {\bf mysql-devel} installieren, so dass die Header-Dateien verf\"{u}gbar sind, wenn Sie Bacula kompilieren. Zus\"{a}tzlich erfordert die MySQL Client-Bibliothek die gzip-Kompressionsbibliotheken {\bf libz.a} oder {\bf libz.so}. Wenn Sie RPM-Pakete verwenden, sind diese Bibliotheken im Paket {\bf zlib1g-dev}. Auf Debian-Systemen m\"{u}ssen Sie das {\bf zlib1g-dev}-Paket laden. Wenn Sie weder RPMs noch debs verwenden, m\"{u}ssen Sie die passenden Pakete f\"{u}r Ihr System selbst finden. +Wenn auf Ihrem System schon MySQL oder PostgreSQL l\"{a}uft, k\"{o}nnen Sie diese Phase \"{u}berspringen, wenn Sie ``thread safe''-Bibliotheken kompiliert und die oben erw\"{a}hnten zus\"{a}tzlichen RPMs installiert haben. + +\item Anstatt ``MySQL'' und ``PostgreSQL'' k\"{o}nnen Sie auch SQLite konfigurieren und installieren \ilink{Installation und Konfiguration von SQLite}{_ChapterStart33}. Dessen Quellcode ist Teil des {\bf depkgs}-Paketes. +\item Entpacken sie Baculas Quellcode vorzugsweise in das {\bf bacula}-Verzeichnis, welches oben erw\"{a}hnt wurde. +\item Wechseln ({\bf cd}) Sie in das Verzeichnis mit dem Quellcode. +\item F\"{u}hren Sie \textbf{./configure} aus (mit den entsprechenden Konfigurationsoptionen, die weiter unten n\"{a}her beschrieben sind. +\item Pr\"{u}fen Sie die Ausgabe des \textbf{./configure}-Befehls sehr sorgf\"{a}ltig, besonders die Ausgaben zum Installationsverzeichnis der Programm- und der Konfigurationsdateien. Sind diese nicht korrekt, wiederholen Sie \textbf{./configure} bis sie stimmen. Die Ausgabe des ./configure-Befehls ist in der Datei {\bf config.out} abgespeichert und kann jederzeit wieder angesehen werden, ohne \textbf{./configure} neu zu starten, indem man {\bf cat config.out} eingibt. +\item Wenn Sie Optionen \"{a}ndern, nachdem \textbf{./configure} gelaufen war und Sie es neu starten m\"{u}ssen, geben Sie vorher das folgende ein. + +\footnotesize +\begin{verbatim} + make distclean +\end{verbatim} +\normalsize + +Damit gehen Sie sicher, dass Sie wirklich von vorne anfangen und keine Mischung der verschiedenen Optionen haben. Dies liegt daran, dass \textbf{./configure} einen Gro{\ss}teil der Informationen zwischenspeichert. {\bf make distclean} ist auch sehr wichtig, wenn Sie die Quellverzeichnisse auf einen anderen Rechner verlagern. Schl\"{a}gt der Befehl fehl, ignorieren Sie das einfach und machen mit + +\item make + +weiter. + +Wenn es hierbei Fehlermeldungen beim Linken in das Verzeichnis (src/stored) des Storage-D\"{a}mon gibt, liegt es vielleicht daran, dass sie die statischen Bibliotheken in Ihrem System nicht geladen sind. Diese Problem bemerkte ich auf einem Solaris-System. Verwenden sie den {\bf ./configure}-Befehl ohne die Option {\bf \verb{--{enable-static-tools} um den Fehler zu beheben. + +\item make install + +\item +Wenn Sie ein Bacula-Neuling sind, empfehlen wir \textbf{dringend}, den n\"{a}chsten Schritt zu \"{u}berspringen und die Vorgabe-Konfigurationsdateien zu verwenden. Probieren Sie damit das Beispiel im n\"{a}chsten Kapitel aus und \"{a}ndern sie danach Ihre Konfigurationsdateien, so dass sie Ihren eigenen Anforderungen entsprechen. 
+ +\item Passen Sie die Konfigurationsdateien aller drei D\"{a}monprozesse und die des Console-Programms an. Einzelheiten hierzu im Abschnitt \ilink{Setting Up Bacula Configuration Files}{_ChapterStart16} des Kapitels ``Konfiguration'' in diesem Handbuch. Wir empfehlen Ihnen, an den beigef\"{u}gten Vorgabe-Konfigurationsdateien zun\"{a}chst nur soviel zu \"{a}ndern, wie unbedingt notwendig ist. Eine endg\"{u}ltige Anpassung ist immer noch m\"{o}glich, wenn Bacula zuverl\"{a}ssig l\"{a}uft. Passen Sie bitte auf, wenn Sie die (zuf\"{a}llig generierten) Passw\"{o}rter und die {\bf Name}n ver\"{a}ndern. Aus Sicherheitsgr\"{u}nden m\"{u}ssen diese in den Konfigurationsdateien \"{u}bereinstimmen. + +\item Erzeugen Sie die Datenbank und die Tabellen f\"{u}r Bacula in MySQL (wenn Sie MySQL verwenden) (\ilink{MySQL installieren und Konfigurieren Phase II}{mysql_phase2}), in PostgreSQL (\ilink{PostgreSQL installieren und Konfigurieren Phase II}{PostgreSQL_phase2}) oder gegebenenfalls in SQLite (\ilink{SQLite installieren und Konfigurieren Phase II}{phase2}). + +\item Starten Sie Bacula ({\bf ./bacula start}). Im n\"{a}chsten Kapitel wird dies im einzelnen erkl\"{a}rt. +\item Kommunizieren Sie mit Bacula \"{u}ber das Console-Programm. + +\item Folgen Sie f\"{u}r die letzten beiden Punkte den Anweisungen im Kapitel \ilink{Running Bacula}{_ChapterStart1} dieses Handbuches, wo Sie eine einfache Sicherung und eine Wiederherstellung durchf\"{u}hren. Tun Sie dies, bevor Sie die Konfigurationsdateien in gr\"{o}{\ss}erem Umfang ver\"{a}ndern, so dass Sie sicher sein k\"{o}nnen, dass Bacula funktioniert und Sie damit vertraut sind. Danach wird es einfacher sein, die Konfigurationsdateien anzupassen. + +\item Wenn Sie nach der Installation beschlie{\ss}en, mit Bacula ``umzuziehen'', d.h. es in anderen Verzeichnissen installieren zu wollen, gehen Sie wie folgt vor: + +\footnotesize +\begin{verbatim} + make uninstall + make distclean + ./configure (mit-den-neuen-Optionen) + make + make install +\end{verbatim} +\normalsize + +\end{enumerate} + +Wenn alles gut geht, wird der {\bf ./configure}-Prozess Ihr laufendes Betriebssystem korrekt erkennen und den Quellcode entsprechend konfigurieren. Momentan werden FreeBSD, Linux (Red Hat) und Solaris unterst\"{u}tzt. Von MacOS X 10.3 wird berichtet, dass der Client nur dann darauf l\"{a}uft, wenn die readline-Unterst\"{u}tzung deaktiviert ist. + +Wenn Sie Bacula auf mehr als einem System installieren, k\"{o}nnen Sie einfach den Verzeichnisbaum des Quellcodes auf den anderen Rechner \"{u}bertragen und ein ``make install'' ausf\"{u}hren. Gibt es jedoch Unterschiede in den Bibliotheken, den Betriebssystemversionen oder soll es auf einem anderen Betriebssystem installiert werden, sollten Sie mit der originalen tar-Datei beginnen. Wenn Sie die Verzeichnisstruktur des Quellcodes \"{u}bertragen und den ./configure-Befehl schon ausgef\"{u}hrt haben, m\"{u}ssen Sie unbedingt + +\footnotesize +\begin{verbatim} +make distclean +\end{verbatim} +\normalsize + +ausf\"{u}hren, bevor Sie ``./configure'' erneut aufrufen. Dies liegt daran, dass ``GNU autoconf'' die Konfiguration zwischenspeichert, und wenn Sie beispielsweise die Konfiguration eines Linux-Rechners auf einem Solaris-System wiederverwenden, k\"{o}nnen Sie sicher sein, dass die Kompilierung fehlschl\"{a}gt. Um dies zu vermeiden, starten Sie entweder mit der tar-Datei oder f\"{u}hren ``make distclean'' aus, wie oben erw\"{a}hnt.
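+
+Eine m\"{o}gliche Befehlsfolge nach dem \"{U}bertragen des Quellcode-Verzeichnisses auf einen anderen Rechner k\"{o}nnte also so aussehen (die gezeigten configure-Optionen sind nur beispielhaft, verwenden Sie Ihre eigenen):
+
+\footnotesize
+\begin{verbatim}
+# im obersten Verzeichnis des uebertragenen Quellcodes
+make distclean
+./configure --enable-smartalloc --with-mysql=$HOME/mysql
+make
+\end{verbatim}
+\normalsize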
+ +Gew\"{o}hnlich werden Sie einen etwas komplizierteren {\bf configure}-Befehl absetzen wollen, um sicher zu gehen, dass die von Ihnen gew\"{u}nschten Module kompiliert werden und alles in den richtigen Verzeichnissen abgelegt wird. + +Auf RedHat zum Beispiel k\"{o}nnte ``./configure'' so aussehen: + +\footnotesize +\begin{verbatim} +CFLAGS="-g -Wall" \ + ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --with-mysql=$HOME/mysql \ + --with-working-dir=$HOME/bacula/bin/working \ + --with-dump-email=$USER +\end{verbatim} +\normalsize + + +Beachten Sie bitte, dass der Vorteil der Verwendung der obigen Konfiguration f\"{u}r den Anfang darin liegt, dass hierbei alles in ein einziges Verzeichnis geschrieben wird, welches sp\"{a}ter gel\"{o}scht werden kann, wenn Sie die Beispiele des n\"{a}chsten Kapitels ausgef\"{u}hrt und gelernt haben wie Bacula funktioniert. Ausserdem kann das Obige auch ohne root-Rechte installiert und ausgef\"{u}hrt werden. + +Um den Entwicklern die Arbeit zu erleichtern, haben wir dem Verzeichnis {\bf examples} ein {\bf defaultconfig}-Skript beigef\"{u}gt. Diese Skript enth\"{a}lt alle Statements, die man normalerweise benutzt und jeder Entwickler oder Benutzer kann sie nach seinen Bed\"{u}rfnissen ver\"{a}ndern. In diesem Verzeichnis sind auch andere n\"{u}tzliche Beispiele. + +Die \textbf{./configure}-Schalter {\bf \verb{--{enable-conio} oder {\bf \verb{--{enable-readline} sind n\"{u}tzlich, da man dadurch eine Kommandozeilen-History und ein Editorfunktionen f\"{u}r die Kommandozeile des Console-Programms erh\"{a}lt. Wenn Sie eine dieser Optionen verwenden, ben\"{o}tigen Sie beim Linken entweder das {\bf termcap}- oder das {\bf ncurses}-Paket. Auf manchen Systemen wie z.B. ``SuSE'' ist die termcap-Bibliothek nicht im Verzeichnis der Standard-Bibliotheken. Daher kann diese Option wirkungslos sein oder Sie erhalten folgende Fehlermeldung + +\footnotesize +\begin{verbatim} +/usr/lib/gcc-lib/i586-suse-linux/3.3.1/.../ld: +cannot find -ltermcap +collect2: ld returned 1 exit status +\end{verbatim} +\normalsize + +w\"{a}hrend Sie die Bacula-Console kompilieren. In diesem Fall m\"{u}sssen Sie die {\bf +LDFLAGS}-Umgebungsvariable vor der Kompilierung wie folgt setzen: + +\footnotesize +\begin{verbatim} +export LDFLAGS="-L/usr/lib/termcap" +\end{verbatim} +\normalsize + +Die gleichen Erfordernisse an die Systembibliothek gelten, wenn sie die ``Readline''-Subroutinen f\"{u}r das Editieren und die History der Kommandozeile benutzen wollen oder eine MySQL-Bibliothek, die Verschl\"{u}sselung erfordert. Wenn Sie Verschl\"{u}sselung ben\"{o}tigen, k\"{o}nnen Sie entweder die entsprechenden zus\"{a}tzlichen Bibliotheks-Pfade wie oben gezeigt setzen oder wie unten gezeigt direkt in der Befehlzeile des Befehls mit angeben. + +\footnotesize +\begin{verbatim} +LDFLAGS="-lssl -lcyrpto" \ + ./configure \ + +\end{verbatim} +\normalsize + +Auf manchen Systemen wie Mandriva neigt ``readline'' dazu, die Eingaben zu verst\"{u}mmeln, was es v\"{o}llig unbrauchbar macht. Wenn das bei Ihnen geschieht, w\"{a}hlen Sie die Option ab oder, wenn Sie Version 1.33 oder h\"{o}her verwenden, versuchen Sie mit der Option \verb{--{enable-conio den eingebauten ``readline''-Ersatz zu verwenden. Auch hierzu werden Sie entweder die ``termcap''- oder ``ncurses''-Bibliothek ben\"{o}tigen, doch es ist unwahrscheinlich, dass das {\bf conio}-Paket Ihre Eingaben dann verst\"{u}mmelt. 
+ +``readline'' wird ab Version 1.34 nicht weiter unterst\"{u}tzt. Der Code ist noch verf\"{u}gbar, und wenn Benutzer daf\"{u}r Patches schicken, wird es mir ein Vergn\"{u}gen sein, diese einzubauen. Da jedoch jede Version von ``readline'' mit den Vorg\"{a}ngerversionen inkompatibel zu sein scheint und zwischen den Systemen wesentliche Unterschiede bestehen, kann ich es mir nicht mehr l\"{a}nger leisten, es zu unterst\"{u}tzen. + +\section{Welches Datenbanksystem soll verwendet werden?} +\label{DB} +\index[general]{Welches Datenbanksystem soll verwendet werden? } +\index[general]{verwenden!Welches Datenbanksystem } + +Vor der Kompilierung von Bacula m\"{u}ssen Sie sich entscheiden, ob Sie SQLite, MySQL oder +PostgreSQL verwenden werden. Wenn bei Ihnen nicht sowieso schon MySQL oder PostgreSQL l\"{a}uft, empfehlen wir, versuchsweise mit SQLite zu beginnen. Dies wird Ihnen die Einrichtung wesentlich erleichtern, da SQLite in Bacula hineinkompiliert wird und keine Administration erfordert. Es hat eine ganz ordentliche Performanz und ist f\"{u}r kleine bis mittlere Installationen gut geeignet (maximal 10 bis 20 Rechner). +Allerdings sollten wir erw\"{a}hnen, dass einige unserer Benutzer mit SQLite unerkl\"{a}rliche Datenbankkorruptionen hatten. F\"{u}r ein Produktiv-System empfehlen wir daher die Installation von MySQL oder PostgreSQL. + +Wenn Sie f\"{u}r den Bacula-Catalog MySQL verwenden wollen, lesen Sie bitte das Kapitel \ilink{MySQL installieren und konfigurieren}{_ChapterStart} in diesem Handbuch. Sie werden hierzu MySQL installieren m\"{u}ssen, bevor Sie Bacula konfigurieren. MySQL ist ein Datenbanksystem von hoher Qualit\"{a}t, das sehr effizient arbeitet und f\"{u}r Installationen jeder Gr\"{o}{\ss}e geeignet ist. Seine Einrichtung und Administration sind ein wenig komplizierter als die von SQLite, da es einige Besonderheiten wie userids und Passw\"{o}rter bietet. Es l\"{a}uft als eigenst\"{a}ndiger Prozess, ist wirklich professionell und kommt mit Datenbanken jeder Gr\"{o}{\ss}e zurecht. + +Wenn Sie PostgreSQL als Bacula-Catalog verwenden wollen, lesen Sie bitte das Kapitel \ilink{PostgreSQL installieren und konfigurieren}{_ChapterStart10} in diesem Handbuch. Bevor Bacula konfiguriert wird, muss PostgreSQL installiert sein. Es ist MySQL sehr \"{a}hnlich, dabei aber eher etwas mehr SQL92-kompatibel und hat viele Features wie ``Transaktionen'', ``Stored Procedures'' und \"{a}hnliches. Man braucht eine gewisse Erfahrung, um es zu installieren und zu warten. + +Wenn Sie als Bacula-Catalog SQLite verwenden wollen, lesen Sie bitte das Kapitel \ilink{SQLite installieren und konfigurieren}{_ChapterStart33} in diesem Handbuch. + +\section{Quick Start} +\index[general]{Quick Start } +\index[general]{Start!Quick } + +Unten werden nun einige Optionen und wichtige Vor\"{u}berlegungen ausgef\"{u}hrt, die Sie jedoch f\"{u}r den Moment \"{u}berspringen k\"{o}nnen, wenn Sie mit der vereinfachten Konfiguration, wie sie oben gezeigt wurde, keine Probleme hatten. + +Falls der ``./configure''-Prozess bestimmte Bibliotheken (z.B. ``libintl'') nicht findet, vergewissern Sie sich, dass das entsprechende Paket auf Ihrem Rechner installiert ist. Wenn das Paket an einem Ort installiert ist, den Bacula nicht erwartet, kann in der Regel mit einem der im Folgenden aufgef\"{u}hrten Optionsschalter ein Suchpfad \"{u}bergeben werden. ``./configure {-}{-}help'' liefert eine Liste aller Optionen. Das letzte Mittel ist, ein Feature durch einen entsprechenden Optionsschalter zu deaktivieren (z.B. ``{-}{-}disable-nls'').
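+
+Ein kurzes Beispiel dazu (der angegebene Pfad ist frei gew\"{a}hlt und muss an Ihr System angepasst werden):
+
+\footnotesize
+\begin{verbatim}
+# alle verfuegbaren Optionen anzeigen
+./configure --help
+# libintl liegt in einem Nicht-Standard-Verzeichnis
+./configure --with-libintl-prefix=/opt/local ...
+# oder das Feature notfalls abschalten
+./configure --disable-nls ...
+\end{verbatim}
+\normalsize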
+ +Wenn Sie richtig loslegen wollen, empfehlen wir, zum n\"{a}chsten Kapitel weitergehen und das Beispielprogramm zum Laufen zu bringen. Es wird Sie viel \"{u}ber Bacula lehren und kann zum Ausprobieren in ein einzelnes Verzeichnis installiert (um es auf einfache Art wieder l\"{o}schen zu k\"{o}nnen) und ohne root-Rechte betrieben werden. Wenn irgendwelche Probleme auftreten oder Sie richtig installieren wollen, kehren Sie zu diesem Kapitel zur\"{u}ck und lesen Sie die Einzelheiten, die nun folgen. + +\section{Konfigurationsoptionen} +\label{Options} +\index[general]{Optionen!der Konfiguration } +\index[general]{Konfigurationsoptionen } + +Um Ihre Installation anzupassen, hat der {\bf configure}-Befehl die folgenden Kommandozeilen-Schalter. + +\begin{description} + +\item [{-}{-}sysbindir=\lt{}Pfad/zu/den/Programmdateien\gt{}] + \index[general]{--sysbindir } +Legt fest, in welches Verzeichnis die Bacula Programmdateien bei Ausf\"{u}hrung des {\bf make install}-Befehls installiert werden. + +\item [{-}{-}sysconfdir=\lt{}Pfad/zu/den/Konfigurationsdateien\gt{}] + \index[general]{--sysconfdir } + Legt fest, in welches Verzeichnis die Bacula Konfigurationsdateien bei Ausf\"{u}hrung des {\bf make install}-Befehls installiert werden. + +\item [ {-}{-}mandir=\lt{}path\gt{}] + \index[general]{{-}{-}mandir} +Vorgabem\"{a}{\ss}ig installiert Bacula eine einfache Unix-manpage in ``/usr/share/man''. Soll die manpage an einen anderen Ort, k\"{o}nnen Sie mit dieser Option einen Pfad setzen. Beachten Sie bitte, dass die Bacula-Handb\"{u}cher (HTML- und PDF-Dateien) Bestandteil eines eigenen tar-Files sind, das nicht Bestandteil des Quellcode-Releases ist. + +\item [ {-}{-}datadir=\lt{}path\gt{}] + \index[general]{{-}{-}datadir} +Wenn Sie Bacula oder Teile davon \"{u}bersetzen wollen, k\"{o}nnen Sie die ``{\bf + {-}{-}datadir}''-Option verwenden um den Speicherort der ``po''-Dateien festzulegen. Die ``po''-Dateien m\"{u}ssen ``von Hand'' installiert werden, da Bacula dies (noch) nicht automatisch tut. + +\item [{-}{-}enable-smartalloc ] + \index[general]{--enable-smartalloc } + Damit wird der ``Smartalloc orphaned buffer detection code'' mit eingebunden. Diese Option ist dringend empfohlen. Da wir nie ohne diese Option kompilieren, werden Sie vielleicht Probleme haben, wenn sie nicht gesetzt ist. Wir empfehlen dringend, diesen Schalter gesetzt zu lassen, da er hilft, Memory-Leaks zu entdecken. Dieser Konfigurationsparameter wird bei der Kompilierung von Bacula benutzt. + +\item [{-}{-}enable-gnome ] + \index[general]{--enable-gnome } + Ist auf Ihrem Computer GNOME installiert und wollen Sie das grafische GNOME-Interface benutzen, setzen Sie diesen Schalter. Dadurch wird alles im Verzeichnis {\bf src/gnome-console} kompiliert. + +\item [{-}{-}enable-wx-console ] + \index[general]{--enable-wx-console } + Wenn auf Ihrem Rechner wxWidgets installiert ist und sie das grafische wxWidgets Console-Interface benutzen wollen, m\"{u}ssen Sie diesen Schalter setzen. Hierdurch wird alles im Verzeichnis {\bf src/wx-console} kompiliert. 
Dies kann auch f\"{u}r Benutzer hilfreich sein, die eine grafische Konsole benutzen, aber GNOME nicht installieren wollen, da wxWidgets mit GTK+-, Motif- und sogar X11-Bibliotheken l\"{a}uft + +\item [{-}{-}enable-tray-monitor ] + \index[general]{--enable-tray-monitor } +Wenn Sie auf Ihrem Rechner GTK installiert haben und eine grafische Umgebung oder einen Window-Manager benutzen, der dem Standard f\"{u}r die System-Tray von FreeDesktop entspricht (wie KDE oder GNOME) und wenn sie Ihre GUI benutzen wollen, um die Bacula-D\"{a}monen zu \"{u}berwachen, sollten sie diesen Schalter setzen. Ist er gesetzt, wird alles im Verzeichnis {\bf src/tray-monitor} kompiliert. + +\item [{-}{-}enable-static-tools] + \index[general]{--enable-static-tools } +Durch Setzen dieses Schalters werden die Hilfsprogramme des Storage-D\"{a}mons ({\bf bls}, {\bf bextract}, and {\bf bscan}) statisch gelinkt. Dadurch kann man sie auch verwenden, ohne dass die gemeinsamen Bibliotheken geladen sind. Wenn beim Linken Probleme im Verzeichnis {\bf src/stored} auftreten, sollten sie sich vergewissern, dass diese Option nicht gesetzt ist. Sie k\"{o}nnen durch Setzen des Schalters {\bf \verb{--{disable-static-tools} das statische Linken auch explizit unterdr\"{u}cken. + +\item [{-}{-}enable-static-fd] + \index[general]{--enable-static-fd } +Durch diese Option kompiliert der make-Prozess zus\"{a}tzlich zum Standard File-D\"{a}mon einen statischen Bacula File-D\"{a}mon. Diese statische Version hat alle ben\"{o}tigten Bibliotheken statisch gelinkt und wird f\"{u}r eine Notfallwiederherstellung auf einer leeren Festplatte verwendet. Diese Option kann meistens durch den Befehl {\bf make static-bacula-fd} ersetzt werden, den man im Verzeichnis {\bf src/filed} ausf\"{u}hren kann. Daneben ist auch die unten beschriebene Option {\bf \verb{--{enable-client-only} n\"{u}tzlich, wenn man nur einen einzelnen Client kompilieren will und die \"{u}brigen Programmteile nicht. + +Wird ein statisches Programm gelinkt, ben\"{o}tigt der Linker alle verwendeten Bibliotheken in statischen Versionen. Benutzer, die diese Option h\"{a}ufiger verwenden, werden auch h\"{a}ufiger Linker-Fehler haben. Als Erstes sollte man dann \"{u}berpr\"{u}fen, ob auf dem System eine statische ``glibc''-Bibliothek installiert ist. Als n\"{a}chstes sollte man `./configure'' ohne die Optionen {\bf {-}{-}openssl} und {\bf {-}{-}with-python} aufrufen, da hierbei zus\"{a}tzliche Bibliotheken ben\"{o}tigt werden. Man kann diese Optionen verwenden, doch muss man dann zus\"{a}tzliche statische Bibliotheken laden. + +\item [{-}{-}enable-static-sd] + \index[general]{--enable-static-sd } +Damit wird zus\"{a}tzlich zum Standard-Storage-D\"{a}mon ein statischer Storage-D\"{a}mon kompiliert. Die statische Version hat die Bibliotheksfunktionen fest eingebaut und ist bei der Datenwiederherstellung im Notfall hilfreich. + +Wird ein statisches Programm gelinkt, ben\"{o}tigt der Linker alle verwendeten Bibliotheken in statischen Versionen. Benutzer, die diese Option h\"{a}ufiger verwenden, werden auch h\"{a}ufiger Linker-Fehler haben. Als Erstes sollte man dann \"{u}berpr\"{u}fen, ob auf dem System eine statische ``glibc''-Bibliothek installiert ist. Als n\"{a}chstes sollte man `./configure'' ohne die Optionen {\bf {-}{-}openssl} und {\bf {-}{-}with-python} aufrufen, da hierbei zus\"{a}tzliche Bibliotheken ben\"{o}tigt werden. Man kann diese Optionen verwenden, doch muss man dann zus\"{a}tzliche statische Bibliotheken laden. 
+ +\item [{-}{-}enable-static-dir] + \index[general]{--enable-static-dir } +Damit wird zus\"{a}tzlich zum Standard-Director ein statischer Director kompiliert. Die statische Version hat die Bibliotheksfunktionen fest eingebaut und ist bei der Datenwiederherstellung im Notfall hilfreich. + +Wird ein statisches Programm gelinkt, ben\"{o}tigt der Linker alle verwendeten Bibliotheken in statischen Versionen. Benutzer, die diese Option h\"{a}ufiger verwenden, werden auch h\"{a}ufiger Linker-Fehler haben. Als Erstes sollte man dann \"{u}berpr\"{u}fen, ob auf dem System eine statische ``glibc''-Bibliothek installiert ist. Als n\"{a}chstes sollte man `./configure'' ohne die Optionen {\bf {-}{-}openssl} und {\bf {-}{-}with-python} aufrufen, da hierbei zus\"{a}tzliche Bibliotheken ben\"{o}tigt werden. Man kann diese Optionen verwenden, doch muss man dann zus\"{a}tzliche statische Bibliotheken laden. + +\item [{-}{-}enable-static-cons] + \index[general]{--enable-static-cons } +Damit werden zus\"{a}tzlich zur Standard-Console eine statische Console und statische GNOME-Console kompiliert. Die statischen Versionen haben die Bibliotheksfunktionen fest eingebaut und sind bei der Datenwiederherstellung im Notfall hilfreich. + +Wird ein statisches Programm gelinkt, ben\"{o}tigt der Linker alle verwendeten Bibliotheken in statischen Versionen. Benutzer, die diese Option h\"{a}ufiger verwenden, werden auch h\"{a}ufiger Linker-Fehler haben. Als Erstes sollte man dann \"{u}berpr\"{u}fen, ob auf dem System eine statische ``glibc''-Bibliothek installiert ist. Als n\"{a}chstes sollte man `./configure'' ohne die Optionen {\bf {-}{-}openssl} und {\bf {-}{-}with-python} aufrufen, da hierbei zus\"{a}tzliche Bibliotheken ben\"{o}tigt werden. Man kann diese Optionen verwenden, doch muss man dann zus\"{a}tzliche statische Bibliotheken laden. + +\item [{-}{-}enable-client-only] + \index[general]{--enable-client-only } +Durch Setzen dieses Schalters werden nur der File-D\"{a}mon und die von ihm ben\"{o}tigten Bibliotheken kompiliert. Keiner der anderen D\"{a}monen, nicht die Sicherungswerkzeuge oder die Console werden kompiliert. Daher wird mit dem Befehl {\bf make install} auch nur der File-D\"{a}mon installiert. Um alle D\"{a}monen zu kompilieren, m\"{u}ssen Sie eine Konfiguration ohne diese Option verwenden. Mit dieser Option wird die Kompilierung nur eines Client-Prozesses auf einem Client-Rechner sehr erleichtert. + +Wird ein statisches Programm gelinkt, ben\"{o}tigt der Linker alle verwendeten Bibliotheken in statischen Versionen. Benutzer, die diese Option h\"{a}ufiger verwenden, werden auch h\"{a}ufiger Linker-Fehler haben. Als Erstes sollte man dann \"{u}berpr\"{u}fen, ob auf dem System eine statische ``glibc''-Bibliothek installiert ist. Als n\"{a}chstes sollte man `./configure'' ohne die Optionen {\bf {-}{-}openssl} und {\bf {-}{-}with-python} aufrufen, da hierbei zus\"{a}tzliche Bibliotheken ben\"{o}tigt werden. Man kann diese Optionen verwenden, doch muss man dann zus\"{a}tzliche statische Bibliotheken laden. + +\item [{-}{-}enable-largefile] + \index[general]{--enable-largefile } +Mit diesem Schalter (voreingestellt) wird Bacula mit der Unterst\"{u}tzung f\"{u}r 64 Bit breite Adressen kompiliert, sofern dies Ihr Rechner unterst\"{u}tzt. Damit kann Bacula Dateien lesen und schreiben, die gr\"{o}{\ss}er sind als 2 GBytes. Dieses Feature kann durch setzen des Schalters {\bf \verb{--{disable-largefile} abgew\"{a}hlt werden. Damit sind nur 32 Bit breite Adressen m\"{o}glich. 
+ +\item [ {-}{-}disable-nls] + \index[general]{{-}{-}disable-nls} +Vorgabem\"{a}{\ss}ig verwendet Bacula ``GNU Native Language Support''-Bibliotheken (NLS). Auf manchen Rechnern sind diese Bibliotheken nicht verf\"{u}gbar oder funktionieren nicht richtig (besonders auf Nicht-Linux-Implementierungen). In diesen F\"{a}llen kann man durch Setzen von {\bf {-}{-}disable-nls} die Verwendung dieser Bibliotheken unterbinden. In diesem Fall benutzt Bacula Englisch. + +\item [{-}{-}with-sqlite=\lt{}Pfad/zu/SQLite\gt{}] + \index[general]{--with-sqlite } +Mit dieser Option wird die Benutzung eines SQLite-Datenbanksystems erm\"{o}glicht. Da Bacula an einem Standard-Speicherort ({\bf depkgs/sqlite}) sucht, wird der Pfad {\bf sqlite-path} normalerweise nicht angegeben. N\"{a}heres hierzu im Kapitel \ilink{SQLite installieren und konfigurieren }{_ChapterStart33} in diesem Handbuch. +Beachten Sie auch den Hinweis zur Option ``{-}{-}with-postgresql''. + +\item [ {-}{-}with-sqlite3=\lt{}Pfad/zu/sqlite3\gt{}] + \index[general]{{-}{-}with-sqlite3 } +Dies erlaubt die Verwendung von SQLite in der Version 3.x. Der Pfad ({\bf sqlite3-path}) muss normalerweise nicht gesetzt werden, da Bacula die ben\"{o}tigten Komponenten an den Standardspeicherorten ({\bf depkgs/sqlite3}) sucht. Im Kapitel \ilink{SQLite installieren und konfigurieren}{_ChapterStart33} dieses Handbuches finden Sie weitere Einzelheiten. + +\item [{-}{-}with-mysql=\lt{}Pfad/zu/MySQL\gt{}] + \index[general]{--with-mysql } +Mit dieser Option werden die Catalog-Dienste f\"{u}r Bacula kompiliert. Sie setzt voraus, dass MySQL bereits auf Ihrem Rechner l\"{a}uft, und erwartet, dass es im Verzeichnis, das Sie mit der Pfadangabe ({\bf mysql-path}) angeben, installiert ist. Wenn dieser Schalter nicht gesetzt ist, wird Bacula automatisch den Code der internen Bacula-Datenbank einbeziehen. Nach M\"{o}glichkeit empfehlen wir, diesen Schalter zu setzen. Wenn Sie ihn verwenden, installieren Sie bitte zuerst MySQL und lesen das Kapitel \ilink{MySQL installieren und konfigurieren }{_ChapterStart} in diesem Handbuch, bevor Sie mit der Konfiguration fortfahren. + +\item [{-}{-}with-postgresql=\lt{}Pfad/zu/PostgreSQL\gt{}] + \index[general]{--with-postgresql } +Dieser Schalter erfordert die Angabe des Pfades zum PostgreSQL-Programmverzeichnis, da Bacula ihn nicht von selbst finden kann. Zur Kompilierung mit PostgreSQL verwendet man einfach {\bf {-}{-}with-postgresql}. + +Um Bacula richtig zu konfigurieren, muss eine der vier unterst\"{u}tzten Datenbank-Optionen spezifiziert sein. Entweder also +``{-}{-}with-sqlite'', ``{-}{-}with-sqlite3'', ``{-}{-}with-mysql'' oder ``{-}{-}with-postgresql''. Andernfalls wird der ``./configure''-Prozess fehlschlagen. + +\item [ {-}{-}with-openssl=\lt{}path\gt{}] +Dieser Schalter wird ben\"{o}tigt, wenn Bacula TLS (ssl) verwenden soll. In der Regel muss der Pfad nicht spezifiziert werden, da der Konfigurationsprozess die OpenSSL-Bibliotheken an deren Standardorten sucht. Wenn OpenSSL aktiviert ist, gestattet Bacula eine sichere Kommunikation zwischen seinen D\"{a}monprozessen. Weitere Informationen zur Verwendung von TLS im Kapitel \ilink{Bacula TLS}{_ChapterStart61} in diesem Handbuch. + +\item [ {-}{-}with-python=\lt{}Pfad/zu/Python\gt{}] + \index[general]{{-}{-}with-python } +Mit dieser Option wird die Bacula-Unterst\"{u}tzung f\"{u}r Python aktiviert. Wird kein Pfad mit angegeben, sucht der Konfigurationsprozess Bibliotheken an den Standard-Installationsorten von Python 2.2, 2.3 und 2.4.
Wird die Bibliothek nicht gefunden, muss die Option mit dem Pfad zum Verzeichnis Ihrer Python-Bibliotheken aufgerufen werden. Im Kapitel \ilink{Python}{_ChapterStart60} sind Einzelheiten dazu, wie man Python-Scripting verwenden kann. + +\item [ {-}{-}with-libintl-prefix=\lt{}DIR\gt{}] + \index[general]{{-}{-}with-libintl-prefix} +Mit dieser Option durchsucht Bacula die Verzeichnisse ``DIR/include'' und ``DIR/lib'' nach den ``libintl''-Headern und -Bibliotheken, die es f\"{u}r den ``Native Language Support'' (NLS) ben\"{o}tigt. + +\item [{-}{-}enable-conio] + \index[general]{--enable-conio } +Teilt Bacula mit, die kleine, leichtgewichtige, ``readline'' ersetzende Routine zu kompilieren. Diese ist im allgemeinen sehr viel einfacher zu konfigurieren als ``readline'', ben\"{o}tigt aber entweder die ``termcap''- oder ``ncurses''-Bibliothek. + +\item [{-}{-}with-readline=\lt{}Pfad/zu/readline\gt{}] + \index[general]{--with-readline } +Teilt Bacula mit, wo {\bf readline} installiert ist. Sofern es Teil der Standard-Bibliothek ist, findet Bacula normalerweise ``readline''. Wird es nicht gefunden, und ist der Schalter \verb{--{with-readline gesetzt, wird readline deaktiviert. Diese Option betrifft Baculas Kompilierung. Mit Readline ist im der Console-Programm eine History und ein Editieren der Kommandozeile m\"{o}glich. Readline wird nicht mehr unterst\"{u}tzt. Sie sind daher bei Problemen auf sich allein gestellt. + +\item [ {-}{-}enable-readline] + \index[general]{--enable-readline } +Damit wird Bacula mitgeteilt, die Readline-Unterst\"{u}tung zu erm\"{o}glichen. Das Paket scheint sich in inkompatibler Weise von Version zu Version zu \"{a}ndern. Daher ist wegen der Vielzahl der Konfigurationsprobleme dieser Schalter normalerweise nicht gesetzt. + +\item [{-}{-}with-tcp-wrappers=\lt{}Pfad/zur/TCP-Wrapper/Bibliothek\gt{}] + \index[general]{--with-tcp-wrappers } +Damit wird spezifiziert, dass Bacula mit TCP-Wrappern (man hosts\_access(5)) kompiliert werden soll. Die Angabe des Pfades ist optional, da Bacula die Bibliotheken an den Standard-Speicherorten findet. Diese Option betrifft Baculas Kompilierung. Wenn Sie bei der Spezifikation der Einschr\"{a}nkungen in ihren {\bf /etc/hosts.allow}- und {\bf /etc/hosts.deny}-Dateien die {\bf twist}-Option (hosts\_options(5)) verwenden, wird sich der Bacula-Prozess beenden. +Beachten Sie bitte, dass Sie beim Einrichten Ihrer {\bf /etc/hosts.allow}- und {\bf /etc/hosts.deny}-Dateien die infrage kommenden Bacula-D\"{a}monen mit deren Namen aus der Konfigurationsdatei und nicht mit deren jeweiligen Programmnamen bezeichnen. + +Weitere Informationen zur Konfiguration und zum Test der TCP-Wrapper im Abschnitt \ilink{TCP Wrapper konfigurieren und testen}{wrappers} des Kapitels zur Sicherheit. + +\item [{-}{-}with-working-dir=\lt{}Pfad/zum/Arbeitsverzeichnis\gt{} ] + \index[general]{--with-working-dir } +Die Angabe dieser Option ist zwingend und spezifizert das Verzeichnis, in welches Bacula zwischen seinen Ausf\"{u}hrungen seine Dateien sichert. Wenn z.B. die interne Datenbank verwendet wird, werden deren Dateien hier abgelegt. Diese Option wird nur benutzt, um die Konfigurationsdateien der D\"{a}monen zu ver\"{a}ndern. Das Gleiche erreichen Sie, wenn Sie die Konfigurationsdateien nachtr\"{a}glich \"{a}ndern. Das Arbeitsverzeichnis wird bei der Installation nicht automatisch erstellt, so dass Sie sicherstellen m\"{u}ssen, dass es vor der ersten Benutzung von Bacula vorhanden ist. 
+ +\item [{-}{-}with-base-port=\lt{}Port-Nummer\gt{}] + \index[general]{--with-base-port } +Um funktionieren zu k\"{o}nnen, ben\"{o}tigt Bacula drei TCP/IP-Ports (einen f\"{u}r die Bacula-Console, einen f\"{u}r den Storage-D\"{a}mon und einen f\"{u}r den File-D\"{a}mon). Die Direktive {\bf \verb{--{with-baseport} weist automatisch drei Portnummern zu, die mit der Basisadresse beginnen, die Sie spezifizieren. Auch in den sich ergebenden Konfigurationsdateien k\"{o}nnen Sie die Portnummern \"{a}ndern. Sie m\"{u}ssen jedoch aufpassen, dass die Nummern in allen drei Konfigurationsdateien genau \"{u}bereinstimmen. Der Vorgabe-Basisport hat die Nummer 9101. Damit sind die Ports 9101 bis 9103 zugewiesen. Diese Portnummern (9101, 9102, 9103) wurden von der IANA Bacula offiziell zugeteilt. Durch Setzen dieser Option ver\"{a}ndern Sie nur die Konfigurationsdateien. Diese k\"{o}nnen Sie auch nach der Installation noch ver\"{a}ndern. + +\item [{-}{-}with-dump-email=\lt{}E-mail-Adresse\gt{}] + \index[general]{--with-dump-email } +Dieser Schalter spezifiziert die E-Mail-Adresse, an die alle ``core dumps'' gesendet werden, und wird normalerweise nur von Entwicklern verwendet. + +\item [{-}{-}with-pid-dir=\lt{}Pfad\gt{} ] + \index[general]{--with-pid-dir } +Damit wird jenes Verzeichnis spezifiziert, in welchem Bacula die Datei mit den Prozess-IDs w\"{a}hrend seiner Ausf\"{u}hrung ablegt. Vorgabem\"{a}{\ss}ig ist dies {\bf /var/run}. Dieses Verzeichnis wird bei der Installation nicht angelegt. Daher sollten Sie sicher sein, dass es vorhanden ist, bevor Sie Bacula zum ersten Mal verwenden. + +\item [{-}{-}with-subsys-dir=\lt{}Pfad\gt{}] + \index[general]{--with-subsys-dir } +Dieser Schalter spezifiziert den Ort, an dem Bacula die Subsystem-Lock-Datei w\"{a}hrend seiner Ausf\"{u}hrung ablegt. Vorgabe ist {\bf /var/run/subsys}. Stellen Sie sicher, dass Sie hierf\"{u}r und f\"{u}r das {\bf sbindir}-Verzeichnis nicht das gleiche Verzeichnis spezifizieren. Dieses Verzeichnis wird nur innerhalb der Autostart-Skripten verwendet. Das ``subsys''-Verzeichnis wird bei Baculas Installation nicht erstellt, so dass Sie selbst sicherstellen m\"{u}ssen, dass es erstellt ist, bevor Sie Bacula verwenden. + +\item [{-}{-}with-dir-password=\lt{}Passwort\gt{}] + \index[general]{--with-dir-password } +Mit diesem Schalter kann ein Passwort f\"{u}r den Zugang (in der Regel \"{u}ber das Console-Programm) zum Director spezifiziert werden. Ist der Schalter nicht gesetzt, generiert der Konfigurationsprozess ein zuf\"{a}lliges Passwort. + +\item [{-}{-}with-fd-password=\lt{}Passwort\gt{} ] + \index[general]{--with-fd-password } +Mit diesem Schalter kann ein Passwort f\"{u}r den Zugang zum File-D\"{a}mon spezifiziert werden (normalerweise vom Director aufgerufen). Wenn es nicht spezifiziert wurde, generiert der Konfigurationsprozess ein zuf\"{a}lliges Passwort. + +\item [{-}{-}with-sd-password=\lt{}Passwort\gt{} ] + \index[general]{--with-sd-password } +Mit diesem Schalter kann ein Passwort f\"{u}r den Zugang zum Storage-D\"{a}mon spezifiziert werden (normalerweise vom Director aufgerufen). Wenn es nicht spezifiziert wurde, generiert der Konfigurationsprozess ein zuf\"{a}lliges Passwort. + +\item [{-}{-}with-dir-user=\lt{}User\gt{} ] + \index[general]{--with-dir-user } + +Durch Setzen dieses Schalters kann die User-ID festgelegt werden, unter welcher der Director l\"{a}uft. Der Director-Prozess muss als root gestartet werden, doch muss er nicht unter root laufen.
Nach den ersten Initialisierungen kann er dem User \"{u}bergeben werden, dessen ID Sie hier spezifizieren. + +\item [ {-}{-}with-dir-group=\lt{}Group\gt{} ] + \index[general]{--with-dir-group } + +Durch Setzen dieses Schalters kann die Group-ID festgelegt werden unter welcher der Director l\"{a}uft. Der Director-Prozess muss als root gestartet werden, doch muss er nicht unter root laufen. Nach den ersten Initialisierungen kann er der Gruppe \"{u}bergeben werden, deren ID Sie hier spezifizieren. + +\item [{-}{-}with-sd-user=\lt{}User\gt{} ] + \index[general]{--with-sd-user } +Mit diesem Schalter kann die User-ID festgelegt werden unter welcher der Storage-D\"{a}mon l\"{a}uft. Der Storage-D\"{a}mon muss als root gestartet werden, doch muss er nicht unter root laufen. Nach den ersten Initialisierungen kann er dem User \"{u}bergeben werden, dessen ID Sie hier spezifizieren. Wenn Sie diese Option verwenden, m\"{u}ssen Sie auch sicherstellen, dass der Storageprozess alle Ger\"{a}te(Bandlaufwerke, usw.) verwenden darf, die er ben\"{o}tigt. + +\item [{-}{-}with-sd-group=\lt{}Group\gt{} ] + \index[general]{--with-sd-group } +Durch Setzen dieses Schalters kann die Group-ID festgelegt werden unter welcher der Storage-D\"{a}mon l\"{a}uft. Der Storage-D\"{a}mon muss als root gestartet werden, doch muss er nicht unter root laufen. Nach den ersten Initialisierungen kann er der Gruppe \"{u}bergeben werden, deren ID Sie hier spezifizieren. + +\item [{-}{-}with-fd-user=\lt{}User\gt{} ] + \index[general]{--with-fd-user } +Durch Setzen dieses Schalters kann die User-ID festgelegt werden unter welcher der File-D\"{a}mon l\"{a}uft. Der File-D\"{a}mon muss als root gestartet werden und muss in den meisten F\"{a}llen auch unter root laufen. In ganz besonderen F\"{a}llen kann mit dieser Option der File-D\"{a}mon-Prozess nach den ersten Initialisierungen einem User \"{u}bergeben werden, dessen ID Sie hier spezifizieren. + +\item [{-}{-}with-fd-group=\lt{}Group\gt{} ] + \index[general]{--with-fd-group } +Durch Setzen dieses Schalters kann die Group-ID festgelegt werden unter welcher der File-D\"{a}mon l\"{a}uft. Der File-D\"{a}mon muss als root gestartet werden und muss in den meisten F\"{a}llen auch unter root laufen. Trotzdem kann der File-D\"{a}mon-Prozess nach den ersten Initialisierungen der Gruppe \"{u}bergeben werden, deren ID Sie hier spezifizieren. + +\end{description} + +Beachten Sie bitte, dass durch Eingabe von {\bf ./configure \verb{--{help} noch viele andere Optionen angezeigt werden, diese aber bislang nicht implementiert sind. + +\section{Optionen, die wir f\"{u}r die meisten Systeme empfehlen} +\index[general]{Systeme!Empfohlenen Optionen f\"{u}r die meisten } +\index[general]{Optionen, die wir f\"{u}r die meisten Systeme empfehlen } + +Wir empfehlen f\"{u}r die meisten Systeme mit folgenden Optionen zu beginnen: + +\footnotesize +\begin{verbatim} +./configure \ + --enable-smartalloc \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --with-mysql=$HOME/mysql \ + --with-working-dir=$HOME/bacula/working +\end{verbatim} +\normalsize + +Wenn Sie Bacula lieber in ein Installationsverzeichnis installieren wollen, als es aus seinem Kompilationsverzeichnis heraus zu betreiben (wie es Entwickler tun) m\"{u}ssen Sie den Schalter \verb{--{sbindir and \verb{--{sysconfdir mit den entsprechenden Pfaden verwenden. 
Dies ist nicht notwendig, wenn Sie ``make install'' nicht verwenden, wie es meistens bei der Programm-Entwicklung der Fall ist. Der Installationsprozess erzeugt die mit ``sbindir'' und ``sysconfdir'' angegebenen Verzeichnisse, aber nicht jene, die als ``pid-dir'', ``subsys-dir'' oder ``working-dir'' spezifiziert wurden. Sie m\"{u}ssen selbst sicherstellen, dass diese existieren, bevor Bacula das erste Mal l\"{a}uft. Es folgt ein Beispiel daf\"{u}r, wie Kern das tut. + +\section{RedHat} +\index[general]{RedHat } + +Bei der Verwendung von SQLite: + +\footnotesize +\begin{verbatim} + +CFLAGS="-g -Wall" ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --enable-smartalloc \ + --with-sqlite=$HOME/bacula/depkgs/sqlite \ + --with-working-dir=$HOME/bacula/working \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --enable-gnome \ + --enable-conio +\end{verbatim} +\normalsize + +oder + +\footnotesize +\begin{verbatim} + +CFLAGS="-g -Wall" ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --enable-smartalloc \ + --with-mysql=$HOME/mysql \ + --with-working-dir=$HOME/bacula/working \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --enable-gnome \ + --enable-conio +\end{verbatim} +\normalsize + +oder, zum Schluss, eine vollst\"{a}ndig traditionelle RedHat-Linux-Installation: + +\footnotesize +\begin{verbatim} +CFLAGS="-g -Wall" ./configure \ + --prefix=/usr \ + --sbindir=/usr/sbin \ + --sysconfdir=/etc/bacula \ + --with-scriptdir=/etc/bacula \ + --enable-smartalloc \ + --enable-gnome \ + --with-mysql \ + --with-working-dir=/var/bacula \ + --with-pid-dir=/var/run \ + --enable-conio +\end{verbatim} +\normalsize +Beachten Sie bitte, dass Bacula davon ausgeht, dass die Verzeichnisse +/var/bacula, /var/run und /var/lock/subsys bereits existieren und es diese +w\"{a}hrend der Installation nicht automatisch erzeugt. + +Beachten Sie bitte, dass bei Benutzung einer AMD64-CPU, die unter 64-Bit-CentOS4 l\"{a}uft, mit gcc (GCC) 4.0.1 20050727 (Red Hat 4.0.1-5) ein Compiler-Bug auftritt, so dass Code erzeugt wird, der eine Segmentverletzung verursacht. Typischerweise macht sich dies zuerst beim Storage-D\"{a}mon bemerkbar. Eine L\"{o}sung ist es, Bacula ohne Optimierung zu kompilieren (normalerweise ist dies -O2). + +\section{Solaris} +\index[general]{Solaris } + +Um Bacula aus den Quellcodedateien zu erzeugen, muss auf dem Solaris-System bereits das folgende installiert sein (das ist es standardm\"{a}{\ss}ig nicht): +libiconv, gcc 3.3.2, stdc++, libgcc (wegen der stdc++- und gcc\_s-Bibliotheken), make 3.8 oder neuer. + +M\"{o}glicherweise muss die PATH-Umgebungsvariable um ``/usr/local/bin'' und ``/usr/ccs/bin'' (wegen ar) erg\"{a}nzt werden: + +\footnotesize +\begin{verbatim} +#!/bin/sh +CFLAGS="-g" ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --with-mysql=$HOME/mysql \ + --enable-smartalloc \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --with-working-dir=$HOME/bacula/working +\end{verbatim} +\normalsize + +Wie oben schon erw\"{a}hnt, erzeugt der Installationsprozess die mit ``sbindir'' und ``sysconfdir'' bezeichneten Verzeichnisse, falls sie nicht schon vorhanden sind. Die Verzeichnisse ``pid-dir'', ``subsys-dir'' und ``working-dir'' werden nicht automatisch erzeugt. Vergewissern Sie sich daher, dass sie existieren, bevor Bacula zum ersten Mal laufen soll.
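+
+Passend zu dem oben gezeigten Beispiel k\"{o}nnten diese Verzeichnisse etwa so angelegt werden (die Pfade entsprechen nur dem Beispiel und sind gegebenenfalls anzupassen):
+
+\footnotesize
+\begin{verbatim}
+mkdir -p $HOME/bacula/bin/working
+mkdir -p $HOME/bacula/working
+\end{verbatim}
+\normalsize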
+ +Beachten Sie bitte, dass Sie m\"{o}glicherweise die folgenden Pakete installieren m\"{u}ssen, um Bacula kompilieren zu k\"{o}nnen: +\footnotesize +\begin{verbatim} +SUNWbinutils, +SUNWarc, +SUNWhea, +SUNWGcc, +SUNWGnutls +SUNWGnutls-devel +SUNWGmake +SUNWgccruntime +SUNWlibgcrypt +SUNWzlib +SUNWzlibs +SUNWbinutilsS +SUNWGmakeS +SUNWlibm + +export +PATH=/usr/bin::/usr/ccs/bin:/etc:/usr/openwin/bin:/usr/local/bin:/usr/sfw/bin:/opt/sfw/bin:/usr/ucb:/usr/sbin +\end{verbatim} +\normalsize + +\section{FreeBSD} +\index[general]{FreeBSD } + +Unter \elink{The FreeBSD Diary}{http://www.freebsddiary.org/bacula.php} gibt es eine detailierte Beschreibung wie Bacula unter diesem Betriebssystem intalliert wird. Benutzer von FreeBSD, die eine Version von vor 4.9-STABLE (Montag, 29. Dezember 2003, 15:18:01 UTC) verwenden, sollten das Kapitel \ilink{Test der Bandlaufwerke}{FreeBSDTapes} in diesem Handbuch lesen. Darin sind \textbf{wichtige} Informationen, wie man das Bandlaufwerk so konfiguriert, dass es mit Bacula zusammenarbeitet. + +Wenn Sie Bacula zusammen mit MySQL verwenden, sollten Sie darauf achten, MySQL eher mit den Thread-Bibliotheken von FreeBSD als mit denen von Linux zu kompilieren, weil Bacula selbst normalerweise so kompiliert wird. Eine Mischung von Beiden wird m\"{o}glicherweise nicht funktionieren. + +\section{Win32} +\index[general]{Win32 } +Um die Win32-Version des File-Client zu installieren, lesen Sie bitte das Kapitel \ilink{Win32 Installation}{_ChapterStart7} in diesem Handbuch. + +\section{Windows-Systeme mit installiertem CYGWIN} +\label{Win32} +\index[general]{Windows-Systeme mit installiertem CYGWIN } +\index[general]{CYGWIN!Windows-Systeme mit installiertem } + +Seit der Version 1.34 verwendet Bacula f\"{u}r den Win32-File-D\"{a}mon CYGWIN nicht mehr. Er wird allerdings immer noch in einer CYGWIN-Umgebung kompiliert - m\"{o}glicherweise funktioniert das aber auch mit dem Visual C -Studio allein. Wenn Sie den Win32-File-D\"{a}mon selbst kompilieren wollen, ben\"{o}tigen sie Microsoft C++ in der Version 6.0 oder h\"{o}her. F\"{u}r Bacula in den Versionen vor 1.3 wurde CYGWIN verwendet. Einzelheiten zur Kompilierung stehen in der README-Datei im Verzeichnis ``src/win32''. + +Beachten Sie, dass, obwohl sich fast alle Elemente von Bacula unter Windows kompilieren lassen, nur der File-D\"{a}mon getestet und verwendet wurde. + +Beachten Sie auf jeden Fall die Installationsanweisungen des Kapitels \ilink{Win32-Installation}{_ChapterStart7} in diesem Dokument. + +\section{Kerns Konfigurations-Skript} +\index[general]{Skript!Kerns Konfigurations } +\index[general]{Kerns Konfigurations-Skript } + +Dieses Skript verwende ich f\"{u}r meinen ``produktiven'' Linux-Rechner: + +\footnotesize +\begin{verbatim} +#!/bin/sh +# This is Kern's configure script for Bacula +CFLAGS="-g -Wall" \ + ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --enable-smartalloc \ + --enable-gnome \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --with-mysql=$HOME/mysql \ + --with-working-dir=$HOME/bacula/bin/working \ + --with-dump-email=$USER \ + --with-smtp-host=mail.your-site.com \ + --with-baseport=9101 +exit 0 +\end{verbatim} +\normalsize + +Beachten Sie bitte, dass ich 9101 als Basis-Port definiere. Dadurch verwendet Bacula Port 9101 f\"{u}r die Director-Console, Port 9102 f\"{u}r die File-D\"{a}monen und Port 9103 f\"{u}r die Storage-D\"{a}monen. 
Diese Ports m\"{u}ssten auf allen Systemen verf\"{u}gbar sein, da sie von der IANA (Internet Assigned Numbers Authority) offiziell f\"{u}r Bacula reserviert wurden. Wir raten dringend, nur diese Ports zu verwenden, um Konflikte mit anderen Programmen zu vermeiden. Wenn Sie die Option {\bf \verb{--{with-baseport} nicht verwenden, ist dies die Voreinstellung. + +Eventuell k\"{o}nnen Sie auch noch das Folgende in Ihre {\bf /etc/services}-Datei eintragen, was das Erkennen der Verbindungen, die Bacula verwendet, erleichtert (z.B. mit netstat -a): + +\footnotesize +\begin{verbatim} +bacula-dir 9101/tcp +bacula-fd 9102/tcp +bacula-sd 9103/tcp +\end{verbatim} +\normalsize + +\section{Bacula installieren} +\index[general]{installieren!Bacula } +\index[general]{Bacula installieren } + +Bevor man die Konfigurations-Dateien bearbeitet, wird man Bacula in dessen Zielverzeichnisse installieren wollen. Dies geschieht mit: + +\footnotesize +\begin{verbatim} +make install +\end{verbatim} +\normalsize + +Wenn Bacula zuvor schon installiert worden war, werden die Programmdateien \"{u}berschrieben werden, die Konfigurationsdateien jedoch erhalten bleiben. An die Namen der ``neuen'' Konfigurationsdateien wird ein {\bf .new} angeh\"{a}ngt. Wenn Sie Bacula bereits installiert und betrieben hatten, werden Sie diese normalerweise verwerfen wollen oder ignorieren. + +\section{Einen File-D\"{a}mon oder Client-Prozess kompilieren} +\index[general]{Kompilierung!eines File-D\"{a}mons oder Client-Prozesses } +\index[general]{Einen File-D\"{a}mon oder Client-Prozess kompilieren } + +Wenn der Director und Storage-D\"{a}mon bei Ihnen auf einem Rechner l\"{a}uft und Sie die Daten eines anderen Rechners sichern wollen, brauchen Sie auf diesem Rechner eine Kopie des File-D\"{a}mons. Sind der Rechner und das Betriebsystem gleich, gen\"{u}gt es, die Programmdatei {\bf bacula-fd} und die Konfigurationsdatei {\bf bacula-fd.conf} zu kopieren und dann den Name und das Passwort in der Konfigurationsdatei anzupassen, sodass diese eindeutig sind. Die entsprechenden Erweiterungen muss man auch in der Konfigurationsdatei des Directors ({\bf bacula-dir.conf}) machen. + +Ist die Rechnerachitektur und/oder das Betriebsystem verschieden, so muss der File-D\"{a}mon auf dem Client-Rechner kompiliert werden. Man verwendet hierzu den gleichen {\bf ./configure}-Befehl wie f\"{u}r das Hauptprogramm und beginnt in einer neuen Kopie des Quellcode-Verzeichnisses oder indem man vor dem {\bf ./configure} ein {\bf make\ distclean} ausf\"{u}hrt. + +Da der File-D\"{a}mon nicht mit der Datenbank arbeitet, k\"{o}nnen die Optionen {\bf \verb{--{with-mysql} oder {\bf \verb{--{with-sqlite} entfernt werden. Durch die Verwendung des Schalters {\bf \verb{--{enable-client-only} werden nur die ben\"{o}tigten Bibliotheken und die Client-Programme erzeugt. Dadurch ist es nicht notwendig, die Datenbank-Programme zu installieren, nur um den File-D\"{a}mon zu erzeugen. Geben Sie zum Schluss einfach {\bf make} ein. Damit wird nur das Client-Programm erzeugt. + +\label{autostart} +\section{Auto-Start der D\"{a}mon-Prozesse} +\index[general]{D\"{a}mon-Prozesse!Auto-Start der } +\index[general]{Auto-Start der D\"{a}mon-Prozesse } + +Sollen die D\"{a}mon-Prozesse beim Booten Ihres Systems automatisch gestartet bzw. beendet werden (was sinnvoll ist), ist ein weiterer Schritt erforderlich. Als erstes muss der ``./configure''-Prozess Ihr System erkennen - es muss also unterst\"{u}tzt werden und darf nicht als {\bf unknown} erkannt sein. 
Dann m\"{u}ssen die plattformspezifischen Dateien wie folgt installiert werden: + +\footnotesize +\begin{verbatim} +(become root) +make install-autostart +\end{verbatim} +\normalsize + +Die M\"{o}glichkeit des Auto-Starts ist nur f\"{u}r Systeme implementiert, die wir offiziell unterst\"{u}tzen (momentan FreeBSD, RedHat/Fedora-Linux und Solaris) und wurde bislang nur auf Fedora-Linux vollst\"{a}ndig getestet. + +Mit dem Befehl {\bf make install-autostart} werden die entsprechenden Start-Skripte zusammen mit den notwendigen symbolischen Links installiert. Unter RedHat-Linux sind diese Skripte in den Verzeichnisssen {\bf /etc/rc.d/init.d/bacula-dir}, {\bf /etc/rc.d/init.d/bacula-fd} und {\bf /etc/rc.d/init.d/bacula-sd}. Der genaue Speicherort h\"{a}ngt vom verwendeten Beriebssystem ab. + +Wenn nur der File-D\"{a}mon installiert werden soll, k\"{o}nnen Sie dies mit folgendem Befehl tun: + +\footnotesize +\begin{verbatim} +make install-autostart-fd +\end{verbatim} +\normalsize + +\section{Weitere Hinweise zur Kompilierung} +\index[general]{Kompilierung!Weitere Hinweise zur } +\index[general]{Weitere Hinweise zur Kompilierung } + +Um eine Programmdatei in einem beliebigen Verzeichnis zu erzeugen, geben Sie einfach das folgende ein: + +\footnotesize +\begin{verbatim} +make +\end{verbatim} +\normalsize + +Um alle Objekt- und Programmdateien (auch die mit ``1'', ``2'' oder ``3'' bezeichneten Dateien, die Kern als tempor\"{a}re Dateien verwendet) geben Sie folgendes ein: + +\footnotesize +\begin{verbatim} +make clean +\end{verbatim} +\normalsize + +Um wirklich alles f\"{u}r eine Distribution zu bereinigen: + +\footnotesize +\begin{verbatim} +make distclean +\end{verbatim} +\normalsize + +Beachten Sie bitte, dass dies alle Makefiles l\"{o}scht und normalerweise auf der obersten Verzeichnisebene ausgef\"{u}hrt wird, um den Quellcode f\"{u}r eine Distribution vorzubereiten. Um dies r\"{u}ckg\"{a}ngig zu machen, muss {\bf ./configure} auch von der obersten Verzeichnisebene ausgef\"{u}hrt werden, da alle Makefiles gel\"{o}scht sind. + +Um einem Unterverzeichnis eine neue Datei hinzuzuf\"{u}gen, muss die Datei ``Makefile.in'' in jenem Verzeichnis bearbeitet werden. Danach gen\"{u}gt es, {\bf make} einzugeben. In den meisten F\"{a}llen erzeugt der make-Befehl ein neues Makefile aus ``Makefile.in''. In manchen F\"{a}llen muss der {\bf make}-Befehl wiederholt werden. In extremen F\"{a}llen wechselt man in die oberste Verzeichnisebene und gibt ein: {\bf make Makefiles}. + +Um Abh\"{a}ngigkeiten hinzuzuf\"{u}gen: + +\footnotesize +\begin{verbatim} +make depend +\end{verbatim} +\normalsize + +Mit {\bf make depend} werden die Abh\"{a}ngigkeiten der Header-Dateien aller Objekt-Dateien dem Makefile und der Datei ``Makefile.in" hinzugef\"{u}gt. Dieser Befehl sollte in allen Verzeichnissen ausgef\"{u}hrt werden, in welchen Sie die Abh\"{a}ngigkeiten \"{a}ndern. Normalerweise muss der Befehl nur ausgef\"{u}hrt werden, wenn sie Quell- oder Header-Dateien hinzuf\"{u}gen oder l\"{o}schen. {\bf make depend} wird normalerweise w\"{a}hrend des Konfigurations-Prozesses automatisch aufgerufen. + +Um zu installieren: + +\footnotesize +\begin{verbatim} +make install +\end{verbatim} +\normalsize + +Dieser Befehl wird verwendet, wenn Sie Bacula als Backup-System installieren wollen, nicht aber wenn Sie an Bacula selbst programmieren. +Nach Ausf\"{u}hren des Befehls {\bf make install} werden die folgenden Dateien auf Ihrem System installiert (mehr oder weniger). 
Welche Dateien und Verzeichnisse es im einzelnen sind, h\"{a}ngt von Ihrem {\bf ./configure}-Befehl ab (wird z.B. GNOME nicht konfiguriert, wird auch ``gnome-console'' und ``gnome-console.conf'' nicht installiert. Wenn Sie SQLite anstatt MySQL verwenden, werden einige der Dateien andere sein). + +\footnotesize +\begin{verbatim} +bacula +bacula-dir +bacula-dir.conf +bacula-fd +bacula-fd.conf +bacula-sd +bacula-sd.conf +bacula-tray-monitor +tray-monitor.conf +bextract +bls +bscanBacula +btape +btraceback +btraceback.gdb +bconsole +bconsole.conf +create_mysql_database +dbcheck +delete_catalog_backup +drop_bacula_tables +drop_mysql_tables +fd +gnome-console +gnome-console.conf +make_bacula_tables +make_catalog_backup +make_mysql_tables +mtx-changer +query.sql +bsmtp +startmysql +stopmysqlBacula +wx-console +wx-console.conf +\end{verbatim} +\normalsize + +\label{monitor} + +\section{Die Installation des Tray-Monitors} +\index[general]{Tray-Monitors!Die Installation des } +\index[general]{Die Installation des Tray-Monitors } + +Wenn Sie den Konfigurationsschalter {\bf \verb{--{enable-tray-monitor} verwendet und {\bf make install} ausgef\"{u}hrt haben, ist der Tray-Monitor schon installiert. + +Da Sie Ihre grafische Umgebung nicht als root betreiben (wenn doch, sollten sie das abstellen), m\"{u}ssen Sie den Usern Leserechte auf {\bf tray-monitor.conf} und Ausf\"{u}hrungsrechte f\"{u}r {\bf bacula-tray-monitor} geben. Dies ist kein Sicherheitsrisiko. + +Melden Sie sich bei Ihrer grafischen Umgebung an (KDE, Gnome oder eine andere), starten sie den {\bf bacula-tray-monitor} als gew\"{o}hnlicher Benutzer und achten Sie darauf, ob das Cassetten-Icon irgendwo auf Ihrem Bildschirm erscheint (gew\"{o}hnlich in der Task-Leiste). Tut es das nicht, werfen Sie einen Blick auf die unten aufgef\"{u}hrten Anweisungen entsprechend Ihrer Umgebung oder Ihres Window-Managers. + +\subsubsection*{GNOME} +\index[general]{GNOME } + +Ein System-Tray oder einen ``Benachrichtigungs-Bereich'' (um die GNOME-Terminologie zu verwenden), wird von GNOME seit der Version 2.2 unterst\"{u}tzt. Um sie zu aktivieren, klicken Sie rechts auf Ihre Kontrollleiste, \"{o}ffnen das Men\"{u} {\bf Add to this Panel}, dann auf {\bf Utility} und klicken schlie{\ss}lich auf {\bf Notification Area}. + +\subsubsection*{KDE} +\index[general]{KDE } + +Seit der Version 3.1 unterst\"{u}tzt KDE das System-Tray. Um es zu aktivieren, klicken Sie Ihre Kontrollleiste rechts, \"{o}ffnen das Men\"{u} {\bf Zur Kontrollleiste hinzuf\"{u}gen}, dann auf {\bf Miniprogramm} und klicken schlie{\ss}lich auf {\bf Systemabschnitt der Kontrollleiste}. + +\subsubsection*{Andere Fenster-Manager} +\index[general]{Fenster-Manager!Andere } +\index[general]{Andere Fenster-Manager } + +Lesen Sie die Dokumentation um zu erfahren, ob der Freedesktop System-Tray-Standard von Ihrem Fenster-Manager unterst\"{u}tzt wird und - wenn vorhanden - wie er aktiviert wird. + +\section{Die Bacula Konfigurations-Dateien bearbeiten} +\index[general]{Die Bacula Konfigurations-Dateien bearbeiten } +\index[general]{Bearbeiten!Die Bacula Konfigurations-Dateien } + +Schlagen Sie im Kapitel \ilink{Bacula konfigurieren}{_ChapterStart16} dieses Handbuches nach, wie sie die Konfigurationsdateien von Bacula einrichten. 
diff --git a/docs/manuals/de/install/latex2html-init.pl b/docs/manuals/de/install/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/de/install/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/de/install/messagesres.tex b/docs/manuals/de/install/messagesres.tex new file mode 100644 index 00000000..e6002d9d --- /dev/null +++ b/docs/manuals/de/install/messagesres.tex @@ -0,0 +1,372 @@ +%% +%% + +\chapter{Messages Resource} +\label{MessagesChapter} +\index[general]{Resource!Messages} +\index[general]{Messages Resource} + +The Messages resource defines how messages are to be handled and destinations +to which they should be sent. + +Even though each daemon has a full message handler, within the File daemon and +the Storage daemon, you will normally choose to send all the appropriate +messages back to the Director. This permits all the messages associated with a +single Job to be combined in the Director and sent as a single email message +to the user, or logged together in a single file. + +Each message that Bacula generates (i.e. that each daemon generates) has an +associated type such as INFO, WARNING, ERROR, FATAL, etc. Using the message +resource, you can specify which message types you wish to see and where they +should be sent. In addition, a message may be sent to multiple destinations. +For example, you may want all error messages both logged as well as sent to +you in an email. By defining multiple messages resources, you can have +different message handling for each type of Job (e.g. Full backups versus +Incremental backups). + +In general, messages are attached to a Job and are included in the Job report. +There are some rare cases, where this is not possible, e.g. when no job is +running, or if a communications error occurs between a daemon and the +director. In those cases, the message may remain in the system, and should be +flushed at the end of the next Job. However, since such messages are not +attached to a Job, any that are mailed will be sent to {\bf +/usr/lib/sendmail}. On some systems, such as FreeBSD, if your sendmail is in a +different place, you may want to link it to the the above location. + +The records contained in a Messages resource consist of a {\bf destination} +specification followed by a list of {\bf message-types} in the format: + +\begin{description} + +\item [destination = message-type1, message-type2, message-type3, ... ] +\index[dir]{destination} +\end{description} + +or for those destinations that need and address specification (e.g. email): + +\begin{description} + +\item [destination = address = message-type1, message-type2, + message-type3, ... ] +\index[dir]{destination} + + Where {\bf destination} is one of a predefined set of keywords that define + where the message is to be sent ({\bf stdout}, {\bf file}, ...), {\bf + message-type} is one of a predefined set of keywords that define the type of + message generated by {\bf Bacula} ({\bf ERROR}, {\bf WARNING}, {\bf FATAL}, + ...), and {\bf address} varies according to the {\bf destination} keyword, but + is typically an email address or a filename. 
+\end{description} + +The following are the list of the possible record definitions that can be used +in a message resource. + +\begin{description} + +\item [Messages] +\index[dir]{Messages} + Start of the Messages records. + +\item [Name = \lt{}name\gt{}] +\index[dir]{Name} + The name of the Messages resource. The name you specify here will be used to + tie this Messages resource to a Job and/or to the daemon. + +\label{mailcommand} +\item [MailCommand = \lt{}command\gt{}] +\index[dir]{MailCommand} + In the absence of this resource, Bacula will send all mail using the + following command: + +{\bf mail -s "Bacula Message" \lt{}recipients\gt{}} + +In many cases, depending on your machine, this command may not work. Using +the {\bf MailCommand}, you can specify exactly how to send the mail. During +the processing of the {\bf command}, normally specified as a quoted string, +the following substitutions will be used: + +\begin{itemize} +\item \%\% = \% +\item \%c = Client's name +\item \%d = Director's name +\item \%e = Job Exit code (OK, Error, ...) +\item \%i = Job Id +\item \%j = Unique Job name +\item \%l = Job level +\item \%n = Job name +\item \%r = Recipients +\item \%t = Job type (e.g. Backup, ...) + \end{itemize} + +The following is the command I (Kern) use. Note, the whole command should +appear on a single line in the configuration file rather than split as is +done here for presentation: + +{\bf mailcommand = "/home/kern/bacula/bin/bsmtp -h mail.example.com -f +\textbackslash{}"\textbackslash{}(Bacula\textbackslash{}) +\%r\textbackslash{}" -s \textbackslash{}"Bacula: \%t \%e of \%c +\%l\textbackslash{}" \%r"} + +Note, the {\bf bsmtp} program is provided as part of {\bf Bacula}. For +additional details, please see the +\ilink{ bsmtp -- Customizing Your Email Messages}{bsmtp} section of +the Bacula Utility Programs chapter of this manual. Please test any {\bf +mailcommand} that you use to ensure that your bsmtp gateway accepts the +addressing form that you use. Certain programs such as Exim can be very +selective as to what forms are permitted particularly in the from part. + +\item [OperatorCommand = \lt{}command\gt{}] +\index[fd]{OperatorCommand} + This resource specification is similar to the {\bf MailCommand} except that + it is used for Operator messages. The substitutions performed for the {\bf + MailCommand} are also done for this command. Normally, you will set this + command to the same value as specified for the {\bf MailCommand}. + +\item [\lt{}destination\gt{} = \lt{}message-type1\gt{}, + \lt{}message-type2\gt{}, ...] + \index[fd]{\lt{}destination\gt{}} + +Where {\bf destination} may be one of the following: + +\begin{description} + +\item [stdout] + \index[fd]{stdout} + Send the message to standard output. + +\item [stderr] + \index[fd]{stderr} + Send the message to standard error. + +\item [console] + \index[console]{console} + Send the message to the console (Bacula Console). These messages are held +until the console program connects to the Director. +\end{description} + +\item {\bf \lt{}destination\gt{} = \lt{}address\gt{} = + \lt{}message-type1\gt{}, \lt{}message-type2\gt{}, ...} + \index[console]{\lt{}destination\gt{}} + +Where {\bf address} depends on the {\bf destination}. + +The {\bf destination} may be one of the following: + +\begin{description} + +\item [director] + \index[dir]{director} + \index[general]{director} + Send the message to the Director whose name is given in the {\bf address} + field. 
Note, in the current implementation, the Director Name is ignored, and + the message is sent to the Director that started the Job. + +\item [file] +\index[dir]{file} +\index[general]{file} + Send the message to the filename given in the {\bf address} field. If the + file already exists, it will be overwritten. + +\item [append] +\index[dir]{append} +\index[general]{append} + Append the message to the filename given in the {\bf address} field. If the + file already exists, it will be appended to. If the file does not exist, it + will be created. + +\item [syslog] +\index[general]{syslog} + Send the message to the system log (syslog) using the facility specified in + the {\bf address} field. Note, for the moment, the {\bf address} field is + ignored and the message is always sent to the LOG\_DAEMON facility with + level LOG\_ERR. See {\bf man 3 syslog} for more details. Example: +\begin{verbatim} + syslog = all, !skipped +\end{verbatim} + +\item [mail] + \index[general]{mail} + Send the message to the email addresses that are given as a comma + separated list in the {\bf address} field. Mail messages are grouped + together during a job and then sent as a single email message when the + job terminates. The advantage of this destination is that you are + notified about every Job that runs. However, if you backup five or ten + machines every night, the volume of email messages can be important. + Some users use filter programs such as {\bf procmail} to automatically + file this email based on the Job termination code (see {\bf + mailcommand}). + +\item [mail on error] + \index[general]{mail on error} + Send the message to the email addresses that are given as a comma + separated list in the {\bf address} field if the Job terminates with an + error condition. MailOnError messages are grouped together during a job + and then sent as a single email message when the job terminates. This + destination differs from the {\bf mail} destination in that if the Job + terminates normally, the message is totally discarded (for this + destination). If the Job terminates in error, it is emailed. By using + other destinations such as {\bf append} you can ensure that even if the + Job terminates normally, the output information is saved. + +\item [mail on success] + \index[general]{mail on success} + Send the message to the email addresses that are given as a comma + separated list in the {\bf address} field if the Job terminates + normally (no error condition). MailOnSuccess messages are grouped + together during a job and then sent as a single email message when the + job terminates. This destination differs from the {\bf mail} + destination in that if the Job terminates abnormally, the message is + totally discarded (for this destination). If the Job terminates in + normally, it is emailed. + +\item [operator] + \index[general]{operator} + Send the message to the email addresses that are specified as a comma + separated list in the {\bf address} field. This is similar to {\bf + mail} above, except that each message is sent as received. Thus there + is one email per message. This is most useful for {\bf mount} messages + (see below). + +\item [console] + \index[general]{console} + Send the message to the Bacula console. + +\item [stdout] + \index[general]{stdout} + Send the message to the standard output (normally not used). + +\item [stderr] + \index[general]{stderr} + Send the message to the standard error output (normally not used). 
+ +\item [catalog] + \index[general]{catalog} + Send the message to the Catalog database. The message will be + written to the table named {\bf Log} and a timestamp field will + also be added. This permits Job Reports and other messages to + be recorded in the Catalog so that they can be accessed by + reporting software. Bacula will prune the Log records associated + with a Job when the Job records are pruned. Otherwise, Bacula + never uses these records internally, so this destination is only + used for special purpose programs (e.g. {\bf bweb}). + +\end{description} + + For any destination, the {\bf message-type} field is a comma separated + list of the following types or classes of messages: + +\begin{description} + +\item [info] + \index[general]{info} + General information messages. + +\item [warning] + \index[general]{warning} + Warning messages. Generally this is some unusual condition but not expected + to be serious. + +\item [error] + \index[general]{error} + Non-fatal error messages. The job continues running. Any error message should + be investigated as it means that something went wrong. + +\item [fatal] + \index[general]{fatal} + Fatal error messages. Fatal errors cause the job to terminate. + +\item [terminate] + \index[general]{terminate} + Message generated when the daemon shuts down. + +\item [notsaved] + \index[fd]{notsaved} + \index[general]{notsaved} + Files not saved because of some error. Usually because the file cannot be + accessed (i.e. it does not exist or is not mounted). + +\item [skipped] + \index[fd]{skipped} + \index[general]{skipped} + Files that were skipped because of a user supplied option such as an + incremental backup or a file that matches an exclusion pattern. This is + not considered an error condition such as the files listed for the {\bf + notsaved} type because the configuration file explicitly requests these + types of files to be skipped. For example, any unchanged file during an + incremental backup, or any subdirectory if the no recursion option is + specified. + +\item [mount] + \index[dir]{mount} + \index[general]{mount} + Volume mount or intervention requests from the Storage daemon. These + requests require a specific operator intervention for the job to + continue. + +\item [restored] + \index[fd]{restored} + \index[general]{restored} + The {\bf ls} style listing generated for each file restored is sent to + this message class. + +\item [all] + \index[general]{all} + All message types. + +\item [security] + \index[general]{security} + Security info/warning messages principally from unauthorized + connection attempts. + +\item [alert] + \index[general]{alert} + Alert messages. These are messages generated by tape alerts. + +\item [volmgmt] + \index[general]{volmgmt} + Volume management messages. Currently there are no volume mangement + messages generated. +\end{description} + +\end{description} + +The following is an example of a valid Messages resource definition, where +all messages except files explicitly skipped or daemon termination messages +are sent by email to enforcement@sec.com. In addition all mount messages +are sent to the operator (i.e. emailed to enforcement@sec.com). 
Finally +all messages other than explicitly skipped files and files saved are sent +to the console: + +\footnotesize +\begin{verbatim} +Messages { + Name = Standard + mail = enforcement@sec.com = all, !skipped, !terminate + operator = enforcement@sec.com = mount + console = all, !skipped, !saved +} +\end{verbatim} +\normalsize + +With the exception of the email address (changed to avoid junk mail from +robot's), an example Director's Messages resource is as follows. Note, the {\bf +mailcommand} and {\bf operatorcommand} are on a single line -- they had to be +split for this manual: + +\footnotesize +\begin{verbatim} +Messages { + Name = Standard + mailcommand = "bacula/bin/bsmtp -h mail.example.com \ + -f \"\(Bacula\) %r\" -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "bacula/bin/bsmtp -h mail.example.com \ + -f \"\(Bacula\) %r\" -s \"Bacula: Intervention needed \ + for %j\" %r" + MailOnError = security@example.com = all, !skipped, \ + !terminate + append = "bacula/bin/log" = all, !skipped, !terminate + operator = security@example.com = mount + console = all, !skipped, !saved +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/install/monitorconf.tex b/docs/manuals/de/install/monitorconf.tex new file mode 100644 index 00000000..20c70b9d --- /dev/null +++ b/docs/manuals/de/install/monitorconf.tex @@ -0,0 +1,341 @@ +%% +%% + +\chapter{Monitor Configuration} +\label{_MonitorChapter} +\index[general]{Monitor Configuration } +\index[general]{Configuration!Monitor } + +The Monitor configuration file is a stripped down version of the Director +configuration file, mixed with a Console configuration file. It simply +contains the information necessary to contact Directors, Clients, and Storage +daemons you want to monitor. + +For a general discussion of configuration file and resources including the +data types recognized by {\bf Bacula}, please see the +\ilink{Configuration}{ConfigureChapter} chapter of this manual. + +The following Monitor Resource definition must be defined: + +\begin{itemize} +\item + \ilink{Monitor}{MonitorResource} -- to define the Monitor's + name used to connect to all the daemons and the password used to connect to +the Directors. Note, you must not define more than one Monitor resource in +the Monitor configuration file. +\item At least one + \ilink{Client}{ClientResource1}, + \ilink{Storage}{StorageResource1} or +\ilink{Director}{DirectorResource2} resource, to define the +daemons to monitor. +\end{itemize} + +\section{The Monitor Resource} +\label{MonitorResource} +\index[general]{Monitor Resource } +\index[general]{Resource!Monitor } + +The Monitor resource defines the attributes of the Monitor running on the +network. The parameters you define here must be configured as a Director +resource in Clients and Storages configuration files, and as a Console +resource in Directors configuration files. + +\begin{description} + +\item [Monitor] + \index[fd]{Monitor } + Start of the Monitor records. + +\item [Name = \lt{}name\gt{}] + \index[fd]{Name } + Specify the Director name used to connect to Client and Storage, and the +Console name used to connect to Director. This record is required. + +\item [Password = \lt{}password\gt{}] + \index[fd]{Password } + Where the password is the password needed for Directors to accept the Console +connection. This password must be identical to the {\bf Password} specified +in the {\bf Console} resource of the +\ilink{Director's configuration}{DirectorChapter} file. This +record is required if you wish to monitor Directors. 
+ +\item [Refresh Interval = \lt{}time\gt{}] + \index[fd]{Refresh Interval } + Specifies the time to wait between status requests to each daemon. It can't +be set to less than 1 second, or more than 10 minutes, and the default value +is 5 seconds. +% TODO: what is format of the time? +% TODO: should the digits in this definition be spelled out? should +% TODO: this say "time-period-specification" above??) +\end{description} + +\section{The Director Resource} +\label{DirectorResource2} +\index[general]{Director Resource } +\index[general]{Resource!Director } + +The Director resource defines the attributes of the Directors that are +monitored by this Monitor. + +As you are not permitted to define a Password in this resource, to avoid +obtaining full Director privileges, you must create a Console resource in the +\ilink{Director's configuration}{DirectorChapter} file, using the +Console Name and Password defined in the Monitor resource. To avoid security +problems, you should configure this Console resource to allow access to no +other daemons, and permit the use of only two commands: {\bf status} and {\bf +.status} (see below for an example). + +You may have multiple Director resource specifications in a single Monitor +configuration file. + +\begin{description} + +\item [Director] + \index[fd]{Director } + Start of the Director records. + +\item [Name = \lt{}name\gt{}] + \index[fd]{Name } + The Director name used to identify the Director in the list of monitored +daemons. It is not required to be the same as the one defined in the Director's +configuration file. This record is required. + +\item [DIRPort = \lt{}port-number\gt{}] + \index[fd]{DIRPort } + Specify the port to use to connect to the Director. This value will most +likely already be set to the value you specified on the {\bf +\verb:--:with-base-port} option of the {\bf ./configure} command. This port must be +identical to the {\bf DIRport} specified in the {\bf Director} resource of +the +\ilink{Director's configuration}{DirectorChapter} file. The +default is 9101 so this record is not normally specified. + +\item [Address = \lt{}address\gt{}] + \index[fd]{Address } + Where the address is a host name, a fully qualified domain name, or a network +address used to connect to the Director. This record is required. +\end{description} + +\section{The Client Resource} +\label{ClientResource1} +\index[general]{Resource!Client } +\index[general]{Client Resource } + +The Client resource defines the attributes of the Clients that are monitored +by this Monitor. + +You must create a Director resource in the +\ilink{Client's configuration}{FiledConfChapter} file, using the +Director Name defined in the Monitor resource. To avoid security problems, you +should set the {\bf Monitor} directive to {\bf Yes} in this Director resource. + + +You may have multiple Director resource specifications in a single Monitor +configuration file. + +\begin{description} + +\item [Client (or FileDaemon)] + \index[fd]{Client (or FileDaemon) } + Start of the Client records. + +\item [Name = \lt{}name\gt{}] + \index[fd]{Name } + The Client name used to identify the Director in the list of monitored +daemons. It is not required to be the same as the one defined in the Client's +configuration file. This record is required. + +\item [Address = \lt{}address\gt{}] + \index[fd]{Address } + Where the address is a host name, a fully qualified domain name, or a network +address in dotted quad notation for a Bacula File daemon. This record is +required. 
+ +\item [FD Port = \lt{}port-number\gt{}] + \index[fd]{FD Port } + Where the port is a port number at which the Bacula File daemon can be +contacted. The default is 9102. + +\item [Password = \lt{}password\gt{}] + \index[fd]{Password } + This is the password to be used when establishing a connection with the File +services, so the Client configuration file on the machine to be backed up +must have the same password defined for this Director. This record is +required. +\end{description} + +\section{The Storage Resource} +\label{StorageResource1} +\index[general]{Resource!Storage } +\index[general]{Storage Resource } + +The Storage resource defines the attributes of the Storages that are monitored +by this Monitor. + +You must create a Director resource in the +\ilink{Storage's configuration}{StoredConfChapter} file, using the +Director Name defined in the Monitor resource. To avoid security problems, you +should set the {\bf Monitor} directive to {\bf Yes} in this Director resource. + + +You may have multiple Director resource specifications in a single Monitor +configuration file. + +\begin{description} + +\item [Storage] + \index[fd]{Storage } + Start of the Storage records. + +\item [Name = \lt{}name\gt{}] + \index[fd]{Name } + The Storage name used to identify the Director in the list of monitored +daemons. It is not required to be the same as the one defined in the Storage's +configuration file. This record is required. + +\item [Address = \lt{}address\gt{}] + \index[fd]{Address } + Where the address is a host name, a fully qualified domain name, or a network +address in dotted quad notation for a Bacula Storage daemon. This record is +required. + +\item [SD Port = \lt{}port\gt{}] + \index[fd]{SD Port } + Where port is the port to use to contact the storage daemon for information +and to start jobs. This same port number must appear in the Storage resource +of the Storage daemon's configuration file. The default is 9103. + +\item [Password = \lt{}password\gt{}] + \index[sd]{Password } + This is the password to be used when establishing a connection with the +Storage services. This same password also must appear in the Director +resource of the Storage daemon's configuration file. This record is required. + +\end{description} + +\section{Tray Monitor Security} +\index[general]{Tray Monitor Security} + +There is no security problem in relaxing the permissions on +tray-monitor.conf as long as FD, SD and DIR are configured properly, so +the passwords contained in this file only gives access to the status of +the daemons. It could be a security problem if you consider the status +information as potentially dangerous (I don't think it is the case). + +Concerning Director's configuration: \\ +In tray-monitor.conf, the password in the Monitor resource must point to +a restricted console in bacula-dir.conf (see the documentation). So, if +you use this password with bconsole, you'll only have access to the +status of the director (commands status and .status). +It could be a security problem if there is a bug in the ACL code of the +director. + +Concerning File and Storage Daemons' configuration:\\ +In tray-monitor.conf, the Name in the Monitor resource must point to a +Director resource in bacula-fd/sd.conf, with the Monitor directive set +to Yes (once again, see the documentation). +It could be a security problem if there is a bug in the code which check +if a command is valid for a Monitor (this is very unlikely as the code +is pretty simple). 
+ + +\section{Sample Tray Monitor configuration} +\label{SampleConfiguration1} +\index[general]{Sample Tray Monitor configuration} + +An example Tray Monitor configuration file might be the following: + +\footnotesize +\begin{verbatim} +# +# Bacula Tray Monitor Configuration File +# +Monitor { + Name = rufus-mon # password for Directors + Password = "GN0uRo7PTUmlMbqrJ2Gr1p0fk0HQJTxwnFyE4WSST3MWZseR" + RefreshInterval = 10 seconds +} + +Client { + Name = rufus-fd + Address = rufus + FDPort = 9102 # password for FileDaemon + Password = "FYpq4yyI1y562EMS35bA0J0QC0M2L3t5cZObxT3XQxgxppTn" +} +Storage { + Name = rufus-sd + Address = rufus + SDPort = 9103 # password for StorageDaemon + Password = "9usxgc307dMbe7jbD16v0PXlhD64UVasIDD0DH2WAujcDsc6" +} +Director { + Name = rufus-dir + DIRport = 9101 + address = rufus +} +\end{verbatim} +\normalsize + +\subsection{Sample File daemon's Director record.} +\index[general]{Sample File daemon's Director record. } +\index[general]{Record!Sample File daemon's Director } + +Click +\ilink{here to see the full example.}{SampleClientConfiguration} + + +\footnotesize +\begin{verbatim} +# +# Restricted Director, used by tray-monitor to get the +# status of the file daemon +# +Director { + Name = rufus-mon + Password = "FYpq4yyI1y562EMS35bA0J0QC0M2L3t5cZObxT3XQxgxppTn" + Monitor = yes +} +\end{verbatim} +\normalsize + +\subsection{Sample Storage daemon's Director record.} +\index[general]{Record!Sample Storage daemon's Director } +\index[general]{Sample Storage daemon's Director record. } + +Click +\ilink{here to see the full example.}{SampleConfiguration} + +\footnotesize +\begin{verbatim} +# +# Restricted Director, used by tray-monitor to get the +# status of the storage daemon +# +Director { + Name = rufus-mon + Password = "9usxgc307dMbe7jbD16v0PXlhD64UVasIDD0DH2WAujcDsc6" + Monitor = yes +} +\end{verbatim} +\normalsize + +\subsection{Sample Director's Console record.} +\index[general]{Record!Sample Director's Console } +\index[general]{Sample Director's Console record. } + +Click +\ilink{here to see the full +example.}{SampleDirectorConfiguration} + +\footnotesize +\begin{verbatim} +# +# Restricted console used by tray-monitor to get the status of the director +# +Console { + Name = Monitor + Password = "GN0uRo7PTUmlMbqrJ2Gr1p0fk0HQJTxwnFyE4WSST3MWZseR" + CommandACL = status, .status +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/install/quickstart.tex b/docs/manuals/de/install/quickstart.tex new file mode 100644 index 00000000..aa53365f --- /dev/null +++ b/docs/manuals/de/install/quickstart.tex @@ -0,0 +1,181 @@ +%% +%% + + +\chapter{Mit Bacula beginnen} +\label{QuickStartChapter} +\index[general]{Mit Bacula beginnen } + +Wenn Sie wie ich sind, wollen Sie dass Bacula sofort l\"{a}uft, damit Sie ein Gefühl für das Programm bekommen und sich erst sp\"{a}ter mit den Details befassen. Dieses Kapitel m\"{o}chte genau dieses erreichen: Das Programm ohne die ganzen Einzelheiten rasch zum Laufen zu bringen. Wenn Sie den Abschnitt über Pools, Volumes und Labels überspringen wollen, k\"{o}nnen Sie ihn sp\"{a}ter immer noch nachholen, aber lesen Sie bitte bis zum Ende des Kapitels und beachten Sie die Anweisungen zum Test Ihres Bandlaufwerkes genau. + +Wir gehen davon aus, dass Sie es geschafft haben, Bacula zu kompilieren und zu installieren. Wenn nicht, werfen sie vielleicht zuerst einen Blick auf die +\ilink{System-Anforderungen}{SysReqs} und dann auf das Kapitel +\ilink{Bacula kompilieren und installieren}{_ChapterStart17} in diesem Handbuch. 
+ +\label{JobsandSchedules} +\section{Jobs und Zeitpl\"{a}ne verstehen} +\index[general]{Jobs!verstehen} +\index[general]{Zeitpl\"{a}ne!verstehen} + +Um Bacula so anpassungsf\"{a}hig wie m\"{o}glich zu machen, bestehen die Anweisungen, die sein Verhalten bestimmen, aus verschiedenen Teilen. Die wichtigste Direktive ist die Job-Resource, welche jeweils eine Sicherungsaufgabe beschreibt. Ein Sicherungs-Job besteht im Allgemeinen aus einem FileSet, einem (Sicherungs-)Client und einem Zeitplan mit einer oder mehreren Arten und Zeiten der Sicherung, einem Pool und zus\"{a}tzlichen Instruktionen. Mit anderen Worten: Mit dem FileSet wird bestimmt, was gesichert werden soll, mit dem Client, wer sichern soll, der Zeitplan bestimmt, wann dies geschehen soll, und der Pool, wohin gesichert werden soll (z.B. auf welches Volume). +Typischerweise bestimmt jeweils eine Kombination FileSet/Client einen Job. Die meisten der Direktiven wie FileSets, Pools und Zeitpl\"{a}ne k\"{o}nnen für mehrere Jobs verwendet werden und so beliebig kombiniert werden. Sie k\"{o}nnten z.B. zwei verschiedene Job-Definitionen (resources) haben, welche die Daten verschiedener Server sichern, dabei aber den gleichen Zeitplan, das gleiche FileSet (auf beiden Rechnern werden die gleichen Verzeichnisse gesichert) und vielleicht sogar die gleichen Pools nutzen. Der Zeitplan wird festlegen, welche Art der Sicherung wann l\"{a}uft (z.B. montags eine Vollsicherung, an den übrigen Wochentagen inkrementielle Sicherung), und wenn mehr als ein Job den gleichen Zeitplan hat, wird die Job-Priorit\"{a}t bestimmen, welcher Job tats\"{a}chlich als erster l\"{a}uft. Wenn Sie viele Jobs haben, werden Sie m\"{o}glicherweise JobDefs benutzen wollen, in denen Sie Vorgaben für alle Jobs festlegen, die dann in den einzelnen Job-Resourcen individuell angepasst werden k\"{o}nnen, es Ihnen aber ersparen, für jeden Job die gleichen Parameter zu definieren. Zus\"{a}tzlich zu den durch die FileSets festgelegten zu sichernden Dateien sollte es auch einen Job geben, der Ihre Catalog-Dateien sichert. + +Schlie{\ss}lich gibt es neben den Sicherungs-Jobs Wiederherstellungs-Jobs, Verifikationen und administrative Jobs, die andere Direktiven erfordern. + +\label{PoolsVolsLabels} +\section{Pools, Volumes und Labels verstehen} +\index[general]{Verstehen!von Pools, Volumes und Labels} +\index[general]{Pools, Volumes und Labels verstehen } + +Wenn Sie bisher Programme wie {\bf tar} zur Datensicherung verwendet haben, werden Ihnen die Begriffe Pools, Volumes und Labels auf den ersten Blick vielleicht etwas verwirrend vorkommen. Ein Volume ist ein einzelnes physikalisches Band (oder m\"{o}glicherweise eine einzelne Datei), auf das Bacula die Daten Ihrer Sicherung schreibt. Pools sind Gruppen von Volumes, so dass eine Sicherung nicht auf die Gr\"{o}{\ss}e eines einzelnen Volumes (die L\"{a}nge eines Bandes) beschr\"{a}nkt ist. Daher werden Sie bei der Definition eines Jobs eher einen Pool anstatt einzelner Volumes spezifizieren. Bacula wird das n\"{a}chste verfügbare Volume dem Pool entnehmen und Sie auffordern, es zu mounten. + +W\"{a}hrend die grundlegenden Eigenschaften eines Pools in der Pool-Resource des Directors festgelegt sind, werden die Daten der realen Pools im Bacula-Catalog gehalten. Er enth\"{a}lt alle Informationen der Pool-Resourcen und auch die Informationen über alle Volumes, die einem Pool zugefügt wurden. Ein Volume wird normalerweise mit dem {\bf label}-Befehl des Konsolen-Programms dem Pool hinzugefügt.
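+Das kann zum Beispiel so aussehen (Storage-, Pool- und Volume-Namen sind hier
+frei erfunden und dienen nur der Veranschaulichung):
+
+\footnotesize
+\begin{verbatim}
+* label storage=File volume=Daily-0001 pool=Daily
+\end{verbatim}
+\normalsize
+
+Wird der {\bf label}-Befehl ohne Argumente aufgerufen, fragt die Console die
+fehlenden Angaben interaktiv ab.
+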
+ +Für jedes Volume h\"{a}lt Bacula eine ziemliche Menge von Catalog-Informationen vor, wie z.B. den Zeitpunkt des ersten Lesens/Beschreibens, den Zeitpunkt des letzten Lesens/Beschreibens, die Anzahl der Dateien, die es enth\"{a}lt, die Anzahl der Mounts, usw. + +Bevor Bacula ein Volume beschreibt, muss das physikalische Volume eine digitale Kennzeichnung erhalten, damit Bacula sicher sein kann, dass das richtige Volume gemountet ist. Dies erledigt normalerweise der {\bf label}-Befehl des Konsolen-Programms. + +Das Vorgehen, zuerst einen Pool zu schaffen, dann Volumes hinzuzufügen und die Volumes digital zu kennzeichnen, mag zu Anfang mühselig erscheinen, ist aber ganz einfach und erlaubt es, mehrere Volumes zu verwenden (anstatt auf die Speicherkapazit\"{a}t eines Bandes beschr\"{a}nkt zu sein). Durch Pools wird man bei der Sicherung auch ausgesprochen flexibel. Man kann sich z.B. einen ``t\"{a}glichen'' Pool für inkrementielle und einen ``w\"{o}chentlichen'' Pool für Vollsicherungen anlegen. Sind bei der Definition der Sicherungsjobs die richtigen Pools angegeben, wird Bacula niemals einen Tagesjob in ein Volume des w\"{o}chentlichen Pools schreiben oder umgekehrt und Ihnen stets sagen, wann welches Band ben\"{o}tigt wird. + +Weiteres zu Pools im Abschnitt \ilink{Pool-Resource}{PoolResource} des Kapitels ``Director-Konfiguration''. Auch in diesem Kapitel werden wir sp\"{a}ter auf dieses Thema zurückkommen. + +\section{Baculas Konfigurations-Dateien einrichten} +\label{config} +\index[general]{Baculas Konfigurations-Dateien einrichten } +\index[general]{einrichten!von Baculas Konfigurations-Dateien } + +Wenn Sie Bacula zum ersten Mal verwenden, müssen Sie, nachdem Sie den entsprechenden {\bf ./configure}-Befehl, ein {\bf make} und ein {\bf make install} ausgeführt haben, gültige Konfigurationsdateien für den Director, den File-D\"{a}mon, den Storage-D\"{a}mon und die Console erstellen. Wenn Sie sich nach unseren Empfehlungen gerichtet haben, finden Sie in Ihrem Installationsverzeichnis sowohl Vorgabe-Konfigurationsdateien als auch die ausführbaren Dateien der D\"{a}monen. In jedem Fall sind die Programmdateien in jenem Verzeichnis, welches bei der Ausführung des {\bf ./configure}-Befehls mit der Option {\bf \verb:--:sbindir} angegeben wurde, und die Konfigurationsdateien in jenem Verzeichnis, welches mit der {\bf \verb:--:sysconfdir}-Option angegeben wurde. + + +Wenn Sie Bacula zum ersten Mal installieren, werden Sie etwas Zeit brauchen, um die Konfigurationsdateien so zu ver\"{a}ndern, dass sie zu Ihrer Umgebung passen. Das wird mit sich bringen, dass Sie Bacula einige Male starten und wieder beenden müssen, bis alles stimmt. Verzweifeln Sie nicht! Sind die Konfigurationsdateien einmal erstellt, werden Sie diese nur noch selten \"{a}ndern und auch Bacula nicht sehr oft starten oder stoppen müssen. Die meiste Arbeit wird darin bestehen, B\"{a}nder zu wechseln, wenn sie voll sind. + +\subsection{Die Konfiguration des Console-Programms} +\index[general]{Konfiguration des Console-Programms } +\index[general]{Console-Programm!die Konfiguration des } + +Das Console-Programm wird vom Administrator benutzt, um mit dem Director-Prozess zu interagieren und Jobs manuell zu starten und zu beenden oder Informationen zu einzelnen Jobs zu erhalten. + +Die Konfigurationsdatei der Console ist in jenem Verzeichnis, das mit der {\bf +\verb:--:sysconfdir}-Option bei der Ausführung des {\bf ./configure}-Befehls spezifiziert wurde, und hei{\ss}t vorgabem\"{a}{\ss}ig {\bf console.conf}.
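+Der Inhalt dieser Datei besteht im Wesentlichen aus einer Director-Resource.
+Eine stark vereinfachte Skizze (Name, Adresse und Passwort sind frei erfunden)
+k\"{o}nnte etwa so aussehen:
+
+\footnotesize
+\begin{verbatim}
+Director {
+  Name = foobaz-dir
+  DIRport = 9101
+  Address = foobaz.example.com
+  Password = "Console-Passwort"
+}
+\end{verbatim}
+\normalsize
+
+Das hier angegebene Passwort muss mit dem Passwort \"{u}bereinstimmen, das in
+der Konfigurationsdatei des Directors hinterlegt ist.
+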
+ +Wenn Sie auch die GNOME-Console mit der {\bf \verb:--:enable-gnome}-Option kompiliert haben, finden Sie auch hierfür eine Vorgabe-Konfigurationsdatei, die {\bf gnome-console.conf} hei{\ss}t. + +Gleiches gilt für die wxWidgets-Console, die mit der {\bf +\verb:--:enable-wx-console}-Option kompiliert wird und deren Vorgabe-Konfigurationsdatei {\bf wx-console.conf} ist. + +Benutzen Sie Bacula zum ersten Mal, müssen Sie diese Dateien nicht \"{a}ndern, da brauchbare Vorgabewerte schon gesetzt sind. + +\subsubsection*{Die Konfiguration des Monitor-Programms} +\index[general]{Monitor-Programm!die Konfiguration des } +\index[general]{Konfiguration des Monitor-Programms } + +Das Monitor-Programm erscheint typischerweise als Icon in der Kontrollleiste. Wird dieses zu einem Fenster vergr\"{o}{\ss}ert, liefert es dem Administrator Informationen über den Director, den Sicherungsstatus des lokalen Rechners oder jeden anderen konfigurierten D\"{a}mon-Prozess. + +\addcontentsline{lof}{figure}{Bacula Tray Monitor} +\includegraphics{./Bacula-tray-monitor.eps} + +Die Abbildung zeigt ein Fenster des Tray-Monitors, der für drei D\"{a}mon-Prozesse konfiguriert wurde. Wenn man auf die Schaltfl\"{a}chen in der oberen rechten Ecke des Fensters klickt, sieht man den Zustand jedes einzelnen Prozesses. Die Abbildung zeigt den Zustand des momentan ausgew\"{a}hlten Storage-D\"{a}mons (MainSD). + +Die Konfigurationsdatei des Monitor-Programms befindet sich in jenem Verzeichnis, das bei Ausführung des {\bf ./configure}-Befehls mit der Option {\bf \verb:--:sysconfdir} angegeben wurde. In der Regel müssen Sie als Erstbenutzer die Berechtigung für diese Datei \"{a}ndern, um Benutzern, die keine root-Rechte haben, zu erlauben, den Monitor zu starten, da diese Anwendung unter dem gleichen Benutzer laufen muss wie die grafische Umgebung (vergessen Sie nicht, nicht-root-Benutzern die Ausführung von {\bf bacula-tray-monitor} zu erlauben). Solange Sie die Vorgabewerte verwenden, ist dies kein Sicherheitsproblem. + +\subsubsection*{Die Konfiguration des File-D\"{a}mon} +\index[general]{File-D\"{a}mon!die Konfiguration des} +\index[general]{Konfiguration des File-D\"{a}mon } + +Der File-D\"{a}mon ist ein Programm, das auf jedem (Client-)Rechner l\"{a}uft. Auf Anforderung des Directors sucht er die zu sichernden Dateien und schickt sie (bzw. ihre Daten) an den Storage-D\"{a}mon. + +Die Konfigurationsdatei des File-D\"{a}mon ist in jenem Verzeichnis, das bei Ausführung des {\bf ./configure}-Befehls mit der Option {\bf \verb:--:sysconfdir} angegeben wurde. Vorgabem\"{a}{\ss}ig hei{\ss}t diese Datei {\bf bacula-fd.conf}. Normalerweise muss für erste Versuche hier nichts ge\"{a}ndert werden, da vernünftige Vorgabewerte gesetzt sind. Will man allerdings die Daten von mehreren Rechnern sichern, muss auf jedem dieser Rechner ein File-D\"{a}mon mit einer eigenen Konfigurationsdatei installiert sein. Die Daten aller dieser File-D\"{a}mons müssen in der Konfigurationsdatei des Directors erscheinen. + +\subsubsection*{Die Konfiguration des Directors} +\index[general]{Director!die Konfiguration des } +\index[general]{Die Konfiguration des Directors} + +Der Director ist das zentrale Steuerungsprogramm aller anderen D\"{a}mon-Prozesse. Er terminiert und überwacht alle Sicherungsjobs. + +Die Konfigurationsdatei des Directors liegt in jenem Verzeichnis, das durch die Option {\bf \verb:--:sysconfdir} bei der Ausführung des {\bf ./configure}-Befehls angegeben wurde. Der Name dieser Konfigurationsdatei ist normalerweise {\bf bacula-dir.conf}.
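+Als grobe Orientierung folgt eine stark verk\"{u}rzte Skizze zweier Resourcen
+aus dieser Datei (alle Namen, Pfade und Passw\"{o}rter sind frei erfunden):
+
+\footnotesize
+\begin{verbatim}
+FileSet {
+  Name = "Standard-Dateien"
+  Include {
+    Options { signature = MD5 }
+    File = /home
+    File = /etc
+  }
+}
+
+Client {
+  Name = foobaz-fd
+  Address = foobaz.example.com
+  FDPort = 9102
+  Catalog = MyCatalog
+  Password = "FD-Passwort"
+}
+\end{verbatim}
+\normalsize
+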
+ +Im Allgemeinen muss darin nur die Ressource ``FileSet'' ge\"{a}ndert werden, so dass ihre {\bf Include}-Direktive mindestens eine Zeile mit einem gültigen Verzeichnis (oder einer Datei) enth\"{a}lt, die/das zu sichern ist. + +Wenn Sie kein DLT-Bandlaufwerk haben, werden Sie m\"{o}glicherweise die Storage-Resource \"{a}ndern wollen, so dass diese Ihrem tats\"{a}chlichen Sicherungsger\"{a}t mehr entspricht. Sie k\"{o}nnen hier immer die tats\"{a}chlichen Namen verwenden und k\"{o}nnen diese auch beliebig zuweisen, doch müssen sie mit jenen übereinstimmen, die in der Konfigurationsdatei des Storage-D\"{a}mon angegeben sind. + +M\"{o}glicherweise wollen Sie auch die E-Mailadresse zur Benachrichtigung von der Vorgabe {\bf root} auf Ihre eigene \"{a}ndern. + +Schlie{\ss}lich brauchen Sie, wenn Sie mehrere Rechner sichern wollen, für jedes System einen eigenen File-D\"{a}mon bzw. Client und müssen seinen Namen, seine Adresse und ein Passwort spezifizieren. Wir meinen, dass es die Fehlersuche sehr erleichtert, wenn wir den D\"{a}monen den Namen des Rechners geben und ein {\bf -fd} anh\"{a}ngen. Wenn Ihr Rechner also z.B. {\bf foobaz} hei{\ss}t, würden Sie den File-D\"{a}mon {\bf foobaz-fd} nennen. Der Director k\"{o}nnte {\bf foobaz-dir} hei{\ss}en und der Storage-D\"{a}mon {\bf foobaz-sd}. +Jede Ihrer Bacula-Komponenten \textbf{muss} einen eindeutigen Namen haben. Wenn Sie alle gleich benennen, werden Sie - abgesehen davon, dass sie nicht wissen werden, welcher D\"{a}mon Ihnen welche Botschaft schickt - eigenartige Fehlermeldungen erhalten, da die Namen ihrer Tempor\"{a}rdateien nicht eindeutig sind, sofern sie das gleiche Arbeitsverzeichnis benutzen. + +\subsubsection*{Die Konfiguration des Storage-D\"{a}mon} +\index[general]{Daemon!Configuring the Storage } +\index[general]{Die Konfiguration des Storage-D\"{a}mon} + +Auf Veranlassung des Director-Prozesses ist der Storage-D\"{a}mon für die Übernahme der Daten vom File-D\"{a}mon und ihrer Speicherung auf dem Sicherungsmedium verantwortlich, bzw. im Falle einer Wiederherstellung für das Finden und die Übergabe der Daten an den File-D\"{a}mon. + +Die Konfigurationsdatei der Storage-D\"{a}mons ist in dem Verzeichnis, das bei Ausführung des {\bf ./configure}-Befehls mit der {\bf \verb{--{sysconfdir}-Option angegeben wurde und hei{\ss}t vorgabem\"{a}{\ss}ig {\bf bacula-sd.conf}. Bearbeiten Sie diese Datei, damit sie die korrekten Archivierungsger\"{a}tenamen für jedes Ihrer Bandger\"{a}te enth\"{a}lt. Wenn bei der Konfiguration Ihr System richtig erkannt wurde, werden sie schon richtig gesetzt sein. Die Namen dieser Storage-Resourcen und der Media Type müssen mit jenen übereinstimmen, die in der Konfigurationsdatei des Directors stehen. Wenn Sie in eine Datei anstatt auf ein Band sichern wollen, muss als Archive-Ger\"{a}t ein Verzeichnis angegeben sein, in dem dann die Volumes erzeugt werden und schlie{\ss}lich die Dateien, sobald ein Volume gelabelt wird. + +\label{ConfigTesting} +\section{Test der Konfigurationsdateien} +\index[general]{Test der Konfigurationsdateien} +\index[general]{Konfigurationsdateien!Test der } + +Sie k\"{o}nnen die Konfigurationsdateien auf korrekte Syntax testen, indem sie den entsprechenden D\"{a}mon mit der {\bf -t}-Option starten. Der D\"{a}mon wird die Konfigurationsdatei abarbeiten, gegebenenfalls eine Fehlermeldung ausgeben und sich dann beenden. Das folgende Beispiel geht davon aus, dass die Programm- und die Konfigurationsdateien im gleichen Verzeichnis installiert sind. 
+ +\footnotesize +\begin{verbatim} +cd +./bacula-dir -t -c bacula-dir.conf +./bacula-fd -t -c bacula-fd.conf +./bacula-sd -t -c bacula-sd.conf +./bconsole -t -c bconsole.conf +./gnome-console -t -c gnome-console.conf +./wx-console -t -c wx-console.conf +su -c "./bacula-tray-monitor -t -c tray-monitor.conf" +\end{verbatim} +\normalsize + +Hiermit werden alle Konfigurationsdateien der wichtigsten Programme getestet. Sind diese in Ordnung, beendet sich das Programm, ohne irgendetwas auszugeben. Beachten Sie bitte, dass je nach gew\"{a}hlten Konfigurationsoptionen einige oder sogar alle der letzten drei Befehle auf Ihrem System nicht verfügbar sein werden. Wenn Sie die ausführbaren Dateien in die üblichen Unix-Verzeichnisse statt in ein einziges Verzeichnis installiert haben, müssen Sie die obigen Befehle entsprechend anpassen (das ``./'' vor dem Befehlsnamen weglassen und den Pfad vor den Namen der Konfigurationsdatei angeben). + +\label{TapeTesting} + +\section{Test der Kompatibilit\"{a}t von Bacula mit Ihrem Bandlaufwerk} +\index[general]{Bandlaufwerk! Kompatibilit\"{a}tstest} +\index[general]{Test der Kompatibilit\"{a}t von Bacula mit Ihrem Bandlaufwerk } + +Bevor Sie viel Zeit mit Bacula verschwenden, um schlie{\ss}lich herauszufinden, dass das Programm doch nicht mit Ihrem Bandlaufwerk zusammenarbeitet, lesen Sie bitte das Kapitel \ilink{btape -- Test Ihres Bandlaufwerkes}{_ChapterStart27} in diesem Handbuch. + +Wenn Sie ein neueres SCSI-Bandlaufwerk unter Linux oder Solaris benutzen, wird Bacula vermutlich funktionieren, aber probieren Sie das lieber vorher aus. Benutzer von FreeBSD (und m\"{o}glicherweise anderer xBSD-Varianten) müssen das oben erw\"{a}hnte Kapitel lesen. Für FreeBSD gibt es unter \elink{The FreeBSD Diary}{http://www.freebsddiary.org/bacula.php} eine eingehende Beschreibung, wie man Bacula auf Ihrem System zum Laufen bringt. Benutzer von FreeBSD in einer Version vor 4.9-STABLE vom Montag, dem 29.12.2003, 15:18:01, die vorhaben, ein Bandlaufwerk zu verwenden, sollten au{\ss}erdem die Datei {\bf platforms/freebsd/pthreads-fix.txt} in Baculas Hauptverzeichnis lesen. Darin sind wichtige Informationen zur Kompatibilit\"{a}t von Bacula und Ihrem System. + +\label{notls} + +\section{Das /lib/tls Verzeichnis entfernen} +\index[general]{Das /lib/tls Verzeichnis entfernen } + +Die neue Pthreads-Bibliothek {\bf /lib/tls}, welche standardm\"{a}{\ss}ig von neueren ``RedHat''-Systemen (Kernelversion 2.4.x) installiert wird, ist fehlerhaft. Dieses Verzeichnis muss entfernt oder umbenannt werden; erst nach einem anschlie{\ss}enden Neustart ist Bacula dann lauff\"{a}hig. Geschieht dies nicht, wird sich Bacula nach etwa einer Woche Laufzeit entweder für l\"{a}ngere Zeitspannen oder dauerhaft blockieren. Man wird hier wohl eher die entsprechende Umgebungsvariable überschreiben, anstatt das Verzeichnis /lib/tls zu entfernen. Mehr zu diesem Problem im Kapitel \ilink{Unterstützte Betriebssysteme}{SupportedOSes}. + +Auf Systemen mit Kernel-Version 2.6.x scheint dieses Problem nicht aufzutreten. + +\label{Running1} + +\section{Bacula in Betrieb} +\index[general]{Bacula in Betrieb } + +Der vielleicht wichtigste Teil beim Betrieb von Bacula ist die F\"{a}higkeit, Dateien wiederherzustellen. Wenn Sie dies nicht wenigstens einmal ausprobiert haben, bevor Sie tats\"{a}chlich gezwungen sind, es zu tun, werden Sie viel mehr unter Druck stehen und dazu neigen, Fehler zu machen, als wenn Sie diesen Vorgang schon einmal getestet haben.
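+Ein solcher Test l\"{a}sst sich im Console-Programm zum Beispiel mit dem
+{\bf restore}-Befehl ansto{\ss}en; der Client-Name ist hier nur ein Platzhalter,
+und Bacula f\"{u}hrt anschlie{\ss}end durch einen interaktiven Auswahldialog:
+
+\footnotesize
+\begin{verbatim}
+* restore client=foobaz-fd
+\end{verbatim}
+\normalsize
+
+In der mitgelieferten Beispielkonfiguration werden die ausgew\"{a}hlten Dateien
+dabei nicht an ihren Originalort, sondern unterhalb eines eigenen
+Wiederherstellungs-Verzeichnisses abgelegt (siehe die {\bf Where}-Direktive des
+Restore-Jobs).
+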
+ +Um eine Vorstellung davon zu bekommen, wie man Bacula in kurzer Zeit zum Laufen bringt, empfehlen wir \textbf{dringend}, das Beispiel im Kapitel \ilink{Running Bacula Chapter}{_ChapterStart1} in diesem Handbuch nachzuvollziehen, wo im Einzelnen erkl\"{a}rt wird, wie man Bacula laufen l\"{a}sst. + +\section{Log Rotation} +\index[general]{Rotation!Log } +\index[general]{Log Rotation } + +Wenn Sie die vorgegebene {\bf bacula-dir.conf} oder eine Abwandlung davon benutzen, werden Sie bemerken, dass alle Ausgaben von Bacula in eine Datei geschrieben werden. Um zu verhindern, dass diese Datei ohne Grenze w\"{a}chst, empfehlen wir, die Datei {\bf logrotate} aus dem Verzeichnis {\bf scripts/logrotate} nach {\bf /etc/logrotate.d/bacula} zu kopieren. Dadurch wird die Logdatei einmal im Monat rotiert und h\"{o}chstens fünf Monate lang erhalten. Um die Logrotation Ihren Wünschen anzupassen, k\"{o}nnen Sie diese Datei bearbeiten. + +\section{Log Watch} +\index[general]{Watch!Log} +\index[general]{Log Watch} +Auf manchen Systemen wie RedHat und Fedora l\"{a}uft jede Nacht ein Logwatch-Programm, das Ihre Log-Dateien analysiert und per E-Mail berichtet. Wenn Sie die Ausgaben Ihrer Bacula-Sicherungsjobs diesen Berichten hinzufügen wollen, werfen Sie einen Blick in das Verzeichnis {\bf scripts/logwatch}. In der {\bf README}-Datei in diesem Verzeichnis wird kurz erkl\"{a}rt, wie man es installiert und welche Ausgaben zu erwarten sind. + +\section{Disaster Recovery} +\index[general]{Recovery!Disaster } +\index[general]{Disaster Recovery } + +Wenn Sie vorhaben, Bacula eher als Werkzeug zur Wiederherstellung Ihres Systems im Notfall zu verwenden, als nur dazu, besch\"{a}digte oder verlorengegangene Dateien wiederherzustellen, werden Sie vielleicht das Kapitel \ilink{Disaster Recovery Using Bacula Chapter}{_ChapterStart38} in diesem Handbuch lesen wollen. + +Auf jeden Fall raten wir Ihnen dringend, die Wiederherstellung einiger gesicherter Dateien zu testen, anstatt zu warten, bis ein Notfall eintritt. diff --git a/docs/manuals/de/install/security.tex b/docs/manuals/de/install/security.tex new file mode 100644 index 00000000..7866410a --- /dev/null +++ b/docs/manuals/de/install/security.tex @@ -0,0 +1,332 @@ +%% +%% + +\chapter{Bacula Security Issues} +\label{SecurityChapter} +\index[general]{Bacula Security Issues} +\index[general]{Security} +\index[general]{Issues!Bacula Security} + +\begin{itemize} +\item Security means being able to restore your files, so read the + \ilink{Critical Items Chapter}{Critical} of this manual. +\item The Clients ({\bf bacula-fd}) must run as root to be able to access all + the system files. +\item It is not necessary to run the Director as root. +\item It is not necessary to run the Storage daemon as root, but you must + ensure that it can open the tape drives, which are often restricted to root + access by default. In addition, if you do not run the Storage daemon as root, + it will not be able to automatically set your tape drive parameters on most + OSes since these functions, unfortunately, require root access. +\item You should restrict access to the Bacula configuration files, so that + the passwords are not world-readable. The {\bf Bacula} daemons are password + protected using CRAM-MD5 (i.e. the password is not sent across the network). + This will ensure that not everyone can access the daemons. It is a reasonably + good protection, but can be cracked by experts.
+\item If you are using the recommended ports 9101, 9102, and 9103, you will + probably want to protect these ports from external access using a firewall + and/or using tcp wrappers ({\bf etc/hosts.allow}). +\item By default, all data that is sent across the network is unencrypted. + However, Bacula does support TLS (transport layer security) and can + encrypt transmitted data. Please read the + \ilink{TLS (SSL) Communications Encryption}{CommEncryption} + section of this manual. +\item You should ensure that the Bacula working directories are readable and + writable only by the Bacula daemons. +\item If you are using {\bf MySQL} it is not necessary for it to run with + {\bf root} permission. +\item The default Bacula {\bf grant-mysql-permissions} script grants all + permissions to use the MySQL database without a password. If you want + security, please tighten this up! +\item Don't forget that Bacula is a network program, so anyone anywhere on + the network with the console program and the Director's password can access + Bacula and the backed up data. +\item You can restrict what IP addresses Bacula will bind to by using the + appropriate {\bf DirAddress}, {\bf FDAddress}, or {\bf SDAddress} records in + the respective daemon configuration files. +\item Be aware that if you are backing up your database using the default + script, if you have a password on your database, it will be passed as + a command line option to that script, and any user will be able to see + this information. If you want it to be secure, you will need to pass it + by an environment variable or a secure file. + + See also \ilink{Backing Up Your Bacula + Database - Security Considerations }{BackingUpBaculaSecurityConsiderations} + for more information. +\end{itemize} + + +\section{Backward Compatibility} +\index[general]{Backward Compatibility} +One of the major goals of Bacula is to ensure that you can restore +tapes (I'll use the word tape to include disk Volumes) that you wrote years +ago. This means that each new version of Bacula should be able to read old +format tapes. The first problem you will have is to ensure that the +hardware is still working some years down the road, and the second +problem will be to ensure that the media will still be good, then +your OS must be able to interface to the device, and finally Bacula +must be able to recognize old formats. All the problems except the +last are ones that we cannot solve, but by careful planning you can. + +Since the very beginning of Bacula (January 2000) until today (December +2005), there have been two major Bacula tape formats. The second format +was introduced in version 1.27 in November of 2002, and it has not +changed since then. In principle, Bacula can still read the original +format, but I haven't tried it lately so who knows ... + +Though the tape format is fixed, the kinds of data that we can put on the +tapes are extensible, and that is how we added new features +such as ACLs, Win32 data, encrypted data, ... Obviously, an older +version of Bacula would not know how to read these newer data streams, +but each newer version of Bacula should know how to read all the +older streams. + +If you want to be 100% sure that you can read old tapes, you +should: + +1. Try reading old tapes from time to time -- e.g. at least once +a year. + +2. Keep statically linked copies of every version of Bacula that you use +in production then if for some reason, we botch up old tape compatibility, you +can always pull out an old copy of Bacula ... 
+ +The second point is probably overkill but if you want to be sure, it may +save you someday. + + + +\label{wrappers} +\section{Configuring and Testing TCP Wrappers} +\index[general]{Configuring and Testing TCP Wrappers} +\index[general]{TCP Wrappers} +\index[general]{Wrappers!TCP} +\index[general]{libwrappers} + +TCP Wrappers are implemented if you turn them on when configuring +({\bf ./configure \verb:--:with-tcp-wrappers}). +With this code enabled, you may control who may access your +daemons. This control is done by modifying the file: {\bf +/etc/hosts.allow}. The program name that {\bf Bacula} uses when +applying these access restrictions is the name you specify in the +daemon configuration file (see below for examples). +You must not use the {\bf twist} option in your {\bf +/etc/hosts.allow} or it will terminate the Bacula daemon when a +connection is refused. + +The exact name of the package you need loaded to build with TCP wrappers +depends on the system. For example, +on SuSE, the TCP wrappers libraries needed to link Bacula are +contained in the tcpd-devel package. On Red Hat, the package is named +tcp\_wrappers. + +Dan Langille has provided the following information on configuring and +testing TCP wrappers with Bacula. + +If you read hosts\_options(5), you will see an option called twist. This +option replaces the current process by an instance of the specified shell +command. Typically, something like this is used: + +\footnotesize +\begin{verbatim} +ALL : ALL \ + : severity auth.info \ + : twist /bin/echo "You are not welcome to use %d from %h." +\end{verbatim} +\normalsize + +The libwrap code tries to avoid {\bf twist} if it runs in a resident process, +but that test will not protect the first hosts\_access() call. This will +result in the process (e.g. bacula-fd, bacula-sd, bacula-dir) being terminated +if the first connection to their port results in the twist option being +invoked. The potential, and I stress potential, exists for an attacker to +prevent the daemons from running. This situation is eliminated if your +/etc/hosts.allow file contains an appropriate rule set. The following example +is sufficient: + +\footnotesize +\begin{verbatim} +undef-fd : localhost : allow +undef-sd : localhost : allow +undef-dir : localhost : allow +undef-fd : ALL : deny +undef-sd : ALL : deny +undef-dir : ALL : deny +\end{verbatim} +\normalsize + +You must adjust the names to be the same as the Name directives found +in each of the daemon configuration files. They are, in general, not the +same as the binary daemon names. It is not possible to use the +daemon names because multiple daemons may be running on the same machine +but with different configurations. + +In these examples, the Director is undef-dir, the +Storage Daemon is undef-sd, and the File Daemon is undef-fd. Adjust to suit +your situation. The above example rules assume that the SD, FD, and DIR all +reside on the same box. If you have a remote FD client, then the following +rule set on the remote client will suffice: + +\footnotesize +\begin{verbatim} +undef-fd : director.example.org : allow +undef-fd : ALL : deny +\end{verbatim} +\normalsize + +where director.example.org is the host which will be contacting the client +(ie. the box on which the Bacula Director daemon runs). The use of "ALL : +deny" ensures that the twist option (if present) is not invoked. To properly +test your configuration, start the daemon(s), then attempt to connect from an +IP address which should be able to connect. 
You should see something like +this: + +\footnotesize +\begin{verbatim} +$ telnet undef 9103 +Trying 192.168.0.56... +Connected to undef.example.org. +Escape character is '^]'. +Connection closed by foreign host. +$ +\end{verbatim} +\normalsize + +This is the correct response. If you see this: + +\footnotesize +\begin{verbatim} +$ telnet undef 9103 +Trying 192.168.0.56... +Connected to undef.example.org. +Escape character is '^]'. +You are not welcome to use undef-sd from xeon.example.org. +Connection closed by foreign host. +$ +\end{verbatim} +\normalsize + +then twist has been invoked and your configuration is not correct and you need +to add the deny statement. It is important to note that your testing must +include restarting the daemons after each connection attempt. You can also +tcpdchk(8) and tcpdmatch(8) to validate your /etc/hosts.allow rules. Here is a +simple test using tcpdmatch: + +\footnotesize +\begin{verbatim} +$ tcpdmatch undef-dir xeon.example.org +warning: undef-dir: no such process name in /etc/inetd.conf +client: hostname xeon.example.org +client: address 192.168.0.18 +server: process undef-dir +matched: /etc/hosts.allow line 40 +option: allow +access: granted +\end{verbatim} +\normalsize + +If you are running Bacula as a standalone daemon, the warning above can be +safely ignored. Here is an example which indicates that your rules are missing +a deny statement and the twist option has been invoked. + +\footnotesize +\begin{verbatim} +$ tcpdmatch undef-dir 10.0.0.1 +warning: undef-dir: no such process name in /etc/inetd.conf +client: address 10.0.0.1 +server: process undef-dir +matched: /etc/hosts.allow line 91 +option: severity auth.info +option: twist /bin/echo "You are not welcome to use + undef-dir from 10.0.0.1." +access: delegated +\end{verbatim} +\normalsize + +\section{Running as non-root} +\index[general]{Running as non-root } + +Security advice from Dan Langille: +% TODO: don't use specific name + +% TODO: don't be too specific on operating system + +% TODO: maybe remove personalization? + +It is a good idea to run daemons with the lowest possible privileges. In +other words, if you can, don't run applications as root which do not have to +be root. The Storage Daemon and the Director Daemon do not need to be root. +The File Daemon needs to be root in order to access all files on your system. +In order to run as non-root, you need to create a user and a group. Choosing +{\tt bacula} as both the user name and the group name sounds like a good idea +to me. + +The FreeBSD port creates this user and group for you. +Here is what those entries looked like on my FreeBSD laptop: + +\footnotesize +\begin{verbatim} +bacula:*:1002:1002::0:0:Bacula Daemon:/var/db/bacula:/sbin/nologin +\end{verbatim} +\normalsize + +I used vipw to create this entry. I selected a User ID and Group ID of 1002 +as they were unused on my system. + +I also created a group in /etc/group: + +\footnotesize +\begin{verbatim} +bacula:*:1002: +\end{verbatim} +\normalsize + +The bacula user (as opposed to the Bacula daemon) will have a home directory +of {\tt /var/db/bacula} which is the default location for the Bacula +database. + +Now that you have both a bacula user and a bacula group, you can secure the +bacula home directory by issuing this command: + +\footnotesize +\begin{verbatim} +chown -R bacula:bacula /var/db/bacula/ +\end{verbatim} +\normalsize + +This ensures that only the bacula user can access this directory. 
It also +means that if we run the Director and the Storage daemon as bacula, those +daemons also have restricted access. This would not be the case if they were +running as root. + +It is important to note that the storage daemon actually needs to be in the +operator group for normal access to tape drives etc (at least on a FreeBSD +system, that's how things are set up by default) Such devices are normally +chown root:operator. It is easier and less error prone to make Bacula a +member of that group than it is to play around with system permissions. + +Starting the Bacula daemons + +To start the bacula daemons on a FreeBSD system, issue the following command: + +\footnotesize +\begin{verbatim} +/usr/local/etc/rc.d/bacula-dir start +/usr/local/etc/rc.d/bacula-sd start +/usr/local/etc/rc.d/bacula-fd start +\end{verbatim} +\normalsize + +To confirm they are all running: + +\footnotesize +\begin{verbatim} +$ ps auwx | grep bacula +root 63418 0.0 0.3 1856 1036 ?? Ss 4:09PM 0:00.00 + /usr/local/sbin/bacula-fd -v -c /usr/local/etc/bacula-fd.conf +bacula 63416 0.0 0.3 2040 1172 ?? Ss 4:09PM 0:00.01 + /usr/local/sbin/bacula-sd -v -c /usr/local/etc/bacula-sd.conf +bacula 63422 0.0 0.4 2360 1440 ?? Ss 4:09PM 0:00.00 + /usr/local/sbin/bacula-dir -v -c /usr/local/etc/bacula-dir.conf +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/install/setup.sm b/docs/manuals/de/install/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/de/install/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/de/install/storedconf.tex b/docs/manuals/de/install/storedconf.tex new file mode 100644 index 00000000..34133bf9 --- /dev/null +++ b/docs/manuals/de/install/storedconf.tex @@ -0,0 +1,1374 @@ +%% +%% + +\chapter{Storage Daemon Configuration} +\label{StoredConfChapter} +\index[general]{Storage Daemon Configuration} +\index[general]{Configuration!Storage Daemon} + +The Storage Daemon configuration file has relatively few resource definitions. +However, due to the great variation in backup media and system capabilities, +the storage daemon must be highly configurable. As a consequence, there are +quite a large number of directives in the Device Resource definition that +allow you to define all the characteristics of your Storage device (normally a +tape drive). Fortunately, with modern storage devices, the defaults are +sufficient, and very few directives are actually needed. + +Examples of {\bf Device} resource directives that are known to work for a +number of common tape drives can be found in the {\bf +\lt{}bacula-src\gt{}/examples/devices} directory, and most will also be listed +here. + +For a general discussion of configuration file and resources including the +data types recognized by {\bf Bacula}, please see the +\ilink{Configuration}{ConfigureChapter} chapter of this manual. 
The +following Storage Resource definitions must be defined: + +\begin{itemize} +\item + \ilink{Storage}{StorageResource} -- to define the name of the + Storage daemon. +\item + \ilink{Director}{DirectorResource1} -- to define the Director's + name and his access password. +\item + \ilink{Device}{DeviceResource} -- to define the + characteristics of your storage device (tape drive). +\item + \ilink{Messages}{MessagesChapter} -- to define where error and + information messages are to be sent. +\end{itemize} + +\section{Storage Resource} +\label{StorageResource} +\index[general]{Resource!Storage} +\index[general]{Storage Resource} + +In general, the properties specified under the Storage resource define global +properties of the Storage daemon. Each Storage daemon configuration file must +have one and only one Storage resource definition. + +\begin{description} + +\item [Name = \lt{}Storage-Daemon-Name\gt{}] + \index[sd]{Name} + \index[sd]{Directive!Name} + Specifies the Name of the Storage daemon. This directive is required. + +\item [Working Directory = \lt{}Directory\gt{}] + \index[sd]{Working Directory} + \index[sd]{Directive!Working Directory} + This directive is mandatory and specifies a directory in which the Storage + daemon may put its status files. This directory should be used only by {\bf + Bacula}, but may be shared by other Bacula daemons provided the names + given to each daemon are unique. This directive is + required + +\item [Pid Directory = \lt{}Directory\gt{}] + \index[sd]{Pid Directory} + \index[sd]{Directive!Pid Directory} + This directive is mandatory and specifies a directory in which the Director + may put its process Id file files. The process Id file is used to shutdown + Bacula and to prevent multiple copies of Bacula from running simultaneously. + This directive is required. Standard shell expansion of the {\bf Directory} + is done when the configuration file is read so that values such as {\bf + \$HOME} will be properly expanded. + + Typically on Linux systems, you will set this to: {\bf /var/run}. If you are + not installing Bacula in the system directories, you can use the {\bf Working + Directory} as defined above. + +\item [Heartbeat Interval = \lt{}time-interval\gt{}] + \index[sd]{Heartbeat Interval} + \index[sd]{Directive!Heartbeat Interval} + \index[general]{Heartbeat Interval} + \index[general]{Broken pipe} + This directive defines an interval of time in seconds. When + the Storage daemon is waiting for the operator to mount a + tape, each time interval, it will send a heartbeat signal to + the File daemon. The default interval is zero which disables + the heartbeat. This feature is particularly useful if you + have a router such as 3Com that does not follow Internet + standards and times out an valid connection after a short + duration despite the fact that keepalive is set. This usually + results in a broken pipe error message. + +\item [Client Connect Wait = \lt{}time-interval\gt{}] + \index[sd]{Connect Wait} + \index[sd]{Directive!Connect Wait} + \index[general]{Client Connect Wait} + This directive defines an interval of time in seconds that + the Storage daemon will wait for a Client (the File daemon) + to connect. The default is 30 seconds. Be aware that the + longer the Storage daemon waits for a Client, the more + resources will be tied up. 
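+
+  For illustration only (the values shown are arbitrary, not recommendations),
+  the two timing directives above might appear together in the Storage
+  resource like this:
+
+\footnotesize
+\begin{verbatim}
+  Heartbeat Interval = 60       # send a heartbeat every 60 seconds
+  Client Connect Wait = 600     # wait up to ten minutes for the File daemon
+\end{verbatim}
+\normalsize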
+ +\item [Maximum Concurrent Jobs = \lt{}number\gt{}] + \index[sd]{Maximum Concurrent Jobs} + \index[sd]{Directive!Maximum Concurrent Jobs} + where \lt{}number\gt{} is the maximum number of Jobs that should run + concurrently. The default is set to 10, but you may set it to a larger + number. Each contact from the Director (e.g. status request, job start + request) is considered as a Job, so if you want to be able to do a {\bf + status} request in the console at the same time as a Job is running, you + will need to set this value greater than 1. To run simultaneous Jobs, + you will need to set a number of other directives in the Director's + configuration file. Which ones you set depend on what you want, but you + will almost certainly need to set the {\bf Maximum Concurrent Jobs} in + the Storage resource in the Director's configuration file and possibly + those in the Job and Client resources. + +\item [SDAddresses = \lt{}IP-address-specification\gt{}] + \index[sd]{SDAddresses} + \index[sd]{Directive!SDAddresses} + Specify the ports and addresses on which the Storage daemon will listen + for Director connections. Normally, the default is sufficient and you + do not need to specify this directive. Probably the simplest way to + explain how this directive works is to show an example: + +\footnotesize +\begin{verbatim} + SDAddresses = { ip = { + addr = 1.2.3.4; port = 1205; } + ipv4 = { + addr = 1.2.3.4; port = http; } + ipv6 = { + addr = 1.2.3.4; + port = 1205; + } + ip = { + addr = 1.2.3.4 + port = 1205 + } + ip = { + addr = 1.2.3.4 + } + ip = { + addr = 201:220:222::2 + } + ip = { + addr = bluedot.thun.net + } +} +\end{verbatim} +\normalsize + +where ip, ip4, ip6, addr, and port are all keywords. Note, that the address +can be specified as either a dotted quadruple, or IPv6 colon notation, or as +a symbolic name (only in the ip specification). Also, port can be specified +as a number or as the mnemonic value from the /etc/services file. If a port +is not specified, the default will be used. If an ip section is specified, +the resolution can be made either by IPv4 or IPv6. If ip4 is specified, then +only IPv4 resolutions will be permitted, and likewise with ip6. + +Using this directive, you can replace both the SDPort and SDAddress +directives shown below. + +\item [SDPort = \lt{}port-number\gt{}] + \index[sd]{SDPort} + \index[sd]{Directive!SDPort} + Specifies port number on which the Storage daemon listens for Director + connections. The default is 9103. + +\item [SDAddress = \lt{}IP-Address\gt{}] + \index[sd]{SDAddress} + \index[sd]{Directive!SDAddress} + This directive is optional, and if it is specified, it will cause the + Storage daemon server (for Director and File daemon connections) to bind + to the specified {\bf IP-Address}, which is either a domain name or an + IP address specified as a dotted quadruple. If this directive is not + specified, the Storage daemon will bind to any available address (the + default). + +\end{description} + +The following is a typical Storage daemon Storage definition. + +\footnotesize +\begin{verbatim} +# +# "Global" Storage daemon configuration specifications appear +# under the Storage resource. 
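+# Name, Working Directory and Pid Directory are the required directives in
+# this resource; if SDPort is not specified, the daemon listens on the
+# default port 9103.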
+# +Storage { + Name = "Storage daemon" + Address = localhost + WorkingDirectory = "~/bacula/working" + Pid Directory = "~/bacula/working" +} +\end{verbatim} +\normalsize + +\section{Director Resource} +\label{DirectorResource1} +\index[general]{Director Resource} +\index[general]{Resource!Director} + +The Director resource specifies the Name of the Director which is permitted +to use the services of the Storage daemon. There may be multiple Director +resources. The Director Name and Password must match the corresponding +values in the Director's configuration file. + +\begin{description} + +\item [Name = \lt{}Director-Name\gt{}] + \index[sd]{Name} + \index[sd]{Directive!Name} + Specifies the Name of the Director allowed to connect to the Storage daemon. + This directive is required. + +\item [Password = \lt{}Director-password\gt{}] + \index[sd]{Password} + \index[sd]{Directive!Password} + Specifies the password that must be supplied by the above named Director. + This directive is required. + +\item [Monitor = \lt{}yes|no\gt{}] + \index[sd]{Monitor} + \index[sd]{Directive!Monitor} + If Monitor is set to {\bf no} (default), this director will have full + access to this Storage daemon. If Monitor is set to {\bf yes}, this + director will only be able to fetch the current status of this Storage + daemon. + + Please note that if this director is being used by a Monitor, we highly + recommend to set this directive to {\bf yes} to avoid serious security + problems. + +\end{description} + +The following is an example of a valid Director resource definition: + +\footnotesize +\begin{verbatim} +Director { + Name = MainDirector + Password = my_secret_password +} +\end{verbatim} +\normalsize + +\label{DeviceResource} +\section{Device Resource} +\index[general]{Resource!Device} +\index[general]{Device Resource} + +The Device Resource specifies the details of each device (normally a tape +drive) that can be used by the Storage daemon. There may be multiple +Device resources for a single Storage daemon. In general, the properties +specified within the Device resource are specific to the Device. + +\begin{description} + +\item [Name = {\it Device-Name}] + \index[sd]{Name} + \index[sd]{Directive!Name} + Specifies the Name that the Director will use when asking to backup or + restore to or from to this device. This is the logical Device name, and may + be any string up to 127 characters in length. It is generally a good idea to + make it correspond to the English name of the backup device. The physical + name of the device is specified on the {\bf Archive Device} directive + described below. The name you specify here is also used in your Director's + conf file on the + \ilink{Device directive}{StorageResource2} in its Storage + resource. + +\item [Archive Device = {\it name-string}] + \index[sd]{Archive Device} + \index[sd]{Directive!Archive Device} + The specified {\bf name-string} gives the system file name of the storage + device managed by this storage daemon. This will usually be the device file + name of a removable storage device (tape drive), for example "{\bf + /dev/nst0}" or "{\bf /dev/rmt/0mbn}". For a DVD-writer, it will be for + example {\bf /dev/hdc}. It may also be a directory name if you are archiving + to disk storage. In this case, you must supply the full absolute path to the + directory. When specifying a tape device, it is preferable that the + "non-rewind" variant of the device file name be given. 
In addition, on + systems such as Sun, which have multiple tape access methods, you must be + sure to specify to use Berkeley I/O conventions with the device. The {\bf b} + in the Solaris (Sun) archive specification {\bf /dev/rmt/0mbn} is what is + needed in this case. Bacula does not support SysV tape drive behavior. + + As noted above, normally the Archive Device is the name of a tape drive, but + you may also specify an absolute path to an existing directory. If the Device + is a directory Bacula will write to file storage in the specified directory, + and the filename used will be the Volume name as specified in the Catalog. + If you want to write into more than one directory (i.e. to spread the load to + different disk drives), you will need to define two Device resources, each + containing an Archive Device with a different directory. + \label{SetupFifo} + In addition to a tape device name or a directory name, Bacula will accept the + name of a FIFO. A FIFO is a special kind of file that connects two programs + via kernel memory. If a FIFO device is specified for a backup operation, you + must have a program that reads what Bacula writes into the FIFO. When the + Storage daemon starts the job, it will wait for {\bf MaximumOpenWait} seconds + for the read program to start reading, and then time it out and terminate + the job. As a consequence, it is best to start the read program at the + beginning of the job perhaps with the {\bf RunBeforeJob} directive. For this + kind of device, you never want to specify {\bf AlwaysOpen}, because you want + the Storage daemon to open it only when a job starts, so you must explicitly + set it to {\bf No}. Since a FIFO is a one way device, Bacula will not attempt + to read a label of a FIFO device, but will simply write on it. To create a + FIFO Volume in the catalog, use the {\bf add} command rather than the {\bf + label} command to avoid attempting to write a label. + +\footnotesize +\begin{verbatim} +Device { + Name = FifoStorage + Media Type = Fifo + Device Type = Fifo + Archive Device = /tmp/fifo + LabelMedia = yes + Random Access = no + AutomaticMount = no + RemovableMedia = no + MaximumOpenWait = 60 + AlwaysOpen = no +} +\end{verbatim} +\normalsize + + During a restore operation, if the Archive Device is a FIFO, Bacula will + attempt to read from the FIFO, so you must have an external program that + writes into the FIFO. Bacula will wait {\bf MaximumOpenWait} seconds for the + program to begin writing and will then time it out and terminate the job. As + noted above, you may use the {\bf RunBeforeJob} to start the writer program + at the beginning of the job. + + The Archive Device directive is required. + +\item [Device Type = {\it type-specification}] + \index[sd]{Device Type} + \index[sd]{Directive!Device Type} + The Device Type specification allows you to explicitly tell Bacula + what kind of device you are defining. It the {\it type-specification} + may be one of the following: + \begin{description} + \item [File] + Tells Bacula that the device is a file. It may either be a + file defined on fixed medium or a removable filesystem such as + USB. All files must be random access devices. + \item [Tape] + The device is a tape device and thus is sequential access. Tape devices + are controlled using ioctl() calls. + \item [Fifo] + The device is a first-in-first out sequential access read-only + or write-only device. + \item [DVD] + The device is a DVD. DVDs are sequential access for writing, but + random access for reading. 
+ \end{description} + + The Device Type directive is not required, and if not specified, Bacula + will attempt to guess what kind of device has been specified using the + Archive Device specification supplied. There are several advantages to + explicitly specifying the Device Type. First, on some systems, block and + character devices have the same type, which means that on those systems, + Bacula is unlikely to be able to correctly guess that a device is a DVD. + Secondly, if you explicitly specify the Device Type, the mount point + need not be defined until the device is opened. This is the case with + most removable devices such as USB that are mounted by the HAL daemon. + If the Device Type is not explicitly specified, then the mount point + must exist when the Storage daemon starts. + + This directive was implemented in Bacula version 1.38.6. + + +\item [Media Type = {\it name-string}] + \index[sd]{Media Type} + \index[sd]{Directive!Media Type} + The specified {\bf name-string} names the type of media supported by this + device, for example, "DLT7000". Media type names are arbitrary in that you + set them to anything you want, but they must be known to the volume + database to keep track of which storage daemons can read which volumes. In + general, each different storage type should have a unique Media Type + associated with it. The same {\bf name-string} must appear in the + appropriate Storage resource definition in the Director's configuration + file. + + Even though the names you assign are arbitrary (i.e. you choose the name + you want), you should take care in specifying them because the Media Type + is used to determine which storage device Bacula will select during + restore. Thus you should probably use the same Media Type specification + for all drives where the Media can be freely interchanged. This is not + generally an issue if you have a single Storage daemon, but it is with + multiple Storage daemons, especially if they have incompatible media. + + For example, if you specify a Media Type of "DDS-4" then during the + restore, Bacula will be able to choose any Storage Daemon that handles + "DDS-4". If you have an autochanger, you might want to name the Media Type + in a way that is unique to the autochanger, unless you wish to possibly use + the Volumes in other drives. You should also ensure to have unique Media + Type names if the Media is not compatible between drives. This + specification is required for all devices. + + In addition, if you are using disk storage, each Device resource will + generally have a different mount point or directory. In order for + Bacula to select the correct Device resource, each one must have a + unique Media Type. + +\label{Autochanger} +\item [Autochanger = {\it Yes|No}] + \index[sd]{Autochanger} + \index[sd]{Directive!Autochanger} + If {\bf Yes}, this device belongs to an automatic tape changer, and you + must specify an {\bf Autochanger} resource that points to the {\bf + Device} resources. You must also specify a + {\bf Changer Device}. If the Autochanger directive is set to {\bf + No} (default), the volume must be manually changed. You should also + have an identical directive to the + \ilink{Storage resource}{Autochanger1} in the Director's + configuration file so that when labeling tapes you are prompted for the slot. 
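+
+  For example (a sketch only; the generic SCSI device name varies from
+  system to system), a Device resource that belongs to an autochanger
+  typically carries the following directives, which are described below:
+
+\footnotesize
+\begin{verbatim}
+  Autochanger = yes
+  Changer Device = /dev/sg0
+  Drive Index = 0
+\end{verbatim}
+\normalsize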
+ +\item [Changer Device = {\it name-string}] + \index[sd]{Changer Device} + \index[sd]{Directive!Changer Device} + The specified {\bf name-string} must be the {\bf generic SCSI} device + name of the autochanger that corresponds to the normal read/write + {\bf Archive Device} specified in the Device resource. This + generic SCSI device name should be specified if you have an autochanger + or if you have a standard tape drive and want to use the + {\bf Alert Command} (see below). For example, on Linux systems, for + an Archive Device name of {\bf /dev/nst0}, you would specify {\bf + /dev/sg0} for the Changer Device name. Depending on your exact + configuration, and the number of autochangers or the type of + autochanger, what you specify here can vary. This directive is + optional. See the \ilink{ Using Autochangers}{AutochangersChapter} chapter + of this manual for more details of using this and the following + autochanger directives. + +\item [Changer Command = {\it name-string}] + \index[sd]{Changer Command} + \index[sd]{Directive!Changer Command} + The {\bf name-string} specifies an external program to be called that will + automatically change volumes as required by {\bf Bacula}. Normally, + this directive will be specified only in the {\bf AutoChanger} resource, + which is then used for all devices. However, you may also specify + the different {\bf Changer Command} in each Device resource. + Most frequently, + you will specify the Bacula supplied {\bf mtx-changer} script as follows: + +\footnotesize +\begin{verbatim} +Changer Command = "/path/mtx-changer %c %o %S %a %d" +\end{verbatim} +\normalsize + + and you will install the {\bf mtx} on your system (found in the {\bf depkgs} + release). An example of this command is in the default bacula-sd.conf file. + For more details on the substitution characters that may be specified to + configure your autochanger please see the + \ilink{Autochangers}{AutochangersChapter} chapter of this manual. + For FreeBSD users, you might want to see one of the several {\bf chio} + scripts in {\bf examples/autochangers}. + +\item [Alert Command = {\it name-string}] + \index[sd]{Alert Command} + The {\bf name-string} specifies an external program to be called at the + completion of each Job after the device is released. The purpose of this + command is to check for Tape Alerts, which are present when something is + wrong with your tape drive (at least for most modern tape drives). The same + substitution characters that may be specified in the Changer Command may also + be used in this string. For more information, please see the + \ilink{Autochangers}{AutochangersChapter} chapter of this manual. + + + Note, it is not necessary to have an autochanger to use this command. The + example below uses the {\bf tapeinfo} program that comes with the {\bf mtx} + package, but it can be used on any tape drive. However, you will need to + specify a {\bf Changer Device} directive in your Device resource (see above) + so that the generic SCSI device name can be edited into the command (with the + \%c). + + An example of the use of this command to print Tape Alerts in the Job report + is: + +\footnotesize +\begin{verbatim} +Alert Command = "sh -c 'tapeinfo -f %c | grep TapeAlert'" + +\end{verbatim} +\normalsize + +and an example output when there is a problem could be: + +\footnotesize +\begin{verbatim} +bacula-sd Alert: TapeAlert[32]: Interface: Problem with SCSI interface + between tape drive and initiator. 
+ +\end{verbatim} +\normalsize + +\item [Drive Index = {\it number}] + \index[sd]{Drive Index} + \index[sd]{Directive!Drive Index} + The {\bf Drive Index} that you specify is passed to the {\bf + mtx-changer} script and is thus passed to the {\bf mtx} program. By + default, the Drive Index is zero, so if you have only one drive in your + autochanger, everything will work normally. However, if you have + multiple drives, you must specify multiple Bacula Device resources (one + for each drive). The first Device should have the Drive Index set to 0, + and the second Device Resource should contain a Drive Index set to 1, + and so on. This will then permit you to use two or more drives in your + autochanger. As of Bacula version 1.38.0, using the {\bf Autochanger} + resource, Bacula will automatically ensure that only one drive at a time + uses the autochanger script, so you no longer need locking scripts as in + the past -- the default mtx-changer script works for any number of + drives. + +\item [Autoselect = {\it Yes|No}] + \index[sd]{Autoselect} + \index[sd]{Directive!Autoselect} + If this directive is set to {\bf yes} (default), and the Device + belongs to an autochanger, then when the Autochanger is referenced + by the Director, this device can automatically be selected. If this + directive is set to {\bf no}, then the Device can only be referenced + by directly using the Device name in the Director. This is useful + for reserving a drive for something special such as a high priority + backup or restore operations. + +\item [Maximum Changer Wait = {\it time}] + \index[sd]{Maximum Changer Wait} + \index[sd]{Directive!Maximum Changer Wait} + This directive specifies the maximum time in seconds for Bacula to wait + for an autochanger to change the volume. If this time is exceeded, + Bacula will invalidate the Volume slot number stored in the catalog and + try again. If no additional changer volumes exist, Bacula will ask the + operator to intervene. The default is 5 minutes. +% TODO: if this is the format, then maybe "5 minutes" should be in +% TODO: quotes? define style. see others. + +\item [Maximum Rewind Wait = {\it time}] + \index[sd]{Maximum Rewind Wait} + \index[sd]{Directive!Maximum Rewind Wait} + This directive specifies the maximum time in seconds for Bacula to wait + for a rewind before timing out. If this time is exceeded, + Bacula will cancel the job. The default is 5 minutes. + +\item [Maximum Open Wait = {\it time}] + \index[sd]{Maximum Open Wait} + \index[sd]{Directive!Maximum Open Wait} + This directive specifies the maximum time in seconds for Bacula to wait + for a open before timing out. If this time is exceeded, + Bacula will cancel the job. The default is 5 minutes. + +\item [Always Open = {\it Yes|No}] + \index[sd]{Always Open} + \index[sd]{Directive!Always Open} + If {\bf Yes} (default), Bacula will always keep the device open unless + specifically {\bf unmounted} by the Console program. This permits + Bacula to ensure that the tape drive is always available, and properly + positioned. If you set + {\bf AlwaysOpen} to {\bf no} {\bf Bacula} will only open the drive when + necessary, and at the end of the Job if no other Jobs are using the + drive, it will be freed. The next time Bacula wants to append to a tape + on a drive that was freed, Bacula will rewind the tape and position it to + the end. To avoid unnecessary tape positioning and to minimize + unnecessary operator intervention, it is highly recommended that {\bf + Always Open = yes}. 
This also ensures that the drive is available when + Bacula needs it. + + If you have {\bf Always Open = yes} (recommended) and you want to use the + drive for something else, simply use the {\bf unmount} command in the Console + program to release the drive. However, don't forget to remount the drive with + {\bf mount} when the drive is available or the next Bacula job will block. + + For File storage, this directive is ignored. For a FIFO storage device, you + must set this to {\bf No}. + + Please note that if you set this directive to {\bf No} Bacula will release + the tape drive between each job, and thus the next job will rewind the tape + and position it to the end of the data. This can be a very time consuming + operation. In addition, with this directive set to no, certain multiple + drive autochanger operations will fail. We strongly recommend to keep + {\bf Always Open} set to {\bf Yes} + +\item [Volume Poll Interval = {\it time}] + \index[sd]{Volume Poll Interval} + \index[sd]{Directive!Volume Poll Interval} + If the time specified on this directive is non-zero, after asking the + operator to mount a new volume Bacula will periodically poll (or read) the + drive at the specified interval to see if a new volume has been mounted. If + the time interval is zero (the default), no polling will occur. This + directive can be useful if you want to avoid operator intervention via the + console. Instead, the operator can simply remove the old volume and insert + the requested one, and Bacula on the next poll will recognize the new tape + and continue. Please be aware that if you set this interval too small, you + may excessively wear your tape drive if the old tape remains in the drive, + since Bacula will read it on each poll. This can be avoided by ejecting the + tape using the {\bf Offline On Unmount} and the {\bf Close on Poll} + directives. + However, if you are using a Linux 2.6 kernel or other OSes + such as FreeBSD or Solaris, the Offline On Unmount will leave the drive + with no tape, and Bacula will not be able to properly open the drive and + may fail the job. For more information on this problem, please see the + \ilink{description of Offline On Unmount}{NoTapeInDrive} in the Tape + Testing chapter. + +\item [Close on Poll= {\it Yes|No}] + \index[sd]{Close on Poll} + \index[sd]{Directive!Close on Poll} + If {\bf Yes}, Bacula close the device (equivalent to an unmount except no + mount is required) and reopen it at each poll. Normally this is not too + useful unless you have the {\bf Offline on Unmount} directive set, in which + case the drive will be taken offline preventing wear on the tape during any + future polling. Once the operator inserts a new tape, Bacula will recognize + the drive on the next poll and automatically continue with the backup. + Please see above more more details. + +\item [Maximum Open Wait = {\it time}] + \index[sd]{Maximum Open Wait} + \index[sd]{Directive!Maximum Open Wait} + This directive specifies the maximum amount of time in seconds that + Bacula will wait for a device that is busy. The default is 5 minutes. + If the device cannot be obtained, the current Job will be terminated in + error. Bacula will re-attempt to open the drive the next time a Job + starts that needs the the drive. + +\label{removablemedia} +\item [Removable media = {\it Yes|No}] + \index[sd]{Removable media} + \index[sd]{Directive!Removable media} + If {\bf Yes}, this device supports removable media (for example, tapes + or CDs). 
If {\bf No}, media cannot be removed (for example, an + intermediate backup area on a hard disk). If {\bf Removable media} is + enabled on a File device (as opposed to a tape) the Storage daemon will + assume that device may be something like a USB device that can be + removed or a simply a removable harddisk. When attempting to open + such a device, if the Volume is not found (for File devices, the Volume + name is the same as the Filename), then the Storage daemon will search + the entire device looking for likely Volume names, and for each one + found, it will ask the Director if the Volume can be used. If so, + the Storage daemon will use the first such Volume found. Thus it + acts somewhat like a tape drive -- if the correct Volume is not found, + it looks at what actually is found, and if it is an appendable Volume, + it will use it. + + If the removable medium is not automatically mounted (e.g. udev), then + you might consider using additional Storage daemon device directives + such as {\bf Requires Mount}, {\bf Mount Point}, {\bf Mount Command}, + and {\bf Unmount Command}, all of which can be used in conjunction with + {\bf Removable Media}. + + +\item [Random access = {\it Yes|No}] + \index[sd]{Random access} + \index[sd]{Directive!Random access} + If {\bf Yes}, the archive device is assumed to be a random access medium + which supports the {\bf lseek} (or {\bf lseek64} if Largefile is enabled + during configuration) facility. This should be set to {\bf Yes} for all + file systems such as DVD, USB, and fixed files. It should be set to + {\bf No} for non-random access devices such as tapes and named pipes. + + +\item [Requires Mount = {\it Yes|No}] + \index[sd]{Requires Mount } + When this directive is enabled, the Storage daemon will submit + a {\bf Mount Command} before attempting to open the device. + You must set this directive to {\bf yes} for DVD-writers and removable + file systems such as USB devices that are not automatically mounted + by the operating system when plugged in or opened by Bacula. + It should be set to {\bf no} for + all other devices such as tapes and fixed filesystems. It should also + be set to {\bf no} for any removable device that is automatically + mounted by the operating system when opened (e.g. USB devices mounted + by udev or hotplug). This directive + indicates if the device requires to be mounted using the {\bf Mount + Command}. To be able to write a DVD, the following directives must also + be defined: {\bf Mount Point}, {\bf Mount Command}, {\bf Unmount + Command} and {\bf Write Part Command}. + +\item [Mount Point = {\it directory}] + \index[sd]{Mount Point} + Directory where the device can be mounted. + This directive is used only + for devices that have {\bf Requires Mount} enabled such as DVD or + USB file devices. + +\item [Mount Command = {\it name-string}] + \index[sd]{Mount Command} + This directive specifies the command that must be executed to mount + devices such as DVDs and many USB devices. For DVDs, the + device is written directly, but the mount command is necessary in + order to determine the free space left on the DVD. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. 
+ + Most frequently, for a DVD, you will define it as follows: + +\footnotesize +\begin{verbatim} + Mount Command = "/bin/mount -t iso9660 -o ro %a %m" +\end{verbatim} +\normalsize + +However, if you have defined a mount point in /etc/fstab, you might be +able to use a mount command such as: + +\footnotesize +\begin{verbatim} + Mount Command = "/bin/mount /media/dvd" +\end{verbatim} +\normalsize + +See the \ilink {Edit Codes}{mountcodes} section below for more details of +the editing codes that can be used in this directive. + + +\item [Unmount Command = {\it name-string}] + \index[sd]{Unmount Command} + This directive specifies the command that must be executed to unmount + devices such as DVDs and many USB devices. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. + + Most frequently, you will define it as follows: + +\footnotesize +\begin{verbatim} + Unmount Command = "/bin/umount %m" +\end{verbatim} +\normalsize + +See the \ilink {Edit Codes}{mountcodes} section below for more details of +the editing codes that can be used in this directive. + + +\item [Minimum block size = {\it size-in-bytes}] + \index[sd]{Minimum block size} + \index[sd]{Directive!Minimum block size} + On most modern tape drives, you will not need or want to specify this + directive, and if you do so, it will be to make Bacula use fixed block + sizes. This statement applies only to non-random access devices (e.g. + tape drives). Blocks written by the storage daemon to a non-random + archive device will never be smaller than the given {\bf size-in-bytes}. + The Storage daemon will attempt to efficiently fill blocks with data + received from active sessions but will, if necessary, add padding to a + block to achieve the required minimum size. + + To force the block size to be fixed, as is the case for some non-random + access devices (tape drives), set the {\bf Minimum block size} and the + {\bf Maximum block size} to the same value (zero included). The default + is that both the minimum and maximum block size are zero and the default + block size is 64,512 bytes. + + For example, suppose you want a fixed block size of 100K bytes, then you + would specify: + +\footnotesize +\begin{verbatim} + + Minimum block size = 100K + Maximum block size = 100K + +\end{verbatim} +\normalsize + + Please note that if you specify a fixed block size as shown above, the tape + drive must either be in variable block size mode, or if it is in fixed block + size mode, the block size (generally defined by {\bf mt}) {\bf must} be + identical to the size specified in Bacula -- otherwise when you attempt to + re-read your Volumes, you will get an error. + + If you want the block size to be variable but with a 64K minimum and 200K + maximum (and default as well), you would specify: + +\footnotesize +\begin{verbatim} + + Minimum block size = 64K + Maximum blocksize = 200K + +\end{verbatim} +\normalsize + +\item [Maximum block size = {\it size-in-bytes}] + \index[sd]{Maximum block size} + \index[sd]{Directive!Maximum block size} + On most modern tape drives, you will not need to specify this directive. + If you do so, it will most likely be to use fixed block sizes (see + Minimum block size above). The Storage daemon will always attempt to + write blocks of the specified {\bf size-in-bytes} to the archive device. + As a consequence, this statement specifies both the default block size + and the maximum block size. The size written never exceed the given + {\bf size-in-bytes}. 
If adding data to a block would cause it to exceed + the given maximum size, the block will be written to the archive device, + and the new data will begin a new block. + + If no value is specified or zero is specified, the Storage daemon will + use a default block size of 64,512 bytes (126 * 512). + +\item [Hardware End of Medium = {\it Yes|No}] + \index[sd]{Hardware End of Medium} + \index[sd]{Directive!Hardware End of Medium} + If {\bf No}, the archive device is not required to support end of medium + ioctl request, and the storage daemon will use the forward space file + function to find the end of the recorded data. If {\bf Yes}, the archive + device must support the {\tt ioctl} {\tt MTEOM} call, which will position the + tape to the end of the recorded data. In addition, your SCSI driver must keep + track of the file number on the tape and report it back correctly by the + {\bf MTIOCGET} ioctl. Note, some SCSI drivers will correctly forward space to + the end of the recorded data, but they do not keep track of the file number. + On Linux machines, the SCSI driver has a {\bf fast-eod} option, which if set + will cause the driver to lose track of the file number. You should ensure + that this option is always turned off using the {\bf mt} program. + + Default setting for Hardware End of Medium is {\bf Yes}. This function is + used before appending to a tape to ensure that no previously written data is + lost. We recommend if you have a non-standard or unusual tape drive that you + use the {\bf btape} program to test your drive to see whether or not it + supports this function. All modern (after 1998) tape drives support this + feature. + +\item [Fast Forward Space File = {\it Yes|No}] + \index[sd]{Fast Forward Space File} + \index[sd]{Directive!Fast Forward Space File} + If {\bf No}, the archive device is not required to support keeping track of + the file number ({\bf MTIOCGET} ioctl) during forward space file. If {\bf + Yes}, the archive device must support the {\tt ioctl} {\tt MTFSF} call, which + virtually all drivers support, but in addition, your SCSI driver must keep + track of the file number on the tape and report it back correctly by the + {\bf MTIOCGET} ioctl. Note, some SCSI drivers will correctly forward space, + but they do not keep track of the file number or more seriously, they do not + report end of medium. + + Default setting for Fast Forward Space File is {\bf Yes}. + +\item [Use MTIOCGET = {\it Yes|No}] + \index[sd]{Use MTIOCGET} + \index[sd]{Directive!Use MTIOCGET} + If {\bf No}, the operating system is not required to support keeping track of + the file number and reporting it in the ({\bf MTIOCGET} ioctl). The default + is {\bf Yes}. If you must set this to No, Bacula will do the proper file + position determination, but it is very unfortunate because it means that + tape movement is very inefficient. + Fortunately, this operation system deficiency seems to be the case only + on a few *BSD systems. Operating systems known to work correctly are + Solaris, Linux and FreeBSD. + +\item [BSF at EOM = {\it Yes|No}] + \index[sd]{BSF at EOM} + \index[sd]{Directive!BSF at EOM} + If {\bf No}, the default, no special action is taken by Bacula with the End + of Medium (end of tape) is reached because the tape will be positioned after + the last EOF tape mark, and Bacula can append to the tape as desired. 
+ However, on some systems, such as FreeBSD, when Bacula reads the End of + Medium (end of tape), the tape will be positioned after the second EOF tape + mark (two successive EOF marks indicated End of Medium). If Bacula appends + from that point, all the appended data will be lost. The solution for such + systems is to specify {\bf BSF at EOM} which causes Bacula to backspace over + the second EOF mark. Determination of whether or not you need this directive + is done using the {\bf test} command in the {\bf btape} program. + +\item [TWO EOF = {\it Yes|No}] + \index[sd]{TWO EOF} + \index[sd]{Directive!TWO EOF} + If {\bf Yes}, Bacula will write two end of file marks when terminating a tape +-- i.e. after the last job or at the end of the medium. If {\bf No}, the +default, Bacula will only write one end of file to terminate the tape. + +\item [Backward Space Record = {\it Yes|No}] + \index[sd]{Backward Space Record} + \index[sd]{Directive!Backward Space Record} + If {\it Yes}, the archive device supports the {\tt MTBSR ioctl} to backspace + records. If {\it No}, this call is not used and the device must be rewound + and advanced forward to the desired position. Default is {\bf Yes} for non + random-access devices. This function if enabled is used at the end of a + Volume after writing the end of file and any ANSI/IBM labels to determine whether + or not the last block was written correctly. If you turn this function off, + the test will not be done. This causes no harm as the re-read process is + precautionary rather than required. + +\item [Backward Space File = {\it Yes|No}] + \index[sd]{Backward Space File} + \index[sd]{Directive!Backward Space File} + If {\it Yes}, the archive device supports the {\bf MTBSF} and {\bf MTBSF + ioctl}s to backspace over an end of file mark and to the start of a file. If + {\it No}, these calls are not used and the device must be rewound and + advanced forward to the desired position. Default is {\bf Yes} for non + random-access devices. + +\item [Forward Space Record = {\it Yes|No}] + \index[sd]{Forward Space Record} + \index[sd]{Directive!Forward Space Record} + If {\it Yes}, the archive device must support the {\bf MTFSR ioctl} to + forward space over records. If {\bf No}, data must be read in order to + advance the position on the device. Default is {\bf Yes} for non + random-access devices. + +\item [Forward Space File = {\it Yes|No}] + \index[sd]{Forward Space File} + \index[sd]{Directive!Forward Space File} + If {\bf Yes}, the archive device must support the {\tt MTFSF ioctl} to + forward space by file marks. If {\it No}, data must be read to advance the + position on the device. Default is {\bf Yes} for non random-access devices. + +\item [Offline On Unmount = {\it Yes|No}] + \index[sd]{Offline On Unmount} + \index[sd]{Directive!Offline On Unmount} + The default for this directive is {\bf No}. If {\bf Yes} the archive device + must support the {\tt MTOFFL ioctl} to rewind and take the volume offline. In + this case, Bacula will issue the offline (eject) request before closing the + device during the {\bf unmount} command. If {\bf No} Bacula will not attempt + to offline the device before unmounting it. After an offline is issued, the + cassette will be ejected thus {\bf requiring operator intervention} to + continue, and on some systems require an explicit load command to be issued + ({\bf mt -f /dev/xxx load}) before the system will recognize the tape. 
If you + are using an autochanger, some devices require an offline to be issued prior + to changing the volume. However, most devices do not and may get very + confused. + + If you are using a Linux 2.6 kernel or other OSes + such as FreeBSD or Solaris, the Offline On Unmount will leave the drive + with no tape, and Bacula will not be able to properly open the drive and + may fail the job. For more information on this problem, please see the + \ilink{description of Offline On Unmount}{NoTapeInDrive} in the Tape + Testing chapter. + + +\item [Maximum Volume Size = {\it size}] + \index[sd]{Maximum Volume Size} + \index[sd]{Directive!Maximum Volume Size} + No more than {\bf size} bytes will be written onto a given volume on the + archive device. This directive is used mainly in testing Bacula to + simulate a small Volume. It can also be useful if you wish to limit the + size of a File Volume to say less than 2GB of data. In some rare cases + of really antiquated tape drives that do not properly indicate when the + end of a tape is reached during writing (though I have read about such + drives, I have never personally encountered one). Please note, this + directive is deprecated (being phased out) in favor of the {\bf Maximum + Volume Bytes} defined in the Director's configuration file. + +\item [Maximum File Size = {\it size}] + \index[sd]{Maximum File Size} + \index[sd]{Directive!Maximum File Size} + No more than {\bf size} bytes will be written into a given logical file + on the volume. Once this size is reached, an end of file mark is + written on the volume and subsequent data are written into the next + file. Breaking long sequences of data blocks with file marks permits + quicker positioning to the start of a given stream of data and can + improve recovery from read errors on the volume. The default is one + Gigabyte. This directive creates EOF marks only on tape media. + However, regardless of the medium type (tape, disk, DVD, ...) each time + a the Maximum File Size is exceeded, a record is put into the catalog + database that permits seeking to that position on the medium for + restore operations. If you set this to a small value (e.g. 1MB), + you will generate lots of database records (JobMedia) and may + significantly increase CPU/disk overhead. + + Note, this directive does not limit the size of Volumes that Bacula + will create regardless of whether they are tape or disk volumes. It + changes only the number of EOF marks on a tape and the number of + block positioning records (see below) that are generated. If you + want to limit the size of all Volumes for a particular device, use + the {\bf Maximum Volume Size} directive (above), or use the + {\bf Maximum Volume Bytes} directive in the Director's Pool resource, + which does the same thing but on a Pool (Volume) basis. + +\item [Block Positioning = {\it yes|no}] + \index[sd]{Block Positioning} + \index[sd]{Directive!Block Positioning} + This directive tells Bacula not to use block positioning when doing restores. + Turning this directive off can cause Bacula to be {\bf extremely} slow + when restoring files. You might use this directive if you wrote your + tapes with Bacula in variable block mode (the default), but your drive + was in fixed block mode. The default is {\bf yes}. + +\item [Maximum Network Buffer Size = {\it bytes}] + \index[sd]{Maximum Network Buffer Size} + \index[sd]{Directive!Maximum Network Buffer Size} + where {\it bytes} specifies the initial network buffer size to use with the + File daemon. 
This size will be adjusted down if it is too large until + it is accepted by the OS. Please use care in setting this value since if + it is too large, it will be trimmed by 512 bytes until the OS is happy, + which may require a large number of system calls. The default value is + 32,768 bytes. + + The default size was chosen to be relatively large but not too big in + the case that you are transmitting data over Internet. It is clear that + on a high speed local network, you can increase this number and improve + performance. For example, some users have found that if you use a value + of 65,536 bytes they get five to ten times the throughput. Larger values for + most users don't seem to improve performance. If you are interested + in improving your backup speeds, this is definitely a place to + experiment. You will probably also want to make the corresponding change + in each of your File daemons conf files. + + +\item [Maximum Spool Size = {\it bytes}] + \index[sd]{Maximum Spool Size} + \index[sd]{Directive!Maximum Spool Size} + where the bytes specify the maximum spool size for all jobs that are running. + The default is no limit. + +\item [Maximum Job Spool Size = {\it bytes}] + \index[sd]{Maximum Job Spool Size} + \index[sd]{Directive!Maximum Job Spool Size} + where the bytes specify the maximum spool size for any one job that is + running. The default is no limit. + This directive is implemented only in version 1.37 and later. + +\item [Spool Directory = {\it directory}] + \index[sd]{Spool Directory} + \index[sd]{Directive!Spool Directory} + specifies the name of the directory to be used to store the spool files for + this device. This directory is also used to store temporary part files when + writing to a device that requires mount (DVD). The default is to use the + working directory. + +\item [Maximum Part Size = {\it bytes}] + \index[sd]{Maximum Part Size} + \index[sd]{Directive!Maximum Part Size} + This is the maximum size of a volume part file. The default is no limit. + This directive is implemented only in version 1.37 and later. + + If the device requires mount, it is transferred to the device when this size + is reached. In this case, you must take care to have enough disk space left + in the spool directory. + + Otherwise, it is left on the hard disk. + + It is ignored for tape and FIFO devices. + + +\end{description} + +\label{mountcodes} +\section{Edit Codes for Mount and Unmount Directives} +\index[general]{Directives!Edit Codes} +\index[general]{Edit Codes for Mount and Unmount Directives } + +Before submitting the {\bf Mount Command}, {\bf Unmount Command}, +{\bf Write Part Command}, or {\bf Free Space Command} directives +to the operating system, Bacula performs character substitution of the +following characters: + +\footnotesize +\begin{verbatim} + %% = % + %a = Archive device name + %e = erase (set if cannot mount and first part) + %n = part number + %m = mount point + %v = last part name (i.e. filename) +\end{verbatim} +\normalsize + + +\section{Devices that require a mount (DVD)} +\index[general]{Devices that require a mount (DVD)} +\index[general]{DVD!Devices that require a mount} + +All the directives in this section are implemented only in +Bacula version 1.37 and later and hence are available in version 1.38.6. + +As of version 1.39.5, the directives +"Requires Mount", "Mount Point", "Mount Command", and "Unmount Command" +apply to removable filesystems such as USB in addition to DVD. 
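+
+The following sketch shows how these directives typically fit together for a
+DVD writer (the device path, mount point and script location are examples
+only; a commented-out resource of this kind also appears in the sample
+configuration file at the end of this chapter):
+
+\footnotesize
+\begin{verbatim}
+Device {
+  Name = DVD-Writer
+  Media Type = DVD
+  Archive Device = /dev/hdc
+  Requires Mount = yes
+  Mount Point = /mnt/cdrom
+  Mount Command = "/bin/mount -t iso9660 -o ro %a %m"
+  Unmount Command = "/bin/umount %m"
+  Write Part Command = "/path/dvd-handler %a write %e %v"
+}
+\end{verbatim}
+\normalsize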
+ +\begin{description} + +\item [Requires Mount = {\it Yes|No}] + \index[sd]{Requires Mount} + \index[sd]{Directive!Requires Mount} + You must set this directive to {\bf yes} for DVD-writers, and to {\bf no} for + all other devices (tapes/files). This directive indicates if the device + requires to be mounted to be read, and if it must be written in a special way. + If it set, {\bf Mount Point}, {\bf Mount Command}, {\bf Unmount Command} and + {\bf Write Part Command} directives must also be defined. + +\item [Mount Point = {\it directory}] + \index[sd]{Mount Point} + \index[sd]{Directive!Mount Point} + Directory where the device can be mounted. + +\item [Mount Command = {\it name-string}] + \index[sd]{Mount Command} + \index[sd]{Directive!Mount Command} + Command that must be executed to mount the device. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. + + Most frequently, you will define it as follows: + +\footnotesize +\begin{verbatim} + Mount Command = "/bin/mount -t iso9660 -o ro %a %m" +\end{verbatim} +\normalsize + +\item [Unmount Command = {\it name-string}] + \index[sd]{Unmount Command} + \index[sd]{Directive!Unmount Command} + Command that must be executed to unmount the device. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. + + Most frequently, you will define it as follows: + +\footnotesize +\begin{verbatim} + Unmount Command = "/bin/umount %m" +\end{verbatim} +\normalsize + +\item [Write Part Command = {\it name-string}] + \index[sd]{Write Part Command} + \index[sd]{Directive!Write Part Command} + Command that must be executed to write a part to the device. Before the + command is executed, \%a is replaced with the Archive Device, \%m with the + Mount Point, \%e is replaced with 1 if we are writing the first part, + and with 0 otherwise, and \%v with the current part filename. + + For a DVD, you will most frequently specify the Bacula supplied {\bf + dvd-handler} script as follows: + +\footnotesize +\begin{verbatim} + Write Part Command = "/path/dvd-handler %a write %e %v" +\end{verbatim} +\normalsize + + Where {\bf /path} is the path to your scripts install directory, and + dvd-handler is the Bacula supplied script file. + This command will already be present, but commented out, + in the default bacula-sd.conf file. To use it, simply remove + the comment (\#) symbol. + + +\item [Free Space Command = {\it name-string}] + \index[sd]{Free Space Command} + \index[sd]{Directive!Free Space Command} + Command that must be executed to check how much free space is left on the + device. Before the command is executed,\%a is replaced with the Archive + Device, \%m with the Mount Point, \%e is replaced with 1 if we are writing + the first part, and with 0 otherwise, and \%v with the current part filename. + + For a DVD, you will most frequently specify the Bacula supplied {\bf + dvd-handler} script as follows: + +\footnotesize +\begin{verbatim} + Free Space Command = "/path/dvd-handler %a free" +\end{verbatim} +\normalsize + + Where {\bf /path} is the path to your scripts install directory, and + dvd-handler is the Bacula supplied script file. + If you want to specify your own command, please look at the code of + dvd-handler to see what output Bacula expects from this command. + This command will already be present, but commented out, + in the default bacula-sd.conf file. To use it, simply remove + the comment (\#) symbol. 
+ + If you do not set it, Bacula will expect there is always free space on the + device. + +\end{description} + +%% This pulls in the Autochanger resource from another file. +\label{AutochangerRes} +\label{AutochangerResource1} +\input{autochangerres} + + + + +\section{Capabilities} +\index[general]{Capabilities} + +\begin{description} + +\item [Label media = {\it Yes|No}] + \index[sd]{Label media} + \index[sd]{Directive!Label media} + If {\bf Yes}, permits this device to automatically label blank media + without an explicit operator command. It does so by using an internal + algorithm as defined on the \ilink{Label Format}{Label} record in each + Pool resource. If this is {\bf No} as by default, Bacula will label + tapes only by specific operator command ({\bf label} in the Console) or + when the tape has been recycled. The automatic labeling feature is most + useful when writing to disk rather than tape volumes. + +\item [Automatic mount = {\it Yes|No}] + \index[sd]{Automatic mount} + \index[sd]{Directive!Automatic mount} + If {\bf Yes} (the default), permits the daemon to examine the device to + determine if it contains a Bacula labeled volume. This is done + initially when the daemon is started, and then at the beginning of each + job. This directive is particularly important if you have set + {\bf Always Open = no} because it permits Bacula to attempt to read the + device before asking the system operator to mount a tape. However, + please note that the tape must be mounted before the job begins. + +\end{description} + +\section{Messages Resource} +\label{MessagesResource1} +\index[general]{Resource!Messages} +\index[general]{Messages Resource} + +For a description of the Messages Resource, please see the +\ilink{Messages Resource}{MessagesChapter} Chapter of this +manual. + +\section{Sample Storage Daemon Configuration File} +\label{SampleConfiguration} +\index[general]{File!Sample Storage Daemon Configuration} +\index[general]{Sample Storage Daemon Configuration File} + +A example Storage Daemon configuration file might be the following: + +\footnotesize +\begin{verbatim} +# +# Default Bacula Storage Daemon Configuration file +# +# For Bacula release 1.37.2 (07 July 2005) -- gentoo 1.4.16 +# +# You may need to change the name of your tape drive +# on the "Archive Device" directive in the Device +# resource. If you change the Name and/or the +# "Media Type" in the Device resource, please ensure +# that bacula-dir.conf has corresponding changes. +# +Storage { # definition of myself + Name = rufus-sd + Address = rufus + WorkingDirectory = "$HOME/bacula/bin/working" + Pid Directory = "$HOME/bacula/bin/working" + Maximum Concurrent Jobs = 20 +} +# +# List Directors who are permitted to contact Storage daemon +# +Director { + Name = rufus-dir + Password = "ZF9Ctf5PQoWCPkmR3s4atCB0usUPg+vWWyIo2VS5ti6k" +} +# +# Restricted Director, used by tray-monitor to get the +# status of the storage daemon +# +Director { + Name = rufus-mon + Password = "9usxgc307dMbe7jbD16v0PXlhD64UVasIDD0DH2WAujcDsc6" + Monitor = yes +} +# +# Devices supported by this Storage daemon +# To connect, the Director's bacula-dir.conf must have the +# same Name and MediaType. 
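+# (For example, a Director Storage resource that points at the autochanger
+#  below might use Device = Autochanger and Media Type = DLT-8000.)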
+# +Autochanger { + Name = Autochanger + Device = Drive-1 + Device = Drive-2 + Changer Command = "/home/kern/bacula/bin/mtx-changer %c %o %S %a %d" + Changer Device = /dev/sg0 +} + +Device { + Name = Drive-1 # + Drive Index = 0 + Media Type = DLT-8000 + Archive Device = /dev/nst0 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + AutoChanger = yes + Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +} + +Device { + Name = Drive-2 # + Drive Index = 1 + Media Type = DLT-8000 + Archive Device = /dev/nst1 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + AutoChanger = yes + Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +} + +Device { + Name = "HP DLT 80" + Media Type = DLT8000 + Archive Device = /dev/nst0 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; +} +#Device { +# Name = SDT-7000 # +# Media Type = DDS-2 +# Archive Device = /dev/nst0 +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes; +# RemovableMedia = yes; +#} +#Device { +# Name = Floppy +# Media Type = Floppy +# Archive Device = /mnt/floppy +# RemovableMedia = yes; +# Random Access = Yes; +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = no; +#} +#Device { +# Name = FileStorage +# Media Type = File +# Archive Device = /tmp +# LabelMedia = yes; # lets Bacula label unlabeled media +# Random Access = Yes; +# AutomaticMount = yes; # when device opened, read it +# RemovableMedia = no; +# AlwaysOpen = no; +#} +#Device { +# Name = "NEC ND-1300A" +# Media Type = DVD +# Archive Device = /dev/hda +# LabelMedia = yes; # lets Bacula label unlabeled media +# Random Access = Yes; +# AutomaticMount = yes; # when device opened, read it +# RemovableMedia = yes; +# AlwaysOpen = no; +# MaximumPartSize = 800M; +# RequiresMount = yes; +# MountPoint = /mnt/cdrom; +# MountCommand = "/bin/mount -t iso9660 -o ro %a %m"; +# UnmountCommand = "/bin/umount %m"; +# SpoolDirectory = /tmp/backup; +# WritePartCommand = "/etc/bacula/dvd-handler %a write %e %v" +# FreeSpaceCommand = "/etc/bacula/dvd-handler %a free" +#} +# +# A very old Exabyte with no end of media detection +# +#Device { +# Name = "Exabyte 8mm" +# Media Type = "8mm" +# Archive Device = /dev/nst0 +# Hardware end of medium = No; +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = Yes; +# RemovableMedia = yes; +#} +# +# Send all messages to the Director, +# mount messages also are sent to the email address +# +Messages { + Name = Standard + director = rufus-dir = all + operator = root = mount +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/install/translate_images.pl b/docs/manuals/de/install/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/de/install/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. 
+# The hash is created with the imgxx.png files as the key if processing TO
+#  meaningful filenames, and with the meaningful filenames as the key if
+#  processing FROM meaningful filenames.
+# Then opens the html file(s) indicated in the command-line arguments and
+#  changes all image references according to the translations described in the
+#  above file.  Finally, it renames the image files.
+#
+# Original creation: 3-27-05 by Karl Cunningham.
+#   Modified 5-21-05 to go FROM and TO meaningful filenames.
+#
+my $TRANSFILE = "imagename_translations";
+my $path;
+
+# Loads the contents of $TRANSFILE file into the hash referenced in the first
+#  argument. The hash is loaded to translate old to new if $direction is 0,
+#  otherwise it is loaded to translate new to old.  In this context, the
+#  'old' filename is the meaningful name, and the 'new' filename is the
+#  imgxx.png filename.  It is assumed that the old image is the one that
+#  latex2html has used as the source to create the imgxx.png filename.
+# The filename extension is taken from the new filename and applied to the old one.
+sub read_transfile {
+    my ($trans,$direction) = @_;
+
+    if (!open IN,"<$path$TRANSFILE") {
+        print "WARNING:  Cannot open image translation file $path$TRANSFILE for reading\n";
+        print "   Image filename translation aborted\n\n";
+        exit 0;
+    }
+
+    while (<IN>) {
+        chomp;
+        my ($new,$old) = split(/\001/);
+
+        # Old filenames will usually have a leading ./ which we don't need.
+        $old =~ s/^\.\///;
+
+        # The filename extension of the old filename must be made to match
+        #  the new filename because it indicates the encoding format of the image.
+        my ($ext) = $new =~ /(\.[^\.]*)$/;
+        $old =~ s/\.[^\.]*$/$ext/;
+        if ($direction == 0) {
+            $trans->{$new} = $old;
+        } else {
+            $trans->{$old} = $new;
+        }
+    }
+    close IN;
+}
+
+# Translates the image names in the file given as the first argument, according to
+#  the translations in the hash that is given as the second argument.
+#  The file contents are read in entirely into a string, the string is processed, and
+#  the file contents are then written. No particular care is taken to ensure that the
+#  file is not lost if a system failure occurs at an inopportune time.  It is assumed
+#  that the html files being processed here can be recreated on demand.
+#
+# Links to other files are added to the %filelist for processing.  That way,
+#  all linked files will be processed (assuming they are local).
+sub translate_html {
+    my ($filename,$trans,$filelist) = @_;
+    my ($contents,$out,$this,$img,$dest);
+    my $cnt = 0;
+
+    # If the filename is an external link ignore it.  And drop any file:// from
+    #  the filename.
+    $filename =~ /^(http|ftp|mailto)\:/ and return 0;
+    $filename =~ s/^file\:\/\///;
+    # Load the contents of the html file.
+    if (!open IF,"<$path$filename") {
+        print "WARNING:  Cannot open $path$filename for reading\n";
+        print "  Image Filename Translation aborted\n\n";
+        exit 0;
+    }
+
+    while (<IF>) {
+        $contents .= $_;
+    }
+    close IF;
+
+    # Now do the translation...
+    #  First, search for an image filename.
+    while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) {
+        $contents = $';
+        $out .= $` . $&;
+
+        # The next thing is an image name.  Get it and translate it.
+        $contents =~ /^(.*?)\"/s;
+        $contents = $';
+        $this = $&;
+        $img = $1;
+        # If the image is in our list of ones to be translated, do it
+        #  and feed the result to the output.
+        $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img}));
+        $out .= $this;
+    }
+    $out .= $contents;
+
+    # Now send the translated text to the html file, overwriting what's there.
+ open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. + if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/de/install/update_version b/docs/manuals/de/install/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/de/install/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/install/update_version.in b/docs/manuals/de/install/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/de/install/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/install/version.tex b/docs/manuals/de/install/version.tex new file mode 100644 index 00000000..82d910aa --- /dev/null +++ b/docs/manuals/de/install/version.tex @@ -0,0 +1 @@ +2.3.6 (04 November 2007) diff --git a/docs/manuals/de/install/version.tex.in b/docs/manuals/de/install/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/de/install/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/de/problems/Makefile b/docs/manuals/de/problems/Makefile new file mode 100644 index 00000000..55cb58c6 --- /dev/null +++ b/docs/manuals/de/problems/Makefile @@ -0,0 +1,136 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=problems + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Problem Resolution Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Proble*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + @rm -f ${DOC}i-*.tex + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/problems/Makefile.in b/docs/manuals/de/problems/Makefile.in new file mode 100644 index 00000000..55cb58c6 --- /dev/null +++ b/docs/manuals/de/problems/Makefile.in @@ -0,0 +1,136 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=problems + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Problem Resolution Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Proble*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + @rm -f ${DOC}i-*.tex + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/problems/check_tex.pl b/docs/manuals/de/problems/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/de/problems/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. 
+my $b = '\\\\begin\\*?\\s*\\{\\s*';    # \begin{
+my $e = '\\\\end\\*?\\s*\\{\\s*';      # \end{
+my $c = '\\s*\\}';                     # closing curly brace
+
+# This captures entire verbatim environments. These are passed to the output
+#  file unchanged.
+my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c;
+
+# This captures \verb{..{ constructs. They are passed to the output unchanged.
+my $verb = '\\\\verb\\*?(.).*?\\1';
+
+# This captures multiple hyphens with a leading and trailing space. These are not changed.
+my $hyphsp = '\\s\\-{2,}\\s';
+
+# This identifies other multiple hyphens.
+my $hyphens = '\\-{2,}';
+
+# This identifies \hyperpage{..} commands, which should be ignored.
+my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}';
+
+# This builds the actual test string from the above strings.
+#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens";
+my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens";
+
+
+sub get_includes {
+    # Get a list of include files from the top-level tex file.  The first
+    #  argument is a pointer to the list of files found. The rest of the
+    #  arguments is a list of filenames to check for includes.
+    my $files = shift;
+    my ($fileline,$includefile,$includes);
+
+    while (my $filename = shift) {
+        # Get a list of all the html files in the directory.
+        open my $if,"<$filename" or die "Cannot open input file $filename\n";
+        $fileline = 0;
+        $includes = 0;
+        while (<$if>) {
+            chomp;
+            $fileline++;
+            # If a file is found in an include, process it.
+            if (($includefile) = /\\include\s*\{(.*?)\}/) {
+                $includes++;
+                # Append .tex to the filename
+                $includefile .= '.tex';
+
+                # If the include file has already been processed, issue a warning
+                #  and don't do it again.
+                my $found = 0;
+                foreach (@$files) {
+                    if ($_ eq $includefile) {
+                        $found = 1;
+                        last;
+                    }
+                }
+                if ($found) {
+                    print "$includefile found at line $fileline in $filename was previously included\n";
+                } else {
+                    # The file has not been previously found. Save it and
+                    #  recursively process it.
+                    push (@$files,$includefile);
+                    get_includes($files,$includefile);
+                }
+            }
+        }
+        close $if;
+    }
+}
+
+
+sub check_hyphens {
+    my (@files) = @_;
+    my ($filedata,$this,$linecnt,$before);
+
+    # Build the test string to check for the various environments.
+    #  We only do the conversion if the multiple hyphens are outside of a
+    #  verbatim environment (either \begin{verbatim}...\end{verbatim} or
+    #  \verb{--}).  Capture those environments and pass them to the output
+    #  unchanged.
+
+    foreach my $file (@files) {
+        # Open the file and load the whole thing into $filedata. A bit wasteful but
+        #  easier to deal with, and we don't have a problem with speed here.
+        $filedata = "";
+        open IF,"<$file" or die "Cannot open input file $file";
+        while (<IF>) {
+            $filedata .= $_;
+        }
+        close IF;
+
+        # Set up to process the file data.
+        $linecnt = 1;
+
+        # Go through the file data from beginning to end.  For each match, save what
+        #  came before it and what matched.  $filedata now becomes only what came
+        #  after the match.
+        #  Check the match to see if it starts with a multiple-hyphen.  If so
+        #   warn the user.  Keep track of line numbers so they can be output
+        #   with the warning message.
+ if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/de/problems/do_echo b/docs/manuals/de/problems/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/de/problems/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/de/problems/faq.tex b/docs/manuals/de/problems/faq.tex new file mode 100644 index 00000000..df0f0554 --- /dev/null +++ b/docs/manuals/de/problems/faq.tex @@ -0,0 +1,876 @@ +%% +%% +% TODO: maybe merge all this FAQ in with the appropriate section? +% TODO: and use detailed indexing to help reader + +\chapter{Bacula Frequently Asked Questions} +\label{FaqChapter} +\index[general]{Questions!Bacula Frequently Asked } +\index[general]{Bacula Frequently Asked Questions } + +These are questions that have been submitted over time by the +Bacula users. The following +FAQ is very useful, but it is not always up to date +with newer information, so after reading it, if you don't find what you +want, you might try the Bacula wiki maintained by Frank Sweetser, which +contains more than just a FAQ: +\elink{http://wiki.bacula.org}{\url{http://wiki.bacula.org}} +or go directly to the FAQ at: +\elink{http://wiki.bacula.org/doku.php?id=faq} +{\url{http://wiki.bacula.org/doku.php?id=faq}}. + +Please also see +\ilink{the bugs section}{BugsChapter} of this document for a list +of known bugs and solutions. + +\begin{description} +\label{what} +\section{What is Bacula?} +\item [What is {\bf Bacula}? ] + \index[general]{What is Bacula? } + {\bf Bacula} is a network backup and restore program. + +\section{Does Bacula support Windows?} +\item [Does Bacula support Windows?] +\index[general]{Does Bacula support Windows? } + Yes, Bacula compiles and runs on Windows machines (Win98, WinMe, WinXP, + WinNT, Win2003, and Win2000). We provide a binary version of the Client + (bacula-fd), but have not tested the Director nor the Storage daemon. + Note, Win95 is no longer supported because it doesn't have the + GetFileAttributesExA API call. + + +\label{lang} +\section{What language is Bacula written in?} +\item [What language is Bacula written in?] +\index[general]{What language is Bacula written in? } + It is written in C++, but it is mostly C code using only a limited set of + the C++ extensions over C. Thus Bacula is completely compiled using the + C++ compiler. There are several modules, including the Win32 interface, that + are written using the object oriented C++ features. Over time, we are slowly + adding a larger subset of C++. + +\label{run} +\section{On what machines does Bacula run?} +\item [On what machines does Bacula run? ] + \index[general]{On what machines does Bacula run? } + {\bf Bacula} builds and executes on Red Hat Linux (versions RH7.1-RHEL + 4.0, Fedora, SuSE, Gentoo, Debian, Mandriva, ...), FreeBSD, Solaris, + Alpha, SGI (client), NetBSD, OpenBSD, Mac OS X (client), and Win32. 
+
+   Bacula has been my only backup tool for over seven years backing up 8
+   machines nightly (6 Linux boxes running SuSE, previously
+   Red Hat and Fedora, a WinXP machine, and a WinNT machine).
+
+
+\label{stable}
+\section{Is Bacula Stable?}
+\item [Is Bacula Stable? ]
+\index[general]{Is Bacula Stable? }
+   Yes, it is remarkably stable, but remember, there are still a lot of
+   unimplemented or partially implemented features.  With a program of this
+   size (150,000+ lines of C++ code not including the SQL programs) there
+   are bound to be bugs.  The current test environment (a twisted pair
+   local network and an HP DLT backup tape) is not exactly ideal, so
+   additional testing on other sites is necessary.  The File daemon has
+   never crashed -- running months at a time with no intervention.  The
+   Storage daemon is remarkably stable with most of the problems arising
+   during labeling or switching tapes.  Storage daemon crashes are rare,
+   but running multiple drives and simultaneous jobs sometimes (rarely)
+   causes problems.
+   The Director, given the multitude of functions it fulfills, is also
+   relatively stable.  In a production environment, it rarely if ever
+   crashes.  Of the three daemons, the Director is the most prone to having
+   problems.  Still, it frequently runs several months with no problems.
+
+   There are a number of reasons for this stability.
+
+   \begin{enumerate}
+   \item The program is constantly checking the chain of allocated
+      memory buffers to ensure that no overruns have occurred. \\
+   \item All memory leaks (orphaned buffers) are reported each time the
+      program terminates.\\
+   \item Any signal (segmentation fault, ...) generates a
+      traceback that is emailed to the developer.  This permits quick
+      resolution of bugs even if they only show up rarely in a production
+      system.\\
+   \item There is a reasonably comprehensive set of regression tests
+      that avoids re-creating the most common errors in new versions of
+      Bacula.
+   \end{enumerate}
+
+\label{AuthorizationErrors}
+\section{I'm Getting Authorization Errors. What is Going On? }
+\item [I'm Getting Authorization Errors. What is Going On? ]
+\index[general]{Authorization Errors}
+\index[general]{Concurrent Jobs}
+   For security reasons, Bacula requires that both the File daemon and the
+   Storage daemon know the name of the Director as well as its password.  As a
+   consequence, if you change the Director's name or password, you must make
+   the corresponding change in the Storage daemon's and in the File daemon's
+   configuration files.
+
+   During the authorization process, the Storage daemon and File daemon
+   also require that the Director authenticates itself, so both ends
+   require the other to have the correct name and password.
+
+   If you have edited the conf files and modified any name or any password,
+   and you are getting authentication errors, then your best bet is to go
+   back to the original conf files generated by the Bacula installation
+   process.  Make only the absolutely necessary modifications to these
+   files -- e.g. add the correct email address.  Then follow the
+   instructions in the \ilink{ Running Bacula}{TutorialChapter} chapter of
+   this manual.  You will run a backup to disk and a restore.  Only when
+   that works, should you begin customization of the conf files.
+
+   Another reason that you can get authentication errors is if you are
+   running Multiple Concurrent Jobs in the Director, but you have not set
+   them in the File daemon or the Storage daemon.
Once you reach their + limit, they will reject the connection producing authentication (or + connection) errors. + + If you are having problems connecting to a Windows machine that + previously worked, you might try restarting the Bacula service since + Windows frequently encounters networking connection problems. + + Some users report that authentication fails if there is not a proper + reverse DNS lookup entry for the machine. This seems to be a + requirement of gethostbyname(), which is what Bacula uses to translate + names into IP addresses. If you cannot add a reverse DNS entry, or you + don't know how to do so, you can avoid the problem by specifying an IP + address rather than a machine name in the appropriate Bacula conf file. + + Here is a picture that indicates what names/passwords in which + files/Resources must match up: + + \includegraphics{./Conf-Diagram.eps} + + In the left column, you will find the Director, Storage, and Client + resources, with their names and passwords -- these are all in {\bf + bacula-dir.conf}. The right column is where the corresponding values + should be found in the Console, Storage daemon (SD), and File daemon (FD) + configuration files. + + Another thing to check is to ensure that the Bacula component you are + trying to access has {\bf Maximum Concurrent Jobs} set large enough to + handle each of the Jobs and the Console that want to connect + simultaneously. Once the maximum connections has been reached, each + Bacula component will reject all new connections. + + Finally, make sure you have no {\bf hosts.allow} or {\bf hosts.deny} + file that is not permitting access to the site trying to connect. + +\label{AccessProblems} +\section{Bacula Runs Fine but Cannot Access a Client on a Different Machine. + Why? } +\item [Bacula Runs Fine but Cannot Access a Client on a Different Machine. + Why? ] +\index[general]{Cannot Access a Client} + There are several reasons why Bacula could not contact a client on a + different machine. They are: + +\begin{itemize} +\item It is a Windows Client, and the client died because of an improper + configuration file. Check that the Bacula icon is in the system tray and the + the menu items work. If the client has died, the icon will disappear only + when you move the mouse over the icon. +\item The Client address or port is incorrect or not resolved by DNS. See if + you can ping the client machine using the same address as in the Client + record. +\item You have a firewall, and it is blocking traffic on port 9102 between + the Director's machine and the Client's machine (or on port 9103 between the + Client and the Storage daemon machines). +\item Your password or names are not correct in both the Director and the + Client machine. Try configuring everything identical to how you run the + client on the same machine as the Director, but just change the Address. If + that works, make the other changes one step at a time until it works. +\item You may also be having problems between your File daemon and your + Storage daemon. The name you use in the Storage resource of your + Director's conf file must be known (resolvable) by the File daemon, + because it is passed symbolically to the File daemon, which then + resolves it to get an IP address used to contact the Storage daemon. +\item You may have a {\bf hosts.allow} or {\bf hosts.deny} file that is + not permitting access. 
+\end{itemize} + +\label{startover} +\section{My Catalog is Full of Test Runs, How Can I Start Over?} +\item [My Catalog is Full of Test Runs, How Can I Start Over? ] + \index[general]{My Catalog is Full of Test Runs, How Can I Start Over? } + If you are using MySQL do the following: + +\footnotesize +\begin{verbatim} + cd /src/cats + ./drop_mysql_tables + ./make_mysql_tables + +\end{verbatim} +\normalsize + +If you are using SQLite, do the following: + +\footnotesize +\begin{verbatim} + Delete bacula.db from your working directory. + cd /src/cats + ./drop_sqlite_tables + ./make_sqlite_tables + +\end{verbatim} +\normalsize + +Then write an EOF on each tape you used with {\bf Bacula} using: + +\footnotesize +\begin{verbatim} +mt -f /dev/st0 rewind +mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +where you need to adjust the device name for your system. + +\label{restorehang} +\section{I Run a Restore Job and Bacula Hangs. What do I do?} +\item [I Run a Restore Job and Bacula Hangs. What do I do?] +\index[general]{I Run a Restore Job and Bacula Hangs. What do I do? } + On Bacula version 1.25 and prior, it expects you to have the correct + tape mounted prior to a restore. On Bacula version 1.26 and higher, it + will ask you for the tape, and if the wrong one is mounted, it will + inform you. + + If you have previously done an {\bf unmount} command, all Storage daemon + sessions (jobs) will be completely blocked from using the drive + unmounted, so be sure to do a {\bf mount} after your unmount. If in + doubt, do a second {\bf mount}, it won't cause any harm. + +\label{windowstart} +\section{I Cannot Get My Windows Client to Start Automatically? } +\item [I Cannot Get My Windows Client to Start Automatically? ] +\index[general]{Windows Auto Start} + You are probably having one of two problems: either the Client is dying + due to an incorrect configuration file, or you didn't do the + Installation commands necessary to install it as a Windows Service. + + For the first problem, see the next FAQ question. For the second + problem, please review the \ilink{ Windows Installation + instructions}{Win32Chapter} in this manual. + +\label{windowsdie} +\section{My Windows Client Immediately Dies When I Start It} +\item [My Windows Client Immediately Dies When I Start It] +\index[general]{Windows Client Dies} +The most common problem is either that the configuration file is not where +it expects it to be, or that there is an error in the configuration file. +You must have the configuration file in {\bf +c:\textbackslash{}bacula\textbackslash{}bin\textbackslash{}bacula-fd.conf}. + +To {\bf see} what is going on when the File daemon starts on Windows, do the +following: + +\footnotesize +\begin{verbatim} + Start a DOS shell Window. + cd c:\bacula\bin + bacula-fd -d100 -c c:\bacula\bin\bacula-fd.conf + +\end{verbatim} +\normalsize + +This will cause the FD to write a file {\bf bacula.trace} in the current +directory, which you can examine and thereby determine the problem. + +\label{scroll} +\item [When I Start the Console, the Error Messages Fly By. How can I see + them? ] +\index[general]{Error Messages} + Either use a shell window with a scroll bar, or use the gnome-console. + In any case, you probably should be logging all output to a file, and + then you can simply view the file using an editor or the {\bf less} + program. 
To log all output, I have the following in my Director's + Message resource definition: + +\footnotesize +\begin{verbatim} + append = "/home/kern/bacula/bin/log" = all, !skipped + +\end{verbatim} +\normalsize + +Obviously you will want to change the filename to be appropriate for your +system. + +\label{nobackup} +\section{My backups are not working on my Windows + Client. What should I do?} +\item [I didn't realize that the backups were not working on my Windows + Client. What should I do? ] +\index[general]{Backups Failing} +You should be sending yourself an email message for each job. This will avoid +the possibility of not knowing about a failed backup. To do so put something +like: + +\footnotesize +\begin{verbatim} + Mail = yourname@yourdomain = all, !skipped + +\end{verbatim} +\normalsize + +in your Director's message resource. You should then receive one email for +each Job that ran. When you are comfortable with what is going on (it took +me 9 months), you might change that to: + +\footnotesize +\begin{verbatim} + MailOnError = yourname@yourdomain = all, !skipped + +\end{verbatim} +\normalsize + +then you only get email messages when a Job errors as is the case for your +Windows machine. + +You should also be logging the Director's messages, please see the previous +FAQ for how to do so. + +\label{sched} +\section{All my Jobs are scheduled for the same time. Will this cause + problems?} +\item [All my Jobs are scheduled for the same time. Will this cause + problems? ] +\index[general]{Schedule problems} + No, not at all. Bacula will schedule all the Jobs at the same time, but + will run them one after another unless you have increased the number of + simultaneous jobs in the configuration files for the Director, the File + daemon, and the Storage daemon. The appropriate configuration record is + {\bf Maximum Concurrent Jobs = nn}. At the current time, we recommend + that you leave this set to {\bf 1} for the Director. + +\label{disk} +\section{Can Bacula Backup My System To Files instead of Tape?} +\item [Can Bacula Backup My System To Files instead of Tape? ] +\index[general]{Backup to Disk} + Yes, in principle, Bacula can backup to any storage medium as long as + you have correctly defined that medium in the Storage daemon's Device + resource. For an example of how to backup to files, please see the + \ilink{Pruning Example}{PruningExample} in the Recycling chapter of this + manual. Also, there is a whole chapter devoted to \ilink{Basic Volume + Management}{DiskChapter}. This chapter was originally written to + explain how to write to disk, but was expanded to include volume + management. It is, however, still quite a good chapter to read. + +\label{testbackup} +\section{Can I use a dummy device to test the backup?} + Yes, to have a {\sl Virtual} device which just consumes data, you can use a + FIFO device (see \ilink{Stored configuration}{SetupFifo}). + It's useful to test a backup. +\footnotesize +\begin{verbatim} +Device { + Name = NULL + Media Type = NULL + Device Type = Fifo + Archive Device = /dev/null + LabelMedia = yes + Random Access = no + AutomaticMount = no + RemovableMedia = no + MaximumOpenWait = 60 + AlwaysOpen = no +} +\end{verbatim} +\normalsize + +\label{bigfiles} +\section{Can Bacula Backup and Restore Files Bigger than 2 Gigabytes?} +\item [Can Bacula Backup and Restore Files Bigger than 2 Gigabytes?] +\index[general]{Large file support} +If your operating system permits it, and you are running Bacula version +1.26 or later, the answer is yes. 
To the best of our knowledge all client
+systems supported by Bacula can handle files bigger than 2 Gigabytes.
+
+\label{cancel}
+\section{I want to stop a job.}
+%% Is there a better way than "./bacula stop" to stop it?}
+\item [I Started A Job then Decided I Really Did Not Want to Run It. Is
+   there a better way than {\bf ./bacula stop} to stop it?]
+\index[general]{Cancelling jobs}
+   Yes, you normally should use the Console command {\bf cancel} to cancel
+   a Job that is either scheduled or running.  If the Job is scheduled, it
+   will be marked for cancellation and will be canceled when it is
+   scheduled to start.  If it is running, it will normally terminate after
+   a few minutes.  If the Job is waiting on a tape mount, you may need to
+   do a {\bf mount} command before it will be canceled.
+
+\label{trademark}
+\section{Why have You Trademarked the Name Bacula?}
+\item [Why have You Trademarked the Name
+   Bacula\raisebox{.6ex}{{\footnotesize \textsuperscript{\textregistered}}}?]
+\index[general]{Bacula Trademark}
+We have trademarked the name Bacula to ensure that all media written by any
+program named Bacula will always be compatible. Anyone may use the name
+Bacula, even in a derivative product as long as it remains totally compatible
+in all respects with the program defined here.
+
+\label{docversion}
+\section{Why is the Online Document for Version 1.39 but the Released Version is 1.38?}
+\item [Why is the Online Document for Version 1.39 of Bacula when the
+   Current Version is 1.38?]
+\index[general]{Multiple manuals}
+As Bacula is being developed, the document is also being enhanced; more
+often than not it has clarifications of existing features that can be very
+useful to our users, so we publish the very latest document. Fortunately
+it is rare that there are confusions with new features.
+
+If you want to read a document that pertains only to a specific version,
+please use the one distributed in the source code.  The web site also has
+online versions of both the released manual and the current development
+manual.
+
+\label{sure}
+\section{Does Bacula really save and restore all files?}
+\item [How Can I Be Sure that Bacula Really Saves and Restores All Files? ]
+\index[general]{Checking Restores}
+   It is really quite simple, but took me a while to figure
+   out how to "prove" it.  First make a Bacula Rescue disk, see the
+   \ilink{Disaster Recovery Using Bacula}{RescueChapter} chapter
+   of this manual.
+   Second, you run a full backup of all your files on all partitions.
+   Third, you run a Verify InitCatalog Job on the same FileSet, which
+   effectively makes a record of all the files on your system.  Fourth, you
+   run a Verify Catalog job and assure yourself that nothing has changed
+   (well, between an InitCatalog and Catalog one doesn't expect anything).
+   Then do the unthinkable, write zeros on your MBR (master boot record)
+   wiping out your hard disk.  Now, restore your whole system using your
+   Bacula Rescue disk and the Full backup you made, and finally re-run the
+   Verify Catalog job.  You will see that with the exception of the
+   directory modification and access dates and the files changed during the
+   boot, your system is identical to what it was before you wiped your hard
+   disk.
+   Alternatively you could do the wiping and restoring to another computer
+   of the same type.
+
+\label{upgrade}
+\section{I want an Incremental but Bacula runs it as a Full backup.
Why?} +\item [I did a Full backup last week, but now in running an Incremental, + Bacula says it did not find a FULL backup, so it did a FULL backup. Why?] +\index[general]{FULL backup not found} + Before doing an Incremental or a Differential + backup, Bacula checks to see if there was a prior Full backup of the + same Job that terminated successfully. If so, it uses the date that + full backup started as the time for comparing if files have changed. If + Bacula does not find a successful full backup, it proceeds to do one. + Perhaps you canceled the full backup, or it terminated in error. In + such cases, the full backup will not be successful. You can check by + entering {\bf list jobs} and look to see if there is a prior Job with + the same Name that has Level F and JobStatus T (normal termination). + + Another reason why Bacula may not find a suitable Full backup is that + every time you change the FileSet, Bacula will require a new Full + backup. This is necessary to ensure that all files are properly backed + up in the case where you have added more files to the FileSet. + Beginning with version 1.31, the FileSets are also dated when they are + created, and this date is displayed with the name when you are listing + or selecting a FileSet. For more on backup levels see below. + + See also {\bf Ignore FileSet Changes} in the + \ilink{FileSet Resource definition}{FileSetResource} in the Director + chapter of this document. + +\label{filenamelengths} +\section{Do you really handle unlimited path lengths?} +\item [How Can You Claim to Handle Unlimited Path and Filename Lengths + when All Other Programs Have Fixed Limits?] +\index[general]{Path and Filename Lengths} + Most of those other programs have been around for a long time, in fact + since the beginning of Unix, which means that they were designed for + rather small fixed length path and filename lengths. Over the years, + these restrictions have been relaxed allowing longer names. Bacula on + the other hand was designed in 2000, and so from the start, Path and + Filenames have been kept in buffers that start at 256 bytes in length, + but can grow as needed to handle any length. Most of the work is + carried out by lower level routines making the coding rather easy. + + Note that due to limitations Win32 path and filenames cannot exceed + 260 characters. By using Win32 Unicode functions, we will remove this + restriction in later versions of Bacula. + +\label{unique} +\section{What Is the Really Unique Feature of Bacula?} +\item [What Is the Really Unique Feature of Bacula?] +\index[general]{Unique Feature of Bacula} + Well, it is hard to come up with unique features when backup programs + for Unix machines have been around since the 1960s. That said, I + believe that Bacula is the first and only program to use a standard SQL + interface to catalog its database. Although this adds a bit of + complexity and possibly overhead, it provides an amazingly rich set of + features that are easy to program and enhance. The current code has + barely scratched the surface in this regard (version 1.38). + + The second feature, which gives a lot of power and flexibility to Bacula + is the Bootstrap record definition. + + The third unique feature, which is currently (1.30) unimplemented, and + thus can be called vaporware :-), is Base level saves. When + implemented, this will enormously reduce tape usage. 
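+
+   To give a concrete feel for the second point: a Bootstrap (bsr) file is
+   simply a small plain-text description of exactly which Volumes, sessions,
+   and file indexes are needed to restore a particular set of files.  A
+   minimal sketch (the volume name and numbers below are invented purely for
+   illustration) might look like:
+
+\footnotesize
+\begin{verbatim}
+Volume="Full-0001"
+VolSessionId=1
+VolSessionTime=1124232365
+FileIndex=1-500
+\end{verbatim}
+\normalsize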
+ +\label{sequence} +\section{How can I force one job to run after another?} +\item [If I Run Multiple Simultaneous Jobs, How Can I Force One + Particular Job to Run After Another Job? ] +\index[general]{Multiple Simultaneous Jobs} +Yes, you can set Priorities on your jobs so that they run in the order you +specify. Please see: +\ilink{the Priority record}{Priority} in the Job resource. + +\label{nomail} +\section{I Am Not Getting Email Notification, What Can I Do? } +\item [I Am Not Getting Email Notification, What Can I Do? ] +\index[general]{No Email Notification} + The most common problem is that you have not specified a fully qualified + email address and your bsmtp server is rejecting the mail. The next + most common problem is that your bsmtp server doesn't like the syntax on + the From part of the message. For more details on this and other + problems, please see the \ilink{ Getting Email Notification to + Work}{email} section of the Tips chapter of this manual. The section + \ilink{ Getting Notified of Job Completion}{notification} of the Tips + chapter may also be useful. For more information on the {\bf bsmtp} + mail program, please see \ilink{bsmtp in the Volume Utility Tools + chapter}{bsmtp} of this manual. + +\label{periods} +\section{My retention periods don't work} +\item [I Change Recycling, Retention Periods, or File Sizes in my Pool + Resource and they Still Don't Work.] +\index[general]{Recycling} +\index[general]{Retention Periods} +\index[general]{Pool changes} + The different variables associated with a Pool are defined in the Pool + Resource, but are actually read by Bacula from the Catalog database. On + Bacula versions prior to 1.30, after changing your Pool Resource, you must + manually update the corresponding values in the Catalog by using the {\bf + update pool} command in the Console program. In Bacula version 1.30, Bacula + does this for you automatically every time it starts. + + When Bacula creates a Media record (Volume), it uses many default values from + the Pool record. If you subsequently change the Pool record, the new values + will be used as a default for the next Volume that is created, but if you + want the new values to apply to existing Volumes, you must manually update + the Volume Catalog entry using the {\bf update volume} command in the Console + program. + +\label{CompressionNotWorking} +\section{Why aren't my files compressed?} +\item [I Have Configured Compression On, But None of My Files Are + Compressed. Why?] +\index[general]{Compression} + There are two kinds of compression. One is tape compression. This is done by + the tape drive hardware, and you either enable or disable it with system + tools such as {\bf mt}. This compression works independently of Bacula, + and when it is enabled, you should not use the Bacula software + compression. + + Bacula also has software compression code in the File daemons, which you + normally need to enable only when backing up to file Volumes. There are + two conditions necessary to enable the Bacula software compression. + +\begin{enumerate} +\item You must have the zip development libraries loaded on your system + when building Bacula and Bacula must find this library, normally {\bf + /usr/lib/libz.a}. On Red Hat systems, this library is provided by the + {\bf zlib-devel} rpm. 
+ + If the library is found by Bacula during the {\bf ./configure} it will + be mentioned in the {\bf config.out} line by: + +\footnotesize +\begin{verbatim} + ZLIB support: yes + +\end{verbatim} +\normalsize + +\item You must add the {\bf compression=gzip} option on your Include + statement in the Director's configuration file. +\end{enumerate} + +\label{NewTape} +\item [Bacula is Asking for a New Tape After 2 GB of Data but My Tape + holds 33 GB. Why?] +\index[general]{Tape capacity} +There are several reasons why Bacula will request a new tape. + +\begin{itemize} +\item There is an I/O error on the tape. Bacula prints an error message and + requests a new tape. Bacula does not attempt to continue writing after an + I/O error. +\item Bacula encounters and end of medium on the tape. This is not always + distinguishable from an I/O error. +\item You have specifically set some size limitation on the tape. For example + the {\bf Maximum Volume Bytes} or {\bf Maximum Volume Files} in the + Director's Pool resource, or {\bf Maximum Volume Size} in the Storage + daemon's Device resource. +\end{itemize} + +\label{LevelChanging} +\section{Incremental backups are not working} +\item [Bacula is Not Doing the Right Thing When I Request an Incremental + Backup. Why?] +\index[general]{Incremental backups} + As explained in one of the previous questions, Bacula will automatically + upgrade an Incremental or Differential job to a Full backup if it cannot + find a prior Full backup or a suitable Full backup. For the gory + details on how/when Bacula decides to upgrade levels please see the + \ilink{Level record}{Level} in the Director's configuration chapter of + this manual. + + If after reading the above mentioned section, you believe that Bacula is not + correctly handling the level (Differential/Incremental), please send us the + following information for analysis: + +\begin{itemize} +\item Your Director's configuration file. +\item The output from {\bf list jobs} covering the period where you are + having the problem. +\item The Job report output from the prior Full save (not critical). +\item An {\bf llist jobid=nnn} where nnn is the JobId of the prior Full save. + +\item The Job report output from the save that is doing the wrong thing (not + critical). +\item An {\bf llist jobid=nnn} where nnn is the JobId of the job that was not + correct. +\item An explanation of what job went wrong and why you think it did. + \end{itemize} + +The above information can allow us to analyze what happened, without it, +there is not much we can do. + +\label{WaitForever} +\section{I am waiting forever for a backup of an offsite machine} +\item [I am Backing Up an Offsite Machine with an Unreliable Connection. + The Director Waits Forever for the Client to Contact the SD. What Can I + Do?] +\index[general]{Backing Up Offsite Machines} + Bacula was written on the assumption that it will have a good TCP/IP + connection between all the daemons. As a consequence, the current + Bacula doesn't deal with faulty connections very well. This situation + is slowly being corrected over time. + + There are several things you can do to improve the situation. + +\begin{itemize} +\item Upgrade to version 1.32 and use the new SDConnectTimeout record. For + example, set: + +\footnotesize +\begin{verbatim} + SD Connect Timeout = 5 min + +\end{verbatim} +\normalsize + +in the FileDaemon resource. +\item Run these kinds of jobs after all other jobs. 
+ \end{itemize} + +\label{sshHanging} +\section{SSH hangs forever after starting Bacula} +\item [When I ssh into a machine and start Bacula then attempt to exit, + ssh hangs forever.] +\index[general]{ssh hangs} + This happens because Bacula leaves stdin, stdout, and stderr open for + debug purposes. To avoid it, the simplest thing to do is to redirect + the output of those files to {\bf /dev/null} or another file in your + startup script (the Red Hat autostart scripts do this automatically). + For example, you start the Director with: + +\footnotesize +\begin{verbatim} + bacula-dir -c bacula-dir.conf ... 0>\&1 2>\&1 >/dev/null + +\end{verbatim} +\normalsize + +and likewise for the other daemons. + +\label{RetentionPeriods} +\section{I'm confused by retention periods} +\item [I'm confused by the different Retention periods: File Retention, + Job Retention, Volume Retention. Why are there so many?] +\index[general]{Retention Periods} + Yes, this certainly can be confusing. The basic reason for so many is + to allow flexibility. The File records take quite a lot of space in the + catalog, so they are typically records you want to remove rather + quickly. The Job records, take very little space, and they can be + useful even without the File records to see what Jobs actually ran and + when. One must understand that if the File records are removed from the + catalog, you cannot use the {\bf restore} command to restore an + individual file since Bacula no longer knows where it is. However, as + long as the Volume Retention period has not expired, the data will still + be on the tape, and can be recovered from the tape. + + For example, I keep a 30 day retention period for my Files to keep my + catalog from getting too big, but I keep my tapes for a minimum of one + year, just in case. + +\label{MaxVolumeSize} +\section{MaxVolumeSize is ignored} +\item [Why Does Bacula Ignore the MaxVolumeSize Set in my Pool?] +\index[general]{MaxVolumeSize} + The MaxVolumeSize that Bacula uses comes from the Media record, so most + likely you changed your Pool, which is used as the default for creating + Media records, {\bf after} you created your Volume. Check what is in + the Media record by doing: + +\footnotesize +\begin{verbatim} +llist Volume=xxx +\end{verbatim} +\normalsize + +If it doesn't have the right value, you can use: + +\footnotesize +\begin{verbatim} +update Volume=xxx +\end{verbatim} +\normalsize + +to change it. + +\label{ConnectionRefused} +\section{I get a Connection refused when connecting to my Client} +\item [In connecting to my Client, I get "ERR:Connection Refused. Packet + Size too big from File daemon:192.168.1.4:9102" Why?] +\index[general]{ERR:Connection Refused} + This is typically a communications error resulting from one of the + following: + + +\begin{itemize} +\item Old versions of Bacula, usually a Win32 client, where two threads were + using the same I/O packet. Fixed in more recent versions. Please upgrade. +\item Some other program such as an HP Printer using the same port (9102 in + this case). +\end{itemize} + +If it is neither of the above, please submit a bug report at +\elink{bugs.bacula.org}{http://bugs.bacula.org}. + +Another solution might be to run the daemon with the debug option by: + +\footnotesize +\begin{verbatim} + Start a DOS shell Window. 
+ cd c:\bacula\bin + bacula-fd -d100 -c c:\bacula\bin\bacula-fd.conf + +\end{verbatim} +\normalsize + +This will cause the FD to write a file {\bf bacula.trace} in the current +directory, which you can examine to determine the problem. + +\section{Long running jobs die with Pipe Error} +\item [During long running jobs my File daemon dies with Pipe Error, or + some other communications error. Why?] +\index[general]{Communications Errors} +\index[general]{Pipe Errors} +\index[general]{slow} +\index[general]{Backups!slow} + There are a number of reasons why a connection might break. + Most often, it is a router between your two computers that times out + inactive lines (not respecting the keepalive feature that Bacula uses). + In that case, you can use the {\bf Heartbeat Interval} directive in + both the Storage daemon and the File daemon. + + In at least one case, the problem has been a bad driver for a Win32 + NVidia NForce 3 ethernet card with driver (4.4.2 17/05/2004). + In this case, a good driver is (4.8.2.0 06/04/2005). Moral of + the story, make sure you have the latest ethernet drivers + loaded, or use the following workaround as suggested by Thomas + Simmons for Win32 machines: + + Browse to: + Start \gt{} Control Panel \gt{} Network Connections + + Right click the connection for the nvidia adapter and select properties. + Under the General tab, click "Configure...". Under the Advanced tab set + "Checksum Offload" to disabled and click OK to save the change. + + Lack of communications, or communications that get interrupted can + also be caused by Linux firewalls where you have a rule that throttles + connections or traffic. For example, if you have: + +\footnotesize +\begin{verbatim} +iptables -t filter -A INPUT -m limit --limit 3/second --limit-burst 3 -j DROP +\end{verbatim} +\normalsize + + you will want to add the following rules {\bf before} the above rule: +\footnotesize +\begin{verbatim} +iptables -t filter -A INPUT --dport 9101 -j ACCEPT +iptables -t filter -A INPUT --dport 9102 -j ACCEPT +iptables -t filter -A INPUT --dport 9103 -j ACCEPT +\end{verbatim} +\normalsize + This will ensure that any Bacula traffic will not get terminated because + of high usage rates. + +\section{How do I tell the Job which Volume to use?} +\item[I can't figure out how to tell the job which volume to use] + \index[general]{What tape to mount} + This is an interesting statement. I now see that a number of people new to + Bacula have the same problem as you, probably from using programs like tar. + + In fact, you do not tell Bacula what tapes to use. It is the inverse. Bacula + tells you want tapes it wants. You put tapes at its disposition and it + chooses. + + Now, if you *really* want to be tricky and try to tell Bacula what to do, it + will be reasonable if for example you mount a valid tape that it can use on a + drive, it will most likely go ahead and use it. It also has a documented + algorithm for choosing tapes -- but you are asking for problems ... + + So, the trick is to invert your concept of things and put Bacula in charge of + handling the tapes. Once you do that, you will be fine. If you want to + anticipate what it is going to do, you can generally figure it out correctly + and get what you want. + + If you start with the idea that you are going to force or tell Bacula to use + particular tapes or you insist on trying to run in that kind of mode, you will + probably not be too happy. + + I don't want to worry about what tape has what data. That is what Bacula is + designed for. 
+
+  If you have an application where you *really* need to remove a tape each
+  day and insert a new one, it can be done; the directives exist to
+  accomplish that. In such a case, one little "trick" for knowing what tape
+  Bacula will want at 2am while you are asleep is to run a tiny job at 4pm,
+  while you are still at work, that backs up say one directory, or even one
+  file. You will quickly find out what tape it wants, and you can mount it
+  before you go home ...
+
+\label{Password generation}
+\section{Password generation}
+\item [How do I generate a password?]
+\index[general]{Password generation}
+
+  Each daemon needs a password. This password occurs in the configuration
+  file for that daemon and in the bacula-dir.conf file. These passwords are
+  plain text. There is no special generation procedure. Most people just
+  use random text.
+
+  Passwords are never sent over the wire in plain text. They are always
+  encrypted.
+
+  Security surrounding these passwords is best left to your operating
+  system. Passwords are not encrypted within Bacula configuration files.
+
+\end{description}
\ No newline at end of file
diff --git a/docs/manuals/de/problems/fdl.tex b/docs/manuals/de/problems/fdl.tex
new file mode 100644
index 00000000..b46cd990
--- /dev/null
+++ b/docs/manuals/de/problems/fdl.tex
@@ -0,0 +1,485 @@
+% TODO: maybe get rid of centering
+
+\chapter{GNU Free Documentation License}
+\index[general]{GNU Free Documentation License}
+\index[general]{License!GNU Free Documentation}
+
+\label{label_fdl}
+
+ \begin{center}
+
+       Version 1.2, November 2002
+
+
+ Copyright \copyright 2000,2001,2002  Free Software Foundation, Inc.
+
+ \bigskip
+
+     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+ \bigskip
+
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+\end{center}
+
+
+\begin{center}
+{\bf\large Preamble}
+\end{center}
+
+The purpose of this License is to make a manual, textbook, or other
+functional and useful document "free" in the sense of freedom: to
+assure everyone the effective freedom to copy and redistribute it,
+with or without modifying it, either commercially or noncommercially.
+Secondarily, this License preserves for the author and publisher a way
+to get credit for their work, while not being considered responsible
+for modifications made by others.
+
+This License is a kind of "copyleft", which means that derivative
+works of the document must themselves be free in the same sense.  It
+complements the GNU General Public License, which is a copyleft
+license designed for free software.
+
+We have designed this License in order to use it for manuals for free
+software, because free software needs free documentation: a free
+program should come with manuals providing the same freedoms that the
+software does.  But this License is not limited to software manuals;
+it can be used for any textual work, regardless of subject matter or
+whether it is published as a printed book.  We recommend this License
+principally for works whose purpose is instruction or reference.
+
+
+\begin{center}
+{\Large\bf 1. APPLICABILITY AND DEFINITIONS}
+\end{center}
+
+This License applies to any manual or other work, in any medium, that
+contains a notice placed by the copyright holder saying it can be
+distributed under the terms of this License.  Such a notice grants a
+world-wide, royalty-free license, unlimited in duration, to use that
+work under the conditions stated herein.
The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. 
For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. 
+If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] 
+ For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". 
You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. 
FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/de/problems/firewalls.tex b/docs/manuals/de/problems/firewalls.tex new file mode 100644 index 00000000..1e93c04e --- /dev/null +++ b/docs/manuals/de/problems/firewalls.tex @@ -0,0 +1,373 @@ +%% +%% + +\chapter{Dealing with Firewalls} +\label{FirewallsChapter} +\index[general]{Dealing with Firewalls } +\index[general]{Firewalls!Dealing with } + +If you have a firewall or a DMZ installed on your computer, you may experience +difficulties contacting one or more of the Clients to back them up. This is +especially true if you are trying to backup a Client across the Internet. + +\section{Technical Details} +\index[general]{Technical Details } +\index[general]{Details!Technical } + +If you are attempting to do this, the sequence of network events in Bacula to +do a backup are the following: + +\footnotesize +\begin{verbatim} +Console -> DIR:9101 +DIR -> SD:9103 +DIR -> FD:9102 +FD -> SD:9103 +\end{verbatim} +\normalsize + +Where hopefully it is obvious that DIR represents the Director, FD the File +daemon or client, and SD the Storage daemon. 
The numbers that follow those +names are the standard ports used by Bacula, and the -\gt{} represents the +left side making a connection to the right side (i.e. the right side is the +"server" or is listening on the specified port), and the left side is the +"client" that initiates the conversation. + +Note, port 9103 serves both the Director and the File daemon, each having its +own independent connection. + +If you are running {\bf iptables}, you might add something like: + +\footnotesize +\begin{verbatim} +-A FW-1-INPUT -m state --state NEW -m tcp -p tcp --dport 9101:9103 -j ACCEPT +\end{verbatim} +\normalsize + +on your server, and + +\footnotesize +\begin{verbatim} +-A FW-1-INPUT -m state --state NEW -m tcp -p tcp --dport 9102 -j ACCEPT +\end{verbatim} +\normalsize + +on your client. In both cases, I assume that the machine is allowed to +initiate connections on any port. If not, you will need to allow outgoing +connections on ports 9102 and 9103 on your server and 9103 on your client. +Thanks to Raymond Norton for this tip. + +\section{A Concrete Example} +\index[general]{Example!Concrete } +\index[general]{Concrete Example } + +The following discussion was originally written by +Jesse Guardiani because he has 'internal' and 'external' requiring the +Director and the Client to use different IP addresses. His original +solution was to define two different Storage resources in the Director's +conf file each pointing to the same Storage daemon but with different +IP addresses. In Bacula 1.38.x this no longer works, because Bacula makes +a one-to-one association between a Storage daemon resource and a Device (such +as an Autochanger). As a consequence, I have modified his original +text to a method that I believe will work, but is as of yet untested +(KES - July 2006). + +My bacula server is on the 192.168.1.0/24 network at IP address 192.168.1.52. +For the sake of discussion we will refer to this network as the 'internal' +network because it connects to the internet through a NAT'd firewall. We will +call the network on the public (internet) side of the NAT'd firewall the +'external' network. Also, for the sake of discussion we will call my bacula +server: + +\footnotesize +\begin{verbatim} + server.int.mydomain.tld +\end{verbatim} +\normalsize + +when a fully qualified domain name is required, or simply: + +\footnotesize +\begin{verbatim} + server +\end{verbatim} +\normalsize + +if a hostname is adequate. We will call the various bacula daemons running on +the server.int.mydomain.tld machine: + +\footnotesize +\begin{verbatim} + server-fd + server-sd + server-dir +\end{verbatim} +\normalsize + +In addition, I have two clients that I want to back up with Bacula. The first +client is on the internal network. Its fully qualified domain name is: + +\footnotesize +\begin{verbatim} + private1.int.mydomain.tld +\end{verbatim} +\normalsize + +And its hostname is: + +\footnotesize +\begin{verbatim} + private1 +\end{verbatim} +\normalsize + +This machine is a client and therefore runs just one bacula daemon: + +\footnotesize +\begin{verbatim} + private1-fd +\end{verbatim} +\normalsize + +The second client is on the external network. 
Its fully qualified domain name is:
+
+\footnotesize
+\begin{verbatim}
+  public1.mydomain.tld
+\end{verbatim}
+\normalsize
+
+And its hostname is:
+
+\footnotesize
+\begin{verbatim}
+  public1
+\end{verbatim}
+\normalsize
+
+This machine also runs just one bacula daemon:
+
+\footnotesize
+\begin{verbatim}
+  public1-fd
+\end{verbatim}
+\normalsize
+
+Finally, I have a NAT firewall/gateway with two network interfaces. The first
+interface is on the internal network and serves as a gateway to the internet
+for all the machines attached to the internal network (for example,
+server.int.mydomain.tld and private1.int.mydomain.tld). The second interface
+is on the external (internet) network. The external interface has been
+assigned the name:
+
+\footnotesize
+\begin{verbatim}
+  firewall.mydomain.tld
+\end{verbatim}
+\normalsize
+
+Remember:
+
+\footnotesize
+\begin{verbatim}
+  *.int.mydomain.tld = internal network
+  *.mydomain.tld     = external network
+\end{verbatim}
+\normalsize
+
+\subsection{The Bacula Configuration Files for the Above}
+\index[general]{Above!Bacula Configuration Files for the }
+\index[general]{Bacula Configuration Files for the Above }
+
+server-sd manages a 4-tape AIT autoloader. All of my backups are written to
+server-sd. I have just *one* Device resource in my server-sd.conf file:
+
+\footnotesize
+\begin{verbatim}
+Autochanger {
+  Name = "autochanger1"
+  Device = Drive0
+  Changer Device = /dev/ch0;
+  Changer Command = "/usr/local/sbin/chio-bacula %c %o %S %a";
+}
+Device {
+  Name = Drive0
+  DriveIndex = 0
+  Media Type = AIT-1;
+  Archive Device = /dev/nrsa1;
+  Label Media = yes;
+  AutoChanger = yes;
+  AutomaticMount = yes;               # when device opened, read it
+  AlwaysOpen = yes;
+  Hardware End of Medium = No
+  Fast Forward Space File = No
+  BSF at EOM = yes
+}
+\end{verbatim}
+\normalsize
+
+(Note: please see the \ilink{Tape Testing}{FreeBSDTapes} chapter of this
+manual for important FreeBSD information.) However, unlike previously, there
+is only one Storage definition in my server-dir.conf file:
+
+\footnotesize
+\begin{verbatim}
+Storage {
+  Name = "autochanger1"      # Storage device for backing up
+  Address = Storage-server
+  SDPort = 9103
+  Password = "mysecretpassword"
+  Device = "autochanger1"
+  Media Type = AIT-1
+  Autochanger = yes
+}
+\end{verbatim}
+\normalsize
+
+Note that the Storage resource uses neither of the two addresses of the
+Storage daemon -- neither server.int.mydomain.tld nor firewall.mydomain.tld --
+but instead uses the address Storage-server.
+
+The key point is that on the internal net, Storage-server is resolved to
+server.int.mydomain.tld, either with an entry in /etc/hosts or by creating an
+appropriate DNS entry, while on the external net (the Client machine),
+Storage-server is resolved to firewall.mydomain.tld.
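+
+For example, a minimal sketch of that split name resolution using /etc/hosts
+(the external address below is purely illustrative -- substitute the real
+public address of firewall.mydomain.tld, or create the equivalent DNS records
+instead):
+
+\footnotesize
+\begin{verbatim}
+# /etc/hosts on the internal machines (server, private1):
+192.168.1.52    Storage-server
+
+# /etc/hosts on the external client (public1):
+203.0.113.10    Storage-server    # public side of firewall.mydomain.tld
+\end{verbatim}
+\normalsize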
+
+
+In addition to the above, I have two Client resources defined in
+server-dir.conf:
+
+\footnotesize
+\begin{verbatim}
+Client {
+  Name = private1-fd
+  Address = private1.int.mydomain.tld
+  FDPort = 9102
+  Catalog = MyCatalog
+  Password = "mysecretpassword"      # password for FileDaemon
+}
+Client {
+  Name = public1-fd
+  Address = public1.mydomain.tld
+  FDPort = 9102
+  Catalog = MyCatalog
+  Password = "mysecretpassword"      # password for FileDaemon
+}
+\end{verbatim}
+\normalsize
+
+And finally, to tie it all together, I have two Job resources defined in
+server-dir.conf. Both reference the single Storage resource "autochanger1"
+defined above; which address that name ultimately reaches depends on how the
+Client doing the backup resolves Storage-server:
+
+\footnotesize
+\begin{verbatim}
+Job {
+  Name = "Private1-Backup"
+  Type = Backup
+  Client = private1-fd
+  FileSet = "Private1"
+  Schedule = "WeeklyCycle"
+  Storage = "autochanger1"
+  Messages = Standard
+  Pool = "Weekly"
+  Write Bootstrap = "/var/db/bacula/Private1-Backup.bsr"
+  Priority = 12
+}
+Job {
+  Name = "Public1-Backup"
+  Type = Backup
+  Client = public1-fd
+  FileSet = "Public1"
+  Schedule = "WeeklyCycle"
+  Storage = "autochanger1"
+  Messages = Standard
+  Pool = "Weekly"
+  Write Bootstrap = "/var/db/bacula/Public1-Backup.bsr"
+  Priority = 13
+}
+\end{verbatim}
+\normalsize
+
+It is important to notice that, because the 'Private1-Backup' Job backs up a
+machine on the internal network, that machine resolves Storage-server so as
+to contact the Storage daemon via the internal net. On the other hand, the
+'Public1-Backup' Job backs up a machine on the external network, so that
+machine resolves Storage-server to contact the Storage daemon via the
+external net.
+
+I have left the Pool, Catalog, Messages, FileSet, Schedule, and Director
+resources out of the above server-dir.conf examples because they are not
+pertinent to the discussion.
+
+\subsection{How Does It Work?}
+\index[general]{How Does It Work? }
+\index[general]{Work!How Does It }
+
+If I want to run a backup of private1.int.mydomain.tld and store that backup
+using server-sd, then my understanding of the order of events is this:
+
+\begin{enumerate}
+\item I execute my Bacula 'console' command on server.int.mydomain.tld.
+\item console connects to server-dir.
+\item I tell console to 'run' backup Job 'Private1-Backup'.
+\item console relays this command to server-dir.
+\item server-dir connects to private1-fd at private1.int.mydomain.tld:9102
+\item server-dir tells private1-fd to start sending the files defined in the
+      'Private1-Backup' Job's FileSet resource to the Storage resource
+      'autochanger1', which we have defined in server-dir.conf as having the
+      address:port of Storage-server, which is mapped by DNS to
+      server.int.mydomain.tld.
+\item private1-fd connects to server.int.mydomain.tld:9103 and begins sending
+      files.
+\end{enumerate}
+
+Alternatively, if I want to run a backup of public1.mydomain.tld and store
+that backup using server-sd, then my understanding of the order of events is
+this:
+
+\begin{enumerate}
+\item I execute my Bacula 'console' command on server.int.mydomain.tld.
+\item console connects to server-dir.
+\item I tell console to 'run' backup Job 'Public1-Backup'.
+\item console relays this command to server-dir.
+\item server-dir connects, through the NAT'd firewall, to public1-fd at + public1.mydomain.tld:9102 +\item server-dir tells public1-fd to start sending the files defined in the + 'Public1-Backup' Job's FileSet resource to the Storage resource + 'autochanger1', which we have defined in server-dir.conf as having the + same address:port as above of Storage-server, but which on this machine + is resolved to firewall.mydomain.tld:9103. +\item public1-fd connects to firewall.mydomain.tld:9103 and begins sending + files. + \end{enumerate} + +\subsection{Important Note} +\index[general]{Important Note } +\index[general]{Note!Important } + +In order for the above 'Public1-Backup' Job to succeed, +firewall.mydomain.tld:9103 MUST be forwarded using the firewall's +configuration software to server.int.mydomain.tld:9103. Some firewalls call +this 'Server Publication'. Others may call it 'Port Forwarding'. + +\subsection{Firewall Problems} +\index[general]{Firewall Problems} +\index[general]{Problems!Firewalls} +Either a firewall or a router may decide to timeout and terminate +open connections if they are not active for a short time. By Internet +standards the period should be two hours, and should be indefinitely +extended if KEEPALIVE is set as is the case by Bacula. If your firewall +or router does not respect these rules, you may find Bacula connections +terminated. In that case, the first thing to try is turning on the +{\bf Heart Beat Interval} both in the File daemon and the Storage daemon +and set an interval of say five minutes. + +Also, if you have denial of service rate limiting in your firewall, this +too can cause Bacula disconnects since Bacula can at times use very high +access rates. To avoid this, you should implement default accept +rules for the Bacula ports involved before the rate limiting rules. + +Finally, if you have a Windows machine, it will most likely by default +disallow connections to the Bacula Windows File daemon. See the +Windows chapter of this manual for additional details. diff --git a/docs/manuals/de/problems/fix_tex.pl b/docs/manuals/de/problems/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/de/problems/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. 
+ # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . '}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. 
+ my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/de/problems/index.perl b/docs/manuals/de/problems/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/de/problems/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. 
We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. +# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. 
+ $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. + # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. + s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. 
In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/de/problems/kaboom.tex b/docs/manuals/de/problems/kaboom.tex new file mode 100644 index 00000000..a4e5bc57 --- /dev/null +++ b/docs/manuals/de/problems/kaboom.tex @@ -0,0 +1,233 @@ +%% +%% + +\chapter{What To Do When Bacula Crashes (Kaboom)} +\label{KaboomChapter} +\index[general]{Kaboom!What To Do When Bacula Crashes } +\index[general]{What To Do When Bacula Crashes (Kaboom) } + +If you are running on a Linux system, and you have a set of working +configuration files, it is very unlikely that {\bf Bacula} will crash. As with +all software, however, it is inevitable that someday, it may crash, +particularly if you are running on another operating system or using a new or +unusual feature. + +This chapter explains what you should do if one of the three {\bf Bacula} +daemons (Director, File, Storage) crashes. When we speak of crashing, we +mean that the daemon terminates abnormally because of an error. There are +many cases where Bacula detects errors (such as PIPE errors) and will fail +a job. These are not considered crashes. In addition, under certain +conditions, Bacula will detect a fatal in the configuration, such as +lack of permission to read/write the working directory. In that case, +Bacula will force itself to crash with a SEGFAULT. However, before +crashing, Bacula will normally display a message indicating why. +For more details, please read on. + + +\section{Traceback} +\index[general]{Traceback} + +Each of the three Bacula daemons has a built-in exception handler which, in +case of an error, will attempt to produce a traceback. If successful the +traceback will be emailed to you. + +For this to work, you need to ensure that a few things are setup correctly on +your system: + +\begin{enumerate} +\item You must have a version of Bacula built with debug information turned + on and not stripped of debugging symbols. + +\item You must have an installed copy of {\bf gdb} (the GNU debugger), and it + must be on {\bf Bacula's} path. On some systems such as Solaris, {\bf + gdb} may be replaced by {\bf dbx}. + +\item The Bacula installed script file {\bf btraceback} must be in the same + directory as the daemon which dies, and it must be marked as executable. + +\item The script file {\bf btraceback.gdb} must have the correct path to it + specified in the {\bf btraceback} file. + +\item You must have a {\bf mail} program which is on {\bf Bacula's} path. + By default, this {\bf mail} program is set to {\bf bsmtp}, so it must + be correctly configured. + +\item If you run either the Director or Storage daemon under a non-root + userid, you will most likely need to modify the {\bf btraceback} file + to do something like {\bf sudo} (raise to root priority) for the + call to {\bf gdb} so that it has the proper permissions to debug + Bacula. +\end{enumerate} + +If all the above conditions are met, the daemon that crashes will produce a +traceback report and email it to you. If the above conditions are not true, +you can either run the debugger by hand as described below, or you may be able +to correct the problems by editing the {\bf btraceback} file. I recommend not +spending too much time on trying to get the traceback to work as it can be +very difficult. + +The changes that might be needed are to add a correct path to the {\bf gdb} +program, correct the path to the {\bf btraceback.gdb} file, change the {\bf +mail} program or its path, or change your email address. 
The key line in the +{\bf btraceback} file is: + +\footnotesize +\begin{verbatim} +gdb -quiet -batch -x /home/kern/bacula/bin/btraceback.gdb \ + $1 $2 2>\&1 | bsmtp -s "Bacula traceback" your-address@xxx.com +\end{verbatim} +\normalsize + +Since each daemon has the same traceback code, a single btraceback file is +sufficient if you are running more than one daemon on a machine. + +\section{Testing The Traceback} +\index[general]{Traceback!Testing The } +\index[general]{Testing The Traceback } + +To "manually" test the traceback feature, you simply start {\bf Bacula} then +obtain the {\bf PID} of the main daemon thread (there are multiple threads). +The output produced here will look different depending on what OS and what +version of the kernel you are running. +Unfortunately, the output had to be split to fit on this page: + +\footnotesize +\begin{verbatim} +[kern@rufus kern]$ ps fax --columns 132 | grep bacula-dir + 2103 ? S 0:00 /home/kern/bacula/k/src/dird/bacula-dir -c + /home/kern/bacula/k/src/dird/dird.conf + 2104 ? S 0:00 \_ /home/kern/bacula/k/src/dird/bacula-dir -c + /home/kern/bacula/k/src/dird/dird.conf + 2106 ? S 0:00 \_ /home/kern/bacula/k/src/dird/bacula-dir -c + /home/kern/bacula/k/src/dird/dird.conf + 2105 ? S 0:00 \_ /home/kern/bacula/k/src/dird/bacula-dir -c + /home/kern/bacula/k/src/dird/dird.conf +\end{verbatim} +\normalsize + +which in this case is 2103. Then while Bacula is running, you call the program +giving it the path to the Bacula executable and the {\bf PID}. In this case, +it is: + +\footnotesize +\begin{verbatim} +./btraceback /home/kern/bacula/k/src/dird 2103 +\end{verbatim} +\normalsize + +It should produce an email showing you the current state of the daemon (in +this case the Director), and then exit leaving {\bf Bacula} running as if +nothing happened. If this is not the case, you will need to correct the +problem by modifying the {\bf btraceback} script. + +Typical problems might be that {\bf gdb} or {\bf dbx} for Solaris is not on +the default path. Fix this by specifying the full path to it in the {\bf +btraceback} file. Another common problem is that you haven't modified the +script so that the {\bf bsmtp} program has an appropriate smtp server or +the proper syntax for your smtp server. If you use the {\bf mail} program +and it is not on the default path, it will also fail. On some systems, it +is preferable to use {\bf Mail} rather than {\bf mail}. + +\section{Getting A Traceback On Other Systems} +\index[general]{Getting A Traceback On Other Systems} +\index[general]{Systems!Getting A Traceback On Other} + +It should be possible to produce a similar traceback on systems other than +Linux, either using {\bf gdb} or some other debugger. Solaris with {\bf dbx} +loaded works quite fine. On other systems, you will need to modify the {\bf +btraceback} program to invoke the correct debugger, and possibly correct the +{\bf btraceback.gdb} script to have appropriate commands for your debugger. If +anyone succeeds in making this work with another debugger, please send us a +copy of what you modified. Please keep in mind that for any debugger to +work, it will most likely need to run as root, so you may need to modify +the {\bf btraceback} script accordingly. 
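+
+For example, if you run the Storage daemon under a non-root userid, a
+modified {\bf btraceback} might look something like the following sketch.
+The gdb path, the {\bf bsmtp} arguments and the destination address are only
+placeholders taken from the example above; adapt them to your installation,
+and note that this assumes {\bf sudo} is configured to let the daemon's
+userid run {\bf gdb} without a password prompt:
+
+\footnotesize
+\begin{verbatim}
+#!/bin/sh
+# Sketch only -- adjust the paths, the mail program and the address.
+PNAME=`basename $1`
+sudo gdb -quiet -batch -x /home/kern/bacula/bin/btraceback.gdb $1 $2 2>&1 \
+  | bsmtp -h localhost -s "Bacula traceback of $PNAME" your-address@xxx.com
+\end{verbatim}
+\normalsize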
+ +\label{ManuallyDebugging} +\section{Manually Running Bacula Under The Debugger} +\index[general]{Manually Running Bacula Under The Debugger} +\index[general]{Debugger!Manually Running Bacula Under The} + +If for some reason you cannot get the automatic traceback, or if you want to +interactively examine the variable contents after a crash, you can run Bacula +under the debugger. Assuming you want to run the Storage daemon under the +debugger (the technique is the same for the other daemons, only the name +changes), you would do the following: + +\begin{enumerate} +\item Start the Director and the File daemon. If the Storage daemon also + starts, you will need to find its PID as shown above (ps fax | grep + bacula-sd) and kill it with a command like the following: + +\footnotesize +\begin{verbatim} + kill -15 PID +\end{verbatim} +\normalsize + +where you replace {\bf PID} by the actual value. + +\item At this point, the Director and the File daemon should be running but + the Storage daemon should not. + +\item cd to the directory containing the Storage daemon + +\item Start the Storage daemon under the debugger: + + \footnotesize +\begin{verbatim} + gdb ./bacula-sd +\end{verbatim} +\normalsize + +\item Run the Storage daemon: + + \footnotesize +\begin{verbatim} + run -s -f -c ./bacula-sd.conf +\end{verbatim} +\normalsize + +You may replace the {\bf ./bacula-sd.conf} with the full path to the Storage +daemon's configuration file. + +\item At this point, Bacula will be fully operational. + +\item In another shell command window, start the Console program and do what + is necessary to cause Bacula to die. + +\item When Bacula crashes, the {\bf gdb} shell window will become active and + {\bf gdb} will show you the error that occurred. + +\item To get a general traceback of all threads, issue the following command: + + +\footnotesize +\begin{verbatim} + thread apply all bt +\end{verbatim} +\normalsize + +After that you can issue any debugging command. +\end{enumerate} + +\section{Getting Debug Output from Bacula} +\index[general]{Getting Debug Output from Bacula } +Each of the daemons normally has debug compiled into the program, but +disabled. There are two ways to enable the debug output. One is to add the +{\bf -d nnn} option on the command line when starting the debugger. The {\bf +nnn} is the debug level, and generally anything between 50 and 200 is +reasonable. The higher the number, the more output is produced. The output is +written to standard output. + +The second way of getting debug output is to dynamically turn it on using the +Console using the {\bf setdebug} command. The full syntax of the command is: + +\footnotesize +\begin{verbatim} + setdebug level=nnn client=client-name storage=storage-name dir +\end{verbatim} +\normalsize + +If none of the options are given, the command will prompt you. You can +selectively turn on/off debugging in any or all the daemons (i.e. it is not +necessary to specify all the components of the above command). diff --git a/docs/manuals/de/problems/latex2html-init.pl b/docs/manuals/de/problems/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/de/problems/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. 
+$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/de/problems/problems.tex b/docs/manuals/de/problems/problems.tex new file mode 100644 index 00000000..b6a1d5ba --- /dev/null +++ b/docs/manuals/de/problems/problems.tex @@ -0,0 +1,81 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Problem Resolution Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". +} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{faq} +\include{tips} +\include{tapetesting} +\include{firewalls} +\include{kaboom} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] + +\end{document} diff --git a/docs/manuals/de/problems/rpm-faq.tex b/docs/manuals/de/problems/rpm-faq.tex new file mode 100644 index 00000000..1e37cc59 --- /dev/null +++ b/docs/manuals/de/problems/rpm-faq.tex @@ -0,0 +1,394 @@ +%% +%% + +\chapter{Bacula RPM Packaging FAQ} +\label{RpmFaqChapter} +\index[general]{FAQ!Bacula\textsuperscript{\textregistered} - RPM Packaging } +\index[general]{Bacula\textsuperscript{\textregistered} - RPM Packaging FAQ } + +\begin{enumerate} +\item + \ilink{How do I build Bacula for platform xxx?}{faq1} +\item + \ilink{How do I control which database support gets built?}{faq2} + +\item + \ilink{What other defines are used?}{faq3} +\item + \ilink{I'm getting errors about not having permission when I try to build the + packages. Do I need to be root?}{faq4} +\item + \ilink{I'm building my own rpms but on all platforms and compiles I get an + unresolved dependency for something called + /usr/afsws/bin/pagsh.}{faq5} +\item + \ilink{I'm building my own rpms because you don't publish for my platform. + Can I get my packages released to sourceforge for other people to use?}{faq6} +\item + \ilink{Is there an easier way than sorting out all these command line options?}{faq7} +\item + \ilink{I just upgraded from 1.36.x to 1.38.x and now my director daemon won't start. It appears to start but dies silently and I get a "connection refused" error when starting the console. 
What is wrong?}{faq8}
+\item
+ \ilink{There are a lot of rpm packages. Which packages do I need for what?}{faq9}
+\end{enumerate}
+
+\section{Answers}
+\index[general]{Answers }
+
+\begin{enumerate}
+\item
+ \label{faq1}
+ {\bf How do I build Bacula for platform xxx?}
+ The Bacula spec file contains defines to build for several platforms:
+ Red Hat 7.x (rh7), Red Hat 8.0 (rh8), Red Hat 9 (rh9), Fedora Core (fc1,
+ fc3, fc4, fc5, fc6, fc7), Whitebox Enterprise Linux 3.0 (wb3), Red Hat Enterprise Linux
+ (rhel3, rhel4, rhel5), Mandrake 10.x (mdk), Mandriva 2006.x (mdv), CentOS (centos3, centos4, centos5),
+ Scientific Linux (sl3, sl4, sl5) and SuSE (su9, su10, su102, su103). The package build is controlled by a mandatory define set at the beginning of the file. These defines basically just control the dependency information that gets coded into the finished rpm package as well
+ as any special configure options required. The platform define may be edited
+ in the spec file directly (by default all defines are set to 0 or "not set").
+ For example, to build the Red Hat 7.x package, find the line in the spec file
+ which reads
+
+\footnotesize
+\begin{verbatim}
+ %define rh7 0
+
+\end{verbatim}
+\normalsize
+
+and edit it to read
+
+\footnotesize
+\begin{verbatim}
+ %define rh7 1
+
+\end{verbatim}
+\normalsize
+
+Alternatively, you may pass the define on the command line when calling rpmbuild:
+
+
+\footnotesize
+\begin{verbatim}
+ rpmbuild -ba --define "build_rh7 1" bacula.spec
+ rpmbuild --rebuild --define "build_rh7 1" bacula-x.x.x-x.src.rpm
+
+\end{verbatim}
+\normalsize
+
+\item
+ \label{faq2}
+ {\bf How do I control which database support gets built?}
+ Another mandatory build define controls which database support is compiled,
+ one of build\_sqlite, build\_mysql or build\_postgresql. To get the MySQL
+ package and support, either set the
+
+\footnotesize
+\begin{verbatim}
+ %define mysql 0
+ OR
+ %define mysql4 0
+ OR
+ %define mysql5 0
+
+\end{verbatim}
+\normalsize
+
+to
+
+\footnotesize
+\begin{verbatim}
+ %define mysql 1
+ OR
+ %define mysql4 1
+ OR
+ %define mysql5 1
+
+\end{verbatim}
+\normalsize
+
+in the spec file directly or pass it to rpmbuild on the command line:
+
+\footnotesize
+\begin{verbatim}
+ rpmbuild -ba --define "build_rh7 1" --define "build_mysql 1" bacula.spec
+ rpmbuild -ba --define "build_rh7 1" --define "build_mysql4 1" bacula.spec
+ rpmbuild -ba --define "build_rh7 1" --define "build_mysql5 1" bacula.spec
+
+\end{verbatim}
+\normalsize
+
+\item
+ \label{faq3}
+ {\bf What other defines are used?}
+ Three other building defines of note are the depkgs\_version, docs\_version and
+ \_rescuever identifiers. These defines are set with each release and must
+ match the version of those sources that are being used to build the packages.
+ You would not ordinarily need to edit these. See also the Build Options section
+ below for other build time options that can be passed on the command line.
+\item
+ \label{faq4}
+ {\bf I'm getting errors about not having permission when I try to build the
+ packages. Do I need to be root?}
+ No, you do not need to be root and, in fact, it is better practice to
+ build rpm packages as a non-root user. Bacula packages are designed to
+ be built by a regular user but you must make a few changes on your
+ system to do this. If you are building on your own system then the
+ simplest method is to add write permissions for all to the build
+ directory (/usr/src/redhat/, /usr/src/RPM or /usr/src/packages).
+ To accomplish this, execute the following command as root:
+
+\footnotesize
+\begin{verbatim}
+ chmod -R 777 /usr/src/redhat
+ chmod -R 777 /usr/src/RPM
+ chmod -R 777 /usr/src/packages
+
+\end{verbatim}
+\normalsize
+
+If you are working on a shared system where you cannot use the method
+above, you need to recreate the appropriate directory tree from above with all
+of its subdirectories inside your home directory. Then create a file named
+
+{\tt .rpmmacros}
+
+in your home directory (or edit the file if it already exists)
+and add the following line:
+
+\footnotesize
+\begin{verbatim}
+ %_topdir /home/myuser/redhat
+
+\end{verbatim}
+\normalsize
+
+Another handy directive for the .rpmmacros file if you wish to suppress the
+creation of debug rpm packages is:
+
+\footnotesize
+\begin{verbatim}
+ %debug_package %{nil}
+
+\end{verbatim}
+
+\normalsize
+
+\item
+ \label{faq5}
+ {\bf I'm building my own rpms but on all platforms and compiles I get an
+ unresolved dependency for something called /usr/afsws/bin/pagsh.} This
+ is a shell from the OpenAFS (Andrew File System). If you are seeing
+ this then you chose to include the docs/examples directory in your
+ package. One of the example scripts in this directory is a pagsh
+ script. Rpmbuild, when scanning for dependencies, looks at the shebang
+ line of all packaged scripts in addition to checking shared libraries.
+ To avoid this, do not package the examples directory. If you are seeing this
+ problem, you are building a very old bacula package as the examples have been
+ removed from the doc packaging.
+
+\item
+ \label{faq6}
+ {\bf I'm building my own rpms because you don't publish for my platform.
+ Can I get my packages released to sourceforge for other people to use?} Yes,
+ contributions from users are accepted and appreciated. Please examine the
+ directory platforms/contrib-rpm in the source code for further information.
+
+\item
+ \label{faq7}
+ {\bf Is there an easier way than sorting out all these command line options?} Yes,
+ there is a GUI wizard shell script which you can use to rebuild the src rpm package.
+ Look in the source archive for platforms/contrib-rpm/rpm\_wizard.sh. This script will
+ allow you to specify build options using GNOME dialog screens. It requires zenity.
+
+\item
+ \label{faq8}
+ {\bf I just upgraded from 1.36.x to 1.38.x and now my director daemon
+won't start. It appears to start but dies silently and I get a "connection
+refused" error when starting the console. What is wrong?} Beginning with
+1.38 the rpm packages are configured to run the director and storage
+daemons as a non-root user. The file daemon runs as user root and group
+bacula, the storage daemon as user bacula and group disk, and the director
+as user bacula and group bacula. If you are upgrading you will need to
+change some file permissions for things to work. Execute the following
+commands as root:
+
+\footnotesize
+\begin{verbatim}
+ chown bacula.bacula /var/bacula/*
+ chown root.bacula /var/bacula/bacula-fd.9102.state
+ chown bacula.disk /var/bacula/bacula-sd.9103.state
+
+\end{verbatim}
+\normalsize
+
+Further, if you are using File storage volumes rather than tapes those
+files will also need to have ownership set to user bacula and group bacula.
+
+\item
+ \label{faq9}
+ {\bf There are a lot of rpm packages. Which packages do I need for
+what?} For a Bacula server you need to select the package based upon your
+preferred catalog database: one of bacula-mysql, bacula-postgresql or
+bacula-sqlite.
If your system does not provide an mtx package you also +need bacula-mtx to satisfy that dependancy. For a client machine you need +only install bacula-client. Optionally, for either server or client +machines, you may install a graphical console bacula-gconsole and/or +bacula-wxconsole. The Bacula Administration Tool is installed with the +bacula-bat package. One last package, bacula-updatedb is required only when +upgrading a server more than one database revision level. + + + +\item {\bf Support for RHEL3/4/5, CentOS 3/4/5, Scientific Linux 3/4/5 and x86\_64} + The examples below show + explicit build support for RHEL4 and CentOS 4. Build support + for x86\_64 has also been added. +\end{enumerate} + +\footnotesize +\begin{verbatim} +Build with one of these 3 commands: + +rpmbuild --rebuild \ + --define "build_rhel4 1" \ + --define "build_sqlite 1" \ + bacula-1.38.3-1.src.rpm + +rpmbuild --rebuild \ + --define "build_rhel4 1" \ + --define "build_postgresql 1" \ + bacula-1.38.3-1.src.rpm + +rpmbuild --rebuild \ + --define "build_rhel4 1" \ + --define "build_mysql4 1" \ + bacula-1.38.3-1.src.rpm + +For CentOS substitute '--define "build_centos4 1"' in place of rhel4. +For Scientific Linux substitute '--define "build_sl4 1"' in place of rhel4. + +For 64 bit support add '--define "build_x86_64 1"' +\end{verbatim} +\normalsize + +\section{Build Options} +\index[general]{Build Options} +The spec file currently supports building on the following platforms: +\footnotesize +\begin{verbatim} +Red Hat builds +--define "build_rh7 1" +--define "build_rh8 1" +--define "build_rh9 1" + +Fedora Core build +--define "build_fc1 1" +--define "build_fc3 1" +--define "build_fc4 1" +--define "build_fc5 1" +--define "build_fc6 1" +--define "build_fc7 1" + +Whitebox Enterprise build +--define "build_wb3 1" + +Red Hat Enterprise builds +--define "build_rhel3 1" +--define "build_rhel4 1" +--define "build_rhel5 1" + +CentOS build +--define "build_centos3 1" +--define "build_centos4 1" +--define "build_centos5 1" + +Scientific Linux build +--define "build_sl3 1" +--define "build_sl4 1" +--define "build_sl5 1" + +SuSE build +--define "build_su9 1" +--define "build_su10 1" +--define "build_su102 1" +--define "build_su103 1" + +Mandrake 10.x build +--define "build_mdk 1" + +Mandriva build +--define "build_mdv 1" + +MySQL support: +for mysql 3.23.x support define this +--define "build_mysql 1" +if using mysql 4.x define this, +currently: Mandrake 10.x, Mandriva 2006.0, SuSE 9.x & 10.0, FC4 & RHEL4 +--define "build_mysql4 1" +if using mysql 5.x define this, +currently: SuSE 10.1 & FC5 +--define "build_mysql5 1" + +PostgreSQL support: +--define "build_postgresql 1" + +Sqlite support: +--define "build_sqlite 1" + +Build the client rpm only in place of one of the above database full builds: +--define "build_client_only 1" + +X86-64 support: +--define "build_x86_64 1" + +Supress build of bgnome-console: +--define "nobuild_gconsole 1" + +Build the WXWindows console: +requires wxGTK >= 2.6 +--define "build_wxconsole 1" + +Build the Bacula Administration Tool: +requires QT >= 4.2 +--define "build_bat 1" + +Build python scripting support: +--define "build_python 1" + +Modify the Packager tag for third party packages: +--define "contrib_packager Your Name " + +\end{verbatim} +\normalsize + +\section{RPM Install Problems} +\index[general]{RPM Install Problems} +In general the RPMs, once properly built should install correctly. 
+However, when attempting to run the daemons, a number of problems +can occur: +\begin{itemize} +\item [Wrong /var/bacula Permissions] + By default, the Director and Storage daemon do not run with + root permission. If the /var/bacula is owned by root, then it + is possible that the Director and the Storage daemon will not + be able to access this directory, which is used as the Working + Directory. To fix this, the easiest thing to do is: +\begin{verbatim} + chown bacula:bacula /var/bacula +\end{verbatim} + Note: as of 1.38.8 /var/bacula is installed root:bacula with + permissions 770. +\item [The Storage daemon cannot Access the Tape drive] + This can happen in some older RPM releases where the Storage + daemon ran under userid bacula, group bacula. There are two + ways of fixing this: the best is to modify the /etc/init.d/bacula-sd + file so that it starts the Storage daemon with group "disk". + The second way to fix the problem is to change the permissions + of your tape drive (usually /dev/nst0) so that Bacula can access it. + You will probably need to change the permissions of the SCSI control + device as well, which is usually /dev/sg0. The exact names depend + on your configuration, please see the Tape Testing chapter for + more information on devices. +\end{itemize} + diff --git a/docs/manuals/de/problems/setup.sm b/docs/manuals/de/problems/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/de/problems/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/de/problems/tapetesting.tex b/docs/manuals/de/problems/tapetesting.tex new file mode 100644 index 00000000..7281f34e --- /dev/null +++ b/docs/manuals/de/problems/tapetesting.tex @@ -0,0 +1,1293 @@ +%% +%% + +\chapter{Testing Your Tape Drive With Bacula} +\label{TapeTestingChapter} +\index[general]{Testing Your Tape Drive With Bacula} + +This chapter is concerned with testing and configuring your tape drive to make +sure that it will work properly with Bacula using the {\bf btape} program. +\label{summary} + +\section{Get Your Tape Drive Working} + +In general, you should follow the following steps to get your tape drive to +work with Bacula. Start with a tape mounted in your drive. If you have an +autochanger, load a tape into the drive. We use {\bf /dev/nst0} as the tape +drive name, you will need to adapt it according to your system. + +Do not proceed to the next item until you have succeeded with the previous +one. + +\begin{enumerate} +\item Make sure that Bacula (the Storage daemon) is not running + or that you have {\bf unmount}ed the drive you will use + for testing. + +\item Use tar to write to, then read from your drive: + + \footnotesize +\begin{verbatim} + mt -f /dev/nst0 rewind + tar cvf /dev/nst0 . 
+ mt -f /dev/nst0 rewind + tar tvf /dev/nst0 + +\end{verbatim} +\normalsize + +\item Make sure you have a valid and correct Device resource corresponding + to your drive. For Linux users, generally, the default one works. For + FreeBSD users, there are two possible Device configurations (see below). + For other drives and/or OSes, you will need to first ensure that your + system tape modes are properly setup (see below), then possibly modify + you Device resource depending on the output from the btape program (next + item). When doing this, you should consult the \ilink{Storage Daemon + Configuration}{StoredConfChapter} of this manual. + +\item If you are using a Fibre Channel to connect your tape drive to + Bacula, please be sure to disable any caching in the NSR (network + storage router, which is a Fibre Channel to SCSI converter). + +\item Run the btape {\bf test} command: + + \footnotesize +\begin{verbatim} + ./btape -c bacula-sd.conf /dev/nst0 + test + +\end{verbatim} +\normalsize + + It isn't necessary to run the autochanger part of the test at this time, + but do not go past this point until the basic test succeeds. If you do + have an autochanger, please be sure to read the \ilink{Autochanger + chapter}{AutochangersChapter} of this manual. + +\item Run the btape {\bf fill} command, preferably with two volumes. This + can take a long time. If you have an autochanger and it is configured, Bacula + will automatically use it. If you do not have it configured, you can manually + issue the appropriate {\bf mtx} command, or press the autochanger buttons to + change the tape when requested to do so. + +\item FreeBSD users, if you have a pre-5.0 system run the {\bf tapetest} + program, and make sure your system is patched if necessary. The tapetest + program can be found in the platform/freebsd directory. The instructions + for its use are at the top of the file. + +\item Run Bacula, and backup a reasonably small directory, say 60 + Megabytes. Do three successive backups of this directory. + +\item Stop Bacula, then restart it. Do another full backup of the same + directory. Then stop and restart Bacula. + +\item Do a restore of the directory backed up, by entering the following + restore command, being careful to restore it to an alternate location: + + +\footnotesize +\begin{verbatim} + restore select all done + yes + +\end{verbatim} +\normalsize + + Do a {\bf diff} on the restored directory to ensure it is identical to the + original directory. If you are going to backup multiple different systems + (Linux, Windows, Mac, Solaris, FreeBSD, ...), be sure you test the restore + on each system type. + +\item If you have an autochanger, you should now go back to the btape program + and run the autochanger test: + +\footnotesize +\begin{verbatim} + ./btape -c bacula-sd.conf /dev/nst0 + auto + +\end{verbatim} +\normalsize + + Adjust your autochanger as necessary to ensure that it works correctly. See + the Autochanger chapter of this manual for a complete discussion of testing + your autochanger. + +\item We strongly recommend that you use a dedicated SCSI + controller for your tape drives. Scanners are known to induce + serious problems with the SCSI bus, causing it to reset. If the + SCSI bus is reset while Bacula has the tape drive open, it will + most likely be fatal to your tape since the drive will rewind. + These kinds of problems show up in the system log. 
For example, + the following was most likely caused by a scanner: + +\footnotesize +\begin{verbatim} +Feb 14 17:29:55 epohost kernel: (scsi0:A:2:0): No or incomplete CDB sent to device. +Feb 14 17:29:55 epohost kernel: scsi0: Issued Channel A Bus Reset. 1 SCBs aborted +\end{verbatim} +\normalsize + +\end{enumerate} + +If you have reached this point, you stand a good chance of having everything +work. If you get into trouble at any point, {\bf carefully} read the +documentation given below. If you cannot get past some point, ask the {\bf +bacula-users} email list, but specify which of the steps you have successfully +completed. In particular, you may want to look at the +\ilink{ Tips for Resolving Problems}{problems1} section below. + + +\label{NoTapeInDrive} +\subsection{Problems When no Tape in Drive} +\index[general]{Problems When no Tape in Drive} +When Bacula was first written the Linux 2.4 kernel permitted opening the +drive whether or not there was a tape in the drive. Thus the Bacula code is +based on the concept that if the drive cannot be opened, there is a serious +problem, and the job is failed. + +With version 2.6 of the Linux kernel, if there is no tape in the drive, the +OS will wait two minutes (default) and then return a failure, and consequently, +Bacula version 1.36 and below will fail the job. This is important to keep +in mind, because if you use an option such as {\bf Offline on Unmount = +yes}, there will be a point when there is no tape in the drive, and if +another job starts or if Bacula asks the operator to mount a tape, when +Bacula attempts to open the drive (about a 20 minute delay), it will fail +and Bacula will fail the job. + +In version 1.38.x, the Bacula code partially gets around this problem -- at +least in the initial open of the drive. However, functions like Polling +the drive do not work correctly if there is no tape in the drive. +Providing you do not use {\bf Offline on Unmount = yes}, you should not +experience job failures as mentioned above. If you do experience such +failures, you can also increase the {\bf Maximum Open Wait} time interval, +which will give you more time to mount the next tape before the job is +failed. + +\subsection{Specifying the Configuration File} +\index[general]{File!Specifying the Configuration} +\index[general]{Specifying the Configuration File} + +Starting with version 1.27, each of the tape utility programs including the +{\bf btape} program requires a valid Storage daemon configuration file +(actually, the only part of the configuration file that {\bf btape} needs is +the {\bf Device} resource definitions). This permits {\bf btape} to find the +configuration parameters for your archive device (generally a tape drive). +Without those parameters, the testing and utility programs do not know how to +properly read and write your drive. By default, they use {\bf bacula-sd.conf} +in the current directory, but you may specify a different configuration file +using the {\bf -c} option. + +\subsection{Specifying a Device Name For a Tape} +\index[general]{Tape!Specifying a Device Name For a} +\index[general]{Specifying a Device Name For a Tape} + +{\bf btape} {\bf device-name} where the Volume can be found. In the case of a +tape, this is the physical device name such as {\bf /dev/nst0} or {\bf +/dev/rmt/0ubn} depending on your system that you specify on the Archive Device +directive. For the program to work, it must find the identical name in the +Device resource of the configuration file. 
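+
+For example, with a {\bf Device} resource along these lines in
+{\bf bacula-sd.conf} (shown here only as a sketch, trimmed to the directives
+that matter, and with purely illustrative names), you could give {\bf btape}
+either the physical name or, as described next, the resource name:
+
+\footnotesize
+\begin{verbatim}
+Device {
+  Name = DDS-4
+  Media Type = DDS-4
+  Archive Device = /dev/nst0
+  ...
+}
+
+./btape -c bacula-sd.conf /dev/nst0
+./btape -c bacula-sd.conf DDS-4
+\end{verbatim}
+\normalsize
+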
If the name is not found in the +list of physical names, the utility program will compare the name you entered +to the Device names (rather than the Archive device names). + +When specifying a tape device, it is preferable that the "non-rewind" +variant of the device file name be given. In addition, on systems such as +Sun, which have multiple tape access methods, you must be sure to specify +to use Berkeley I/O conventions with the device. The +{\bf b} in the Solaris (Sun) archive specification {\bf /dev/rmt/0mbn} is +what is needed in this case. Bacula does not support SysV tape drive +behavior. + +See below for specifying Volume names. + +\subsection{Specifying a Device Name For a File} +\index[general]{File!Specifying a Device Name For a} +\index[general]{Specifying a Device Name For a File} + +If you are attempting to read or write an archive file rather than a tape, the +{\bf device-name} should be the full path to the archive location including +the filename. The filename (last part of the specification) will be stripped +and used as the Volume name, and the path (first part before the filename) +must have the same entry in the configuration file. So, the path is equivalent +to the archive device name, and the filename is equivalent to the volume name. + + +\section{btape} +\label{btape1} +\index[general]{Btape} + +This program permits a number of elementary tape operations via a tty command +interface. The {\bf test} command, described below, can be very useful for +testing tape drive compatibility problems. Aside from initial testing of tape +drive compatibility with {\bf Bacula}, {\bf btape} will be mostly used by +developers writing new tape drivers. + +{\bf btape} can be dangerous to use with existing {\bf Bacula} tapes because +it will relabel a tape or write on the tape if so requested regardless of +whether or not the tape contains valuable data, so please be careful and use +it only on blank tapes. + +To work properly, {\bf btape} needs to read the Storage daemon's configuration +file. As a default, it will look for {\bf bacula-sd.conf} in the current +directory. If your configuration file is elsewhere, please use the {\bf -c} +option to specify where. + +The physical device name or the Device resource name must be specified on the +command line, and this same device name must be present in the Storage +daemon's configuration file read by {\bf btape} + +\footnotesize +\begin{verbatim} +Usage: btape [options] device_name + -b specify bootstrap file + -c set configuration file to file + -d set debug level to nn + -p proceed inspite of I/O errors + -s turn off signals + -v be verbose + -? print this message. +\end{verbatim} +\normalsize + +\subsection{Using btape to Verify your Tape Drive} +\index[general]{Using btape to Verify your Tape Drive} +\index[general]{Drive!Using btape to Verify your Tape} + +An important reason for this program is to ensure that a Storage daemon +configuration file is defined so that Bacula will correctly read and write +tapes. + +It is highly recommended that you run the {\bf test} command before running +your first Bacula job to ensure that the parameters you have defined for your +storage device (tape drive) will permit {\bf Bacula} to function properly. You +only need to mount a blank tape, enter the command, and the output should be +reasonably self explanatory. 
For example: + +\footnotesize +\begin{verbatim} +(ensure that Bacula is not running) +./btape -c /usr/bin/bacula/bacula-sd.conf /dev/nst0 +\end{verbatim} +\normalsize + +The output will be: + +\footnotesize +\begin{verbatim} +Tape block granularity is 1024 bytes. +btape: btape.c:376 Using device: /dev/nst0 +* +\end{verbatim} +\normalsize + +Enter the test command: + +\footnotesize +\begin{verbatim} +test +\end{verbatim} +\normalsize + +The output produced should be something similar to the following: I've cut the +listing short because it is frequently updated to have new tests. + +\footnotesize +\begin{verbatim} +=== Append files test === +This test is essential to Bacula. +I'm going to write one record in file 0, + two records in file 1, + and three records in file 2 +btape: btape.c:387 Rewound /dev/nst0 +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:410 Wrote EOF to /dev/nst0 +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:410 Wrote EOF to /dev/nst0 +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:410 Wrote EOF to /dev/nst0 +btape: btape.c:387 Rewound /dev/nst0 +btape: btape.c:693 Now moving to end of media. +btape: btape.c:427 Moved to end of media +We should be in file 3. I am at file 3. This is correct! +Now the important part, I am going to attempt to append to the tape. +... +=== End Append files test === +\end{verbatim} +\normalsize + +If you do not successfully complete the above test, please resolve the +problem(s) before attempting to use {\bf Bacula}. Depending on your tape +drive, the test may recommend that you add certain records to your +configuration. We strongly recommend that you do so and then re-run the above +test to insure it works the first time. + +Some of the suggestions it provides for resolving the problems may or may not +be useful. If at all possible avoid using fixed blocking. If the test suddenly +starts to print a long series of: + +\footnotesize +\begin{verbatim} +Got EOF on tape. +Got EOF on tape. +... +\end{verbatim} +\normalsize + +then almost certainly, you are running your drive in fixed block mode rather +than variable block mode. See below for more help of resolving fix +versus variable block problems. + +It is also possible that you have your drive +set in SysV tape drive mode. The drive must use BSD tape conventions. +See the section above on setting your {\bf Archive device} correctly. + +For FreeBSD users, please see the notes below for doing further testing of +your tape drive. 
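+
+Before re-running the {\bf test} command after such problems, it can help on
+Linux systems to put the drive back into variable block mode and BSD
+behavior first. The commands below are only an example (the device name will
+differ on your system, and the options are discussed in more detail in the
+tape modes section later in this chapter):
+
+\footnotesize
+\begin{verbatim}
+mt -f /dev/nst0 rewind
+mt -f /dev/nst0 defblksize 0
+mt -f /dev/nst0 stoptions buffer-writes async-writes read-ahead
+./btape -c bacula-sd.conf /dev/nst0
+test
+\end{verbatim}
+\normalsize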
+
+\label{SCSITricks}
+\subsection{Linux SCSI Tricks}
+\index[general]{Tricks!Linux SCSI}
+\index[general]{Linux SCSI Tricks}
+
+You can find out what SCSI devices you have by doing:
+
+\footnotesize
+\begin{verbatim}
+lsscsi
+\end{verbatim}
+\normalsize
+
+Typical output is:
+
+\footnotesize
+\begin{verbatim}
+[0:0:0:0] disk ATA ST3160812AS 3.AD /dev/sda
+[2:0:4:0] tape HP Ultrium 2-SCSI F6CH /dev/st0
+[2:0:5:0] tape HP Ultrium 2-SCSI F6CH /dev/st1
+[2:0:6:0] mediumx OVERLAND LXB 0107 -
+[2:0:9:0] tape HP Ultrium 1-SCSI E50H /dev/st2
+[2:0:10:0] mediumx OVERLAND LXB 0107 -
+\end{verbatim}
+\normalsize
+
+There are two drives in one autochanger: /dev/st0 and /dev/st1
+and a third tape drive at /dev/st2. For using them with Bacula, one
+would normally reference them as /dev/nst0 ... /dev/nst2. Note also,
+there are two different autochangers identified as "mediumx OVERLAND LXB".
+They can be addressed via their /dev/sgN designation, which can be
+obtained by counting from the beginning, starting at 0, up to each changer. In the
+above case, the two changers are located on /dev/sg3 and /dev/sg5. The one
+at /dev/sg3 controls drives /dev/nst0 and /dev/nst1; and the one at
+/dev/sg5 controls drive /dev/nst2.
+
+If you do not have the {\bf lsscsi} command, you can obtain the same
+information as follows:
+
+\footnotesize
+\begin{verbatim}
+cat /proc/scsi/scsi
+\end{verbatim}
+\normalsize
+
+For the above example with the three drives and two autochangers,
+I get:
+
+\footnotesize
+\begin{verbatim}
+Attached devices:
+Host: scsi0 Channel: 00 Id: 00 Lun: 00
+ Vendor: ATA Model: ST3160812AS Rev: 3.AD
+ Type: Direct-Access ANSI SCSI revision: 05
+Host: scsi2 Channel: 00 Id: 04 Lun: 00
+ Vendor: HP Model: Ultrium 2-SCSI Rev: F6CH
+ Type: Sequential-Access ANSI SCSI revision: 03
+Host: scsi2 Channel: 00 Id: 05 Lun: 00
+ Vendor: HP Model: Ultrium 2-SCSI Rev: F6CH
+ Type: Sequential-Access ANSI SCSI revision: 03
+Host: scsi2 Channel: 00 Id: 06 Lun: 00
+ Vendor: OVERLAND Model: LXB Rev: 0107
+ Type: Medium Changer ANSI SCSI revision: 02
+Host: scsi2 Channel: 00 Id: 09 Lun: 00
+ Vendor: HP Model: Ultrium 1-SCSI Rev: E50H
+ Type: Sequential-Access ANSI SCSI revision: 03
+Host: scsi2 Channel: 00 Id: 10 Lun: 00
+ Vendor: OVERLAND Model: LXB Rev: 0107
+ Type: Medium Changer ANSI SCSI revision: 02
+\end{verbatim}
+\normalsize
+
+
+As an additional example, I get the following (on a different machine from the
+above example):
+
+\footnotesize
+\begin{verbatim}
+Attached devices:
+Host: scsi2 Channel: 00 Id: 01 Lun: 00
+ Vendor: HP Model: C5713A Rev: H107
+ Type: Sequential-Access ANSI SCSI revision: 02
+Host: scsi2 Channel: 00 Id: 04 Lun: 00
+ Vendor: SONY Model: SDT-10000 Rev: 0110
+ Type: Sequential-Access ANSI SCSI revision: 02
+\end{verbatim}
+\normalsize
+
+The above represents first an autochanger and second a simple
+tape drive. The HP changer (the first entry) uses the same SCSI channel
+for data and for control, so in Bacula, you would use:
+\footnotesize
+\begin{verbatim}
+Archive Device = /dev/nst0
+Changer Device = /dev/sg0
+\end{verbatim}
+\normalsize
+
+If you want to remove the SDT-10000 device, you can do so as root with:
+
+\footnotesize
+\begin{verbatim}
+echo "scsi remove-single-device 2 0 4 0">/proc/scsi/scsi
+\end{verbatim}
+\normalsize
+
+and you can add it back with:
+
+\footnotesize
+\begin{verbatim}
+echo "scsi add-single-device 2 0 4 0">/proc/scsi/scsi
+\end{verbatim}
+\normalsize
+
+where the 2 0 4 0 are the Host, Channel, Id, and Lun as seen on the output
+from {\bf cat /proc/scsi/scsi}.
Note, the Channel must be specified as +numeric. + +Below is a slightly more complicated output, which is a single autochanger +with two drives, and which operates the changer on a different channel +from from the drives: + +\footnotesize +\begin{verbatim} +Attached devices: +Host: scsi0 Channel: 00 Id: 00 Lun: 00 + Vendor: ATA Model: WDC WD1600JD-75H Rev: 08.0 + Type: Direct-Access ANSI SCSI revision: 05 +Host: scsi2 Channel: 00 Id: 04 Lun: 00 + Vendor: HP Model: Ultrium 2-SCSI Rev: F6CH + Type: Sequential-Access ANSI SCSI revision: 03 +Host: scsi2 Channel: 00 Id: 05 Lun: 00 + Vendor: HP Model: Ultrium 2-SCSI Rev: F6CH + Type: Sequential-Access ANSI SCSI revision: 03 +Host: scsi2 Channel: 00 Id: 06 Lun: 00 + Vendor: OVERLAND Model: LXB Rev: 0106 + Type: Medium Changer ANSI SCSI revision: 02 +\end{verbatim} +\normalsize + +The above tape drives are accessed on /dev/nst0 and /dev/nst1, while +the control channel for those two drives is /dev/sg3. + + + +\label{problems1} +\section{Tips for Resolving Problems} +\index[general]{Problems!Tips for Resolving} +\index[general]{Tips for Resolving Problems} + +\label{CannotRestore} +\subsection{Bacula Saves But Cannot Restore Files} +\index[general]{Files!Bacula Saves But Cannot Restore} +\index[general]{Bacula Saves But Cannot Restore Files} + +If you are getting error messages such as: + +\footnotesize +\begin{verbatim} +Volume data error at 0:1! Wanted block-id: "BB02", got "". Buffer discarded +\end{verbatim} +\normalsize + +It is very likely that Bacula has tried to do block positioning and ended up +at an invalid block. This can happen if your tape drive is in fixed block mode +while Bacula's default is variable blocks. Note that in such cases, Bacula is +perfectly able to write to your Volumes (tapes), but cannot position to read +them. + +There are two possible solutions. + +\begin{enumerate} +\item The first and best is to always ensure that your drive is in variable + block mode. Note, it can switch back to fixed block mode on a reboot or if + another program uses the drive. So on such systems you need to modify the + Bacula startup files to explicitly set: + +\footnotesize +\begin{verbatim} +mt -f /dev/nst0 defblksize 0 +\end{verbatim} +\normalsize + +or whatever is appropriate on your system. Note, if you are running a Linux +system, and the above command does not work, it is most likely because you +have not loaded the appropriate {\bf mt} package, which is often called +{\bf mt\_st}, but may differ according to your distribution. + +\item The second possibility, especially, if Bacula wrote while the drive was + in fixed block mode, is to turn off block positioning in Bacula. This is done + by adding: + +\footnotesize +\begin{verbatim} +Block Positioning = no +\end{verbatim} +\normalsize + +to the Device resource. This is not the recommended procedure because it can +enormously slow down recovery of files, but it may help where all else +fails. This directive is available in version 1.35.5 or later (and not yet +tested). +\end{enumerate} + +If you are getting error messages such as: +\footnotesize +\begin{verbatim} +Volume data error at 0:0! +Block checksum mismatch in block=0 len=32625 calc=345678 blk=123456 +\end{verbatim} +\normalsize + +You are getting tape read errors, and this is most likely due to +one of the following things: +\begin{enumerate} +\item An old or bad tape. +\item A dirty drive that needs cleaning (particularly for DDS drives). +\item A loose SCSI cable. +\item Old firmware in your drive. 
Make sure you have the latest firmware
+ loaded.
+\item Computer memory errors.
+\item Over-clocking your CPU.
+\item A bad SCSI card.
+\end{enumerate}
+
+
+\label{opendevice}
+\subsection{Bacula Cannot Open the Device}
+\index[general]{Device!Bacula Cannot Open the}
+\index[general]{Bacula Cannot Open the Device}
+
+If you get an error message such as:
+
+\footnotesize
+\begin{verbatim}
+dev open failed: dev.c:265 stored: unable to open
+device /dev/nst0:> ERR=No such device or address
+\end{verbatim}
+\normalsize
+
+the first time you run a job, it is most likely due to the fact that you
+specified the incorrect device name on your {\bf Archive Device}.
+
+If Bacula works fine with your drive, but then all of a sudden you get error
+messages similar to the one shown above, it is quite possible that your driver
+module is being removed because the kernel deems it idle. This is done via
+{\bf crontab} with the use of {\bf rmmod -a}. To fix the problem, you can
+remove this entry from {\bf crontab}, or you can manually {\bf modprobe} your
+driver module (or add it to the local startup script). Thanks to Alan Brown
+for this tip.
+\label{IncorrectFiles}
+
+\subsection{Incorrect File Number}
+\index[general]{Number!Incorrect File}
+\index[general]{Incorrect File Number}
+
+When Bacula moves to the end of the medium, it normally uses the {\bf
+ioctl(MTEOM)} function. Then Bacula uses the {\bf ioctl(MTIOCGET)} function to
+retrieve the current file position from the {\bf mt\_fileno} field. Some SCSI
+tape drivers will use a fast means of seeking to the end of the medium and in
+doing so, they will not know the current file position and hence return a {\bf
+-1}. As a consequence, if you get {\bf "This is NOT correct!"} in the
+positioning tests, this may be the cause. You must correct this condition in
+order for Bacula to work.
+
+There are two possible solutions to the above problem of incorrect file
+number:
+
+\begin{itemize}
+\item Figure out how to configure your SCSI driver to keep track of the file
+ position during the MTEOM request. This is the preferred solution.
+\item Modify the {\bf Device} resource of your {\bf bacula-sd.conf} file to
+ include:
+
+\footnotesize
+\begin{verbatim}
+Hardware End of File = no
+\end{verbatim}
+\normalsize
+
+This will cause Bacula to use the MTFSF request to seek to the end of the
+medium, and Bacula will keep track of the file number itself.
+\end{itemize}
+
+\label{IncorrectBlocks}
+\subsection{Incorrect Number of Blocks or Positioning Errors}
+\index[general]{Testing!Incorrect Number of Blocks or Positioning Errors}
+\index[general]{Incorrect Number of Blocks or Positioning Errors}
+
+{\bf Bacula's} preferred method of working with tape drives (sequential
+devices) is to run in variable block mode, and this is what is set by default.
+You should first ensure that your tape drive is set for variable block mode
+(see below).
+
+If your tape drive is in fixed block mode and you have told Bacula to use
+different fixed block sizes or variable block sizes (default), you will get
+errors when Bacula attempts to forward space to the correct block (the kernel
+driver's idea of tape blocks will not correspond to Bacula's).
+
+All modern tape drives support variable tape blocks, but some older drives (in
+particular the QIC drives) as well as the ATAPI ide-scsi driver run only in
+fixed block mode. The Travan tape drives also apparently must run in fixed
+block mode (to be confirmed).
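+
+If you are not sure which mode your drive is currently using, on Linux you
+can usually check it with {\bf mt} (the device name is only an example, and
+the exact output format varies between systems):
+
+\footnotesize
+\begin{verbatim}
+mt -f /dev/nst0 status
+\end{verbatim}
+\normalsize
+
+A reported tape block size of 0 normally means the drive is in variable
+block mode; any other value is a fixed block size.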
+ +Even in variable block mode, with the exception of the first record on the +second or subsequent volume of a multi-volume backup, Bacula will write blocks +of a fixed size. However, in reading a tape, Bacula will assume that for each +read request, exactly one block from the tape will be transferred. This the +most common way that tape drives work and is well supported by {\bf Bacula}. + +Drives that run in fixed block mode can cause serious problems for Bacula if +the drive's block size does not correspond exactly to {\bf Bacula's} block +size. In fixed block size mode, drivers may transmit a partial block or +multiple blocks for a single read request. From {\bf Bacula's} point of view, +this destroys the concept of tape blocks. It is much better to run in variable +block mode, and almost all modern drives (the OnStream is an exception) run in +variable block mode. In order for Bacula to run in fixed block mode, you must +include the following records in the Storage daemon's Device resource +definition: + +\footnotesize +\begin{verbatim} +Minimum Block Size = nnn +Maximum Block Size = nnn +\end{verbatim} +\normalsize + +where {\bf nnn} must be the same for both records and must be identical to the +driver's fixed block size. + +We recommend that you avoid this configuration if at all possible by using +variable block sizes. + +If you must run with fixed size blocks, make sure they are not 512 bytes. This +is too small and the overhead that Bacula has with each record will become +excessive. If at all possible set any fixed block size to something like +64,512 bytes or possibly 32,768 if 64,512 is too large for your drive. See +below for the details on checking and setting the default drive block size. + +To recover files from tapes written in fixed block mode, see below. + +\label{TapeModes} +\subsection{Ensuring that the Tape Modes Are Properly Set -- {\bf Linux +Only}} +\index[general]{Ensuring that the Tape Modes Are Properly Set -- Linux Only} + +If you have a modern SCSI tape drive and you are having problems with the {\bf +test} command as noted above, it may be that some program has set one or more +of your SCSI driver's options to non-default values. For example, if your +driver is set to work in SysV manner, Bacula will not work correctly because +it expects BSD behavior. To reset your tape drive to the default values, you +can try the following, but {\bf ONLY} if you have a SCSI tape drive on a {\bf +Linux} system: + +\footnotesize +\begin{verbatim} +become super user +mt -f /dev/nst0 rewind +mt -f /dev/nst0 stoptions buffer-writes async-writes read-ahead +\end{verbatim} +\normalsize + +The above commands will clear all options and then set those specified. None +of the specified options are required by Bacula, but a number of other options +such as SysV behavior must not be set. Bacula does not support SysV tape +behavior. On systems other than Linux, you will need to consult your {\bf mt} +man pages or documentation to figure out how to do the same thing. This should +not really be necessary though -- for example, on both Linux and Solaris +systems, the default tape driver options are compatible with Bacula. +On Solaris systems, you must take care to specify the correct device +name on the {\bf Archive device} directive. See above for more details. 
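+
+As a sketch only (the resource and media type names are illustrative), a
+Solaris {\bf Device} resource using the Berkeley, non-rewind variant of the
+device file might contain:
+
+\footnotesize
+\begin{verbatim}
+Device {
+  Name = Sun-DDS-4
+  Media Type = DDS-4
+  # the 'b' selects Berkeley (BSD) behavior, the 'n' selects no-rewind
+  Archive Device = /dev/rmt/0mbn
+  ...
+}
+\end{verbatim}
+\normalsize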
+ +You may also want to ensure that no prior program has set the default block +size, as happened to one user, by explicitly turning it off with: + +\footnotesize +\begin{verbatim} +mt -f /dev/nst0 defblksize 0 +\end{verbatim} +\normalsize + +If you are running a Linux +system, and the above command does not work, it is most likely because you +have not loaded the appropriate {\bf mt} package, which is often called +{\bf mt\_st}, but may differ according to your distribution. + +If you would like to know what options you have set before making any of the +changes noted above, you can now view them on Linux systems, thanks to a tip +provided by Willem Riede. Do the following: + +\footnotesize +\begin{verbatim} +become super user +mt -f /dev/nst0 stsetoptions 0 +grep st0 /var/log/messages +\end{verbatim} +\normalsize + +and you will get output that looks something like the following: + +\footnotesize +\begin{verbatim} +kernel: st0: Mode 0 options: buffer writes: 1, async writes: 1, read ahead: 1 +kernel: st0: can bsr: 0, two FMs: 0, fast mteom: 0, auto lock: 0, +kernel: st0: defs for wr: 0, no block limits: 0, partitions: 0, s2 log: 0 +kernel: st0: sysv: 0 nowait: 0 +\end{verbatim} +\normalsize + +Note, I have chopped off the beginning of the line with the date and machine +name for presentation purposes. + +Some people find that the above settings only last until the next reboot, so +please check this otherwise you may have unexpected problems. + +Beginning with Bacula version 1.35.8, if Bacula detects that you are running +in variable block mode, it will attempt to set your drive appropriately. All +OSes permit setting variable block mode, but some OSes do not permit setting +the other modes that Bacula needs to function properly. + +\label{compression} +\subsection{Tape Hardware Compression and Blocking Size} +\index[general]{Tape Hardware Compression and Blocking Size} +\index[general]{Size!Tape Hardware Compression and Blocking Size} + +As far as I can tell, there is no way with the {\bf mt} program to check if +your tape hardware compression is turned on or off. You can, however, turn it +on by using (on Linux): + +\footnotesize +\begin{verbatim} +become super user +mt -f /dev/nst0 defcompression 1 +\end{verbatim} +\normalsize + +and of course, if you use a zero instead of the one at the end, you will turn +it off. + +If you have built the {\bf mtx} program in the {\bf depkgs} package, you can +use tapeinfo to get quite a bit of information about your tape drive even if +it is not an autochanger. This program is called using the SCSI control +device. On Linux for tape drive /dev/nst0, this is usually /dev/sg0, while on +FreeBSD for /dev/nsa0, the control device is often /dev/pass2. For example on +my DDS-4 drive (/dev/nst0), I get the following: + +\footnotesize +\begin{verbatim} +tapeinfo -f /dev/sg0 +Product Type: Tape Drive +Vendor ID: 'HP ' +Product ID: 'C5713A ' +Revision: 'H107' +Attached Changer: No +MinBlock:1 +MaxBlock:16777215 +SCSI ID: 5 +SCSI LUN: 0 +Ready: yes +BufferedMode: yes +Medium Type: Not Loaded +Density Code: 0x26 +BlockSize: 0 +\end{verbatim} +\normalsize + +where the {\bf DataCompEnabled: yes} means that tape hardware compression is +turned on. You can turn it on and off (yes|no) by using the {\bf mt} +commands given above. Also, this output will tell you if the {\bf BlockSize} +is non-zero and hence set for a particular block size. 
Bacula is not likely to +work in such a situation because it will normally attempt to write blocks of +64,512 bytes, except the last block of the job which will generally be +shorter. The first thing to try is setting the default block size to zero +using the {\bf mt -f /dev/nst0 defblksize 0} command as shown above. +On FreeBSD, this would be something like: {\bf mt -f /dev/nsa0 blocksize 0}. + +On some operating systems with some tape drives, the amount of data that +can be written to the tape and whether or not compression is enabled is +determined by the density usually the {\bf mt -f /dev/nst0 setdensity xxx} command. +Often {\bf mt -f /dev/nst0 status} will print out the current +density code that is used with the drive. Most systems, but unfortunately +not all, set the density to the maximum by default. On some systems, you +can also get a list of all available density codes with: +{\bf mt -f /dev/nst0 densities} or a similar {\bf mt} command. +Note, for DLT and SDLT devices, no-compression versus compression is very +often controlled by the density code. On FreeBSD systems, the compression +mode is set using {\bf mt -f /dev/nsa0 comp xxx} where xxx is the +mode you want. In general, see {\bf man mt} for the options available on +your system. + +Note, some of the above {\bf mt} commands may not be persistent depending +on your system configuration. That is they may be reset if a program +other than Bacula uses the drive or, as is frequently the case, on reboot +of your system. + +If your tape drive requires fixed block sizes (very unusual), you can use the +following records: + +\footnotesize +\begin{verbatim} +Minimum Block Size = nnn +Maximum Block Size = nnn +\end{verbatim} +\normalsize + +in your Storage daemon's Device resource to force Bacula to write fixed size +blocks (where you sent nnn to be the same for both of the above records). This +should be done only if your drive does not support variable block sizes, or +you have some other strong reasons for using fixed block sizes. As mentioned +above, a small fixed block size of 512 or 1024 bytes will be very inefficient. +Try to set any fixed block size to something like 64,512 bytes or larger if +your drive will support it. + +Also, note that the {\bf Medium Type} field of the output of {\bf tapeinfo} +reports {\bf Not Loaded}, which is not correct. As a consequence, you should +ignore that field as well as the {\bf Attached Changer} field. + +To recover files from tapes written in fixed block mode, see below. +\label{FreeBSDTapes} + +\subsection{Tape Modes on FreeBSD} +\index[general]{FreeBSD!Tape Modes on} +\index[general]{Tape Modes on FreeBSD} + +On most FreeBSD systems such as 4.9 and most tape drives, Bacula should run +with: + +\footnotesize +\begin{verbatim} +mt -f /dev/nsa0 seteotmodel 2 +mt -f /dev/nsa0 blocksize 0 +mt -f /dev/nsa0 comp enable +\end{verbatim} +\normalsize + +You might want to put those commands in a startup script to make sure your +tape driver is properly initialized before running Bacula, because +depending on your system configuration, these modes may be reset if a +program other than Bacula uses the drive or when your system is rebooted. 
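+
+A minimal sketch of such a startup script is shown below. The path
+{\bf /usr/local/etc/rc.d/bacula-tape.sh} and the script name are only
+examples; on most FreeBSD releases an executable script placed in that
+directory is run automatically at boot, but check your own rc documentation.
+
+\footnotesize
+\begin{verbatim}
+#!/bin/sh
+# /usr/local/etc/rc.d/bacula-tape.sh (sketch)
+# Initialize the tape driver before the Storage daemon starts.
+mt -f /dev/nsa0 seteotmodel 2
+mt -f /dev/nsa0 blocksize 0
+mt -f /dev/nsa0 comp enable
+\end{verbatim}
+\normalsize
+
+Remember to make the script executable (chmod +x) or it will not be run.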
+ +Then according to what the {\bf btape test} command returns, you will probably +need to set the following (see below for an alternative): + +\footnotesize +\begin{verbatim} + Hardware End of Medium = no + BSF at EOM = yes + Backward Space Record = no + Backward Space File = no + Fast Forward Space File = no + TWO EOF = yes +\end{verbatim} +\normalsize + +Then be sure to run some append tests with Bacula where you start and stop +Bacula between appending to the tape, or use {\bf btape} version 1.35.1 or +greater, which includes simulation of stopping/restarting Bacula. + +Please see the file {\bf platforms/freebsd/pthreads-fix.txt} in the main +Bacula directory concerning {\bf important} information concerning +compatibility of Bacula and your system. A much more optimal Device +configuration is shown below, but does not work with all tape drives. Please +test carefully before putting either into production. + +Note, for FreeBSD 4.10-RELEASE, using a Sony TSL11000 L100 DDS4 with an +autochanger set to variable block size and DCLZ compression, Brian McDonald +reports that to get Bacula to append correctly between Bacula executions, +the correct values to use are: + +\footnotesize +\begin{verbatim} +mt -f /dev/nsa0 seteotmodel 1 +mt -f /dev/nsa0 blocksize 0 +mt -f /dev/nsa0 comp enable +\end{verbatim} +\normalsize + +and + +\footnotesize +\begin{verbatim} + Hardware End of Medium = no + BSF at EOM = no + Backward Space Record = no + Backward Space File = no + Fast Forward Space File = yes + TWO EOF = no +\end{verbatim} +\normalsize + +This has been confirmed by several other people using different hardware. This +configuration is the preferred one because it uses one EOF and no backspacing +at the end of the tape, which works much more efficiently and reliably with +modern tape drives. + +Finally, here is a Device configuration that Danny Butroyd reports to work +correctly with the Overland Powerloader tape library using LT0-2 and +FreeBSD 5.4-Stable: + +\footnotesize +\begin{verbatim} +# Overland Powerloader LT02 - 17 slots single drive +Device { + Name = Powerloader + Media Type = LT0-2 + Archive Device = /dev/nsa0 + AutomaticMount = yes; + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + Changer Command = "/usr/local/sbin/mtx-changer %c %o %S %a %d" + Changer Device = /dev/pass2 + AutoChanger = yes + Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" + + # FreeBSD Specific Settings + Offline On Unmount = no + Hardware End of Medium = no + BSF at EOM = yes + Backward Space Record = no + Fast Forward Space File = no + TWO EOF = yes +} + +The following Device resource works fine with Dell PowerVault 110T and +120T devices on both FreeBSD 5.3 and on NetBSD 3.0. It also works +with Sony AIT-2 drives on FreeBSD. +\footnotesize +\begin{verbatim} +Device { + ... + # FreeBSD/NetBSD Specific Settings + Hardware End of Medium = no + BSF at EOM = yes + Backward Space Record = no + Fast Forward Space File = yes + TWO EOF = yes +} +\end{verbatim} +\normalsize + +On FreeBSD version 6.0, it is reported that you can even set +Backward Space Record = yes. + + + +\subsection{Finding your Tape Drives and Autochangers on FreeBSD} +\index[general]{FreeBSD!Finding Tape Drives and Autochangers} +\index[general]{Finding Tape Drives and Autochangers on FreeBSD} + +On FreeBSD, you can do a {\bf camcontrol devlist} as root to determine what +drives and autochangers you have. 
For example, + +\footnotesize +\begin{verbatim} +undef# camcontrol devlist + at scbus0 target 2 lun 0 (pass0,sa0) + at scbus0 target 4 lun 0 (pass1,sa1) + at scbus0 target 4 lun 1 (pass2) +\end{verbatim} +\normalsize + +from the above, you can determine that there is a tape drive on {\bf /dev/sa0} +and another on {\bf /dev/sa1} in addition since there is a second line for the +drive on {\bf /dev/sa1}, you know can assume that it is the control device for +the autochanger (i.e. {\bf /dev/pass2}). It is also the control device name to +use when invoking the tapeinfo program. E.g. + +\footnotesize +\begin{verbatim} +tapeinfo -f /dev/pass2 +\end{verbatim} +\normalsize + +\label{onstream} + +\subsection{Using the OnStream driver on Linux Systems} +\index[general]{Using the OnStream driver on Linux Systems} +\index[general]{Systems!Using the OnStream driver on Linux} + +Bacula version 1.33 (not 1.32x) is now working and ready for testing with the +OnStream kernel osst driver version 0.9.14 or above. Osst is available from: +\elink{http://sourceforge.net/projects/osst/} +{http://sourceforge.net/projects/osst/}. + +To make Bacula work you must first load the new driver then, as root, do: + +\footnotesize +\begin{verbatim} + mt -f /dev/nosst0 defblksize 32768 +\end{verbatim} +\normalsize + +Also you must add the following to your Device resource in your Storage +daemon's conf file: + +\footnotesize +\begin{verbatim} + Minimum Block Size = 32768 + Maximum Block Size = 32768 +\end{verbatim} +\normalsize + +Here is a Device specification provided by Michel Meyers that is known to +work: + +\footnotesize +\begin{verbatim} +Device { + Name = "Onstream DI-30" + Media Type = "ADR-30" + Archive Device = /dev/nosst0 + Minimum Block Size = 32768 + Maximum Block Size = 32768 + Hardware End of Medium = yes + BSF at EOM = no + Backward Space File = yes + Fast Forward Space File = yes + Two EOF = no + AutomaticMount = yes + AlwaysOpen = yes + Removable Media = yes +} +\end{verbatim} +\normalsize + +\section{Hardware Compression on EXB-8900} +\index[general]{Hardware Compression on EXB-8900} +\index[general]{EXB-8900!Hardware Compression} + +To active, check, or disable the hardware compression feature +on an EXB-8900, use the exabyte MammothTool. You can get it here: +\elink{http://www.exabyte.com/support/online/downloads/index.cfm} +{http://www.exabyte.com/support/online/downloads/index.cfm}. +There is a Solaris version of this tool. With option -C 0 or 1 you +can disable or activate compression. Start this tool without any +options for a small reference. + +\label{fill} +\subsection{Using btape to Simulate Filling a Tape} +\index[general]{Using btape to Simulate Filling a Tape} +\index[general]{Tape!Using btape to Simulate Filling} + +Because there are often problems with certain tape drives or systems when end +of tape conditions occur, {\bf btape} has a special command {\bf fill} that +causes it to write random data to a tape until the tape fills. It then writes +at least one more Bacula block to a second tape. Finally, it reads back both +tapes to ensure that the data has been written in a way that Bacula can +recover it. Note, there is also a single tape option as noted below, which you +should use rather than the two tape test. See below for more details. + +This can be an extremely time consuming process (here it is about 6 hours) to +fill a full tape. Note, that btape writes random data to the tape when it is +filling it. This has two consequences: 1. 
it takes a bit longer to generate +the data, especially on slow CPUs. 2. the total amount of data is +approximately the real physical capacity of your tape, regardless of whether +or not the tape drive compression is on or off. This is because random data +does not compress very much. + +To begin this test, you enter the {\bf fill} command and follow the +instructions. There are two options: the simple single tape option and the +multiple tape option. Please use only the simple single tape option because +the multiple tape option still doesn't work totally correctly. If the single +tape option does not succeed, you should correct the problem before using +Bacula. +\label{RecoveringFiles} + +\section{Recovering Files Written With Fixed Block Sizes} +\index[general]{Recovering Files Written With Fixed Block Sizes} + +If you have been previously running your tape drive in fixed block mode +(default 512) and Bacula with variable blocks (default), then in version +1.32f-x and 1.34 and above, Bacula will fail to recover files because it does +block spacing, and because the block sizes don't agree between your tape drive +and Bacula it will not work. + +The long term solution is to run your drive in variable block mode as +described above. However, if you have written tapes using fixed block sizes, +this can be a bit of a pain. The solution to the problem is: while you are +doing a restore command using a tape written in fixed block size, ensure that +your drive is set to the fixed block size used while the tape was written. +Then when doing the {\bf restore} command in the Console program, do not +answer the prompt {\bf yes/mod/no}. Instead, edit the bootstrap file (the +location is listed in the prompt) using any ASCII editor. Remove all {\bf +VolBlock} lines in the file. When the file is re-written, answer the question, +and Bacula will run without using block positioning, and it should recover +your files. + +\label{BlockModes} +\section{Tape Blocking Modes} +\index[general]{Modes!Tape Blocking} +\index[general]{Tape Blocking Modes} + +SCSI tapes may either be written in {\bf variable} or {\bf fixed} block sizes. +Newer drives support both modes, but some drives such as the QIC devices +always use fixed block sizes. Bacula attempts to fill and write complete +blocks (default 65K), so that in normal mode (variable block size), Bacula +will always write blocks of the same size except the last block of a Job. If +Bacula is configured to write fixed block sizes, it will pad the last block of +the Job to the correct size. Bacula expects variable tape block size drives to +behave as follows: Each write to the drive results in a single record being +written to the tape. Each read returns a single record. If you request less +bytes than are in the record, only those number of bytes will be returned, but +the entire logical record will have been read (the next read will retrieve the +next record). Thus data from a single write is always returned in a single +read, and sequentially written records are returned by sequential reads. + +Bacula expects fixed block size tape drives to behave as follows: If a write +length is greater than the physical block size of the drive, the write will be +written as two blocks each of the fixed physical size. This single write may +become multiple physical records on the tape. (This is not a good situation). 
+According to the documentation, one may never write an amount of data that is +not the exact multiple of the blocksize (it is not specified if an error +occurs or if the the last record is padded). When reading, it is my +understanding that each read request reads one physical record from the tape. +Due to the complications of fixed block size tape drives, you should avoid +them if possible with Bacula, or you must be ABSOLUTELY certain that you use +fixed block sizes within Bacula that correspond to the physical block size of +the tape drive. This will ensure that Bacula has a one to one correspondence +between what it writes and the physical record on the tape. + +Please note that Bacula will not function correctly if it writes a block and +that block is split into two or more physical records on the tape. Bacula +assumes that each write causes a single record to be written, and that it can +sequentially recover each of the blocks it has written by using the same +number of sequential reads as it had written. + +\section{Details of Tape Modes} +\index[general]{Modes!Details} +\index[general]{Details of Tape Modes} +Rudolf Cejka has provided the following information concerning +certain tape modes and MTEOM. + +\begin{description} +\item[Tape level] + It is always possible to position filemarks or blocks, whereas + positioning to the end-of-data is only optional feature, however it is + implemented very often. SCSI specification also talks about optional + sequential filemarks, setmarks and sequential setmarks, but these are not + implemented so often. Modern tape drives keep track of file positions in + built-in chip (AIT, LTO) or at the beginning of the tape (SDLT), so there + is not any speed difference, if end-of-data or filemarks is used (I have + heard, that LTO-1 from all 3 manufacturers do not use its chip for file + locations, but a tape as in SDLT case, and I'm not sure about LTO-2 and + LTO-3 case). However there is a big difference, that end-of-data ignores + file position, whereas filemarks returns the real number of skipped + files, so OS can track current file number just in filemarks case. + +\item[OS level] + Solaris does use just SCSI SPACE Filemarks, it does not support SCSI + SPACE End-of-data. When MTEOM is called, Solaris does use SCSI SPACE + Filemarks with count = 1048576 for fast mode, and combination of SCSI + SPACE Filemarks with count = 1 with SCSI SPACE Blocks with count = 1 for + slow mode, so EOD mark on the tape on some older tape drives is not + skipped. File number is always tracked for MTEOM. + + Linux does support both SCSI SPACE Filemarks and End-of-data: When MTEOM + is called in MT\_ST\_FAST\_MTEOM mode, SCSI SPACE End-of-data is used. + In the other case, SCSI SPACE Filemarks with count = + 8388607 is used. + There is no real slow mode like in Solaris - I just expect, that for + older tape drives Filemarks may be slower than End-of-data, but not so + much as in Solaris slow mode. File number is tracked for MTEOM just + without MT\_ST\_FAST\_MTEOM - when MT\_ST\_FAST\_MTEOM is used, it is not. + + FreeBSD does support both SCSI SPACE Filemarks and End-of-data, but when + MTEOD (MTEOM) is called, SCSI SPACE End-of-data is always used. FreeBSD + never use SCSI SPACE Filemarks for MTEOD. File number is never tracked + for MTEOD. + +\item[Bacula level] + When {\bf Hardware End of Medium = Yes} is used, MTEOM is called, but it + does not mean, that hardware End-of-data must be used. 
When Hardware End + of Medium = No, if Fast Forward Space File = Yes, MTFSF with count = + 32767 is used, else Block Read with count = 1 with Forward Space File + with count = 1 is used, which is really very slow. + +\item [Hardware End of Medium = Yes|No] + The name of this option is misleading and is the source of confusion, + because it is not the hardware EOM, what is really switched here. + + If I use Yes, OS must not use SCSI SPACE End-of-data, because Bacula + expects, that there is tracked file number, which is not supported by + SCSI specification. Instead, the OS have to use SCSI SPACE Filemarks. + + If I use No, an action depends on Fast Forward Space File. + + When I set {\bf Hardware End of Medium = no} + and {\bf Fast Forward Space File = no} + file positioning was very slow + on my LTO-3 (about ten to 100 minutes), but + + with {\bf Hardware End of Medium = no} and +{\bf Fast Forward Space File = yes}, the time is ten to +100 times faster (about one to two minutes). + +\end{description} + +\section{Autochanger Errors} +\index[general]{Errors!Autochanger} +\index[general]{Autochanger Errors} + +If you are getting errors such as: + +\footnotesize +\begin{verbatim} +3992 Bad autochanger "load slot 1, drive 1": ERR=Child exited with code 1. +\end{verbatim} +\normalsize + +and you are running your Storage daemon as non-root, then most likely +you are having permissions problems with the control channel. Running +as root, set permissions on /dev/sgX so that the userid and group of +your Storage daemon can access the device. You need to ensure that you +all access to the proper control device, and if you don't have any +SCSI disk drives (including SATA drives), you might want to change +the permissions on /dev/sg*. + +\section{Syslog Errors} +\index[general]{Errors!Syslog} +\index[general]{Syslog Errors} + +If you are getting errors such as: + +\footnotesize +\begin{verbatim} +: kernel: st0: MTSETDRVBUFFER only allowed for root +\end{verbatim} +\normalsize + +you are most likely running your Storage daemon as non-root, and +Bacula is attempting to set the correct OS buffering to correspond +to your Device resource. Most OSes allow only root to issue this +ioctl command. In general, the message can be ignored providing +you are sure that your OS parameters are properly configured as +described earlier in this manual. If you are running your Storage daemon +as root, you should not be getting these system log messages, and if +you are, something is probably wrong. diff --git a/docs/manuals/de/problems/tips.tex b/docs/manuals/de/problems/tips.tex new file mode 100644 index 00000000..d0e77f03 --- /dev/null +++ b/docs/manuals/de/problems/tips.tex @@ -0,0 +1,1045 @@ +%% +%% + +\chapter{Tips and Suggestions} +\label{TipsChapter} +\index[general]{Tips and Suggestions } +\index[general]{Suggestions!Tips and } +\label{examples} +\index[general]{Examples } + +There are a number of example scripts for various things that can be found in +the {\bf example} subdirectory and its subdirectories of the Bacula source +distribution. + +For additional tips, please see the \elink{Bacula +wiki}{\url{http://wiki.bacula.org}}. 
+ +\section{Upgrading Bacula Versions} +\label{upgrading} +\index[general]{Upgrading Bacula Versions } +\index[general]{Versions!Upgrading Bacula } +\index[general]{Upgrading} + +The first thing to do before upgrading from one version to another is to +ensure that you don't overwrite or delete your production (current) version +of Bacula until you have tested that the new version works. + +If you have installed Bacula into a single directory, this is simple: simply +make a copy of your Bacula directory. + +If you have done a more typical Unix installation where the binaries are +placed in one directory and the configuration files are placed in another, +then the simplest way is to configure your new Bacula to go into a single +file. Alternatively, make copies of all your binaries and especially your +conf files. + +Whatever your situation may be (one of the two just described), you should +probably start with the {\bf defaultconf} script that can be found in the {\bf +examples} subdirectory. Copy this script to the main Bacula directory, modify +it as necessary (there should not need to be many modifications), configure +Bacula, build it, install it, then stop your production Bacula, copy all the +{\bf *.conf} files from your production Bacula directory to the test Bacula +directory, start the test version, and run a few test backups. If all seems +good, then you can proceed to install the new Bacula in place of or possibly +over the old Bacula. + +When installing a new Bacula you need not worry about losing the changes you +made to your configuration files as the installation process will not +overwrite them providing that you do not do a {\bf make uninstall}. + +If the new version of Bacula requires an upgrade to the database, +you can upgrade it with the script {\bf update\_bacula\_tables}, which +will be installed in your scripts directory (default {\bf /etc/bacula}), +or alternatively, you can find it in the +{\bf \lt{}bacula-source\gt{}/src/cats} directory. + +\section{Getting Notified of Job Completion} +\label{notification} +\index[general]{Getting Notified of Job Completion } +\index[general]{Completion!Getting Notified of Job } + +One of the first things you should do is to ensure that you are being properly +notified of the status of each Job run by Bacula, or at a minimum of each Job +that terminates with an error. + +Until you are completely comfortable with {\bf Bacula}, we recommend that you +send an email to yourself for each Job that is run. This is most easily +accomplished by adding an email notification address in the {\bf Messages} +resource of your Director's configuration file. An email is automatically +configured in the default configuration files, but you must ensure that the +default {\bf root} address is replaced by your email address. + +For additional examples of how to configure a Bacula, please take a look at the +{\bf .conf} files found in the {\bf examples} sub-directory. We recommend the +following configuration (where you change the paths and email address to +correspond to your setup). Note, the {\bf mailcommand} and {\bf +operatorcommand} should be on a single line. 
They were split here for +presentation: + +\footnotesize +\begin{verbatim} +Messages { + Name = Standard + mailcommand = "/home/bacula/bin/bsmtp -h localhost + -f \"\(Bacula\) %r\" + -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "/home/bacula/bin/bsmtp -h localhost + -f \"\(Bacula\) %r\" + -s \"Bacula: Intervention needed for %j\" %r" + Mail = your-email-address = all, !skipped, !terminate + append = "/home/bacula/bin/log" = all, !skipped, !terminate + operator = your-email-address = mount + console = all, !skipped, !saved +} +\end{verbatim} +\normalsize + +You will need to ensure that the {\bf /home/bacula/bin} path on the {\bf +mailcommand} and the {\bf operatorcommand} lines point to your {\bf Bacula} +binary directory where the {\bf bsmtp} program will be installed. You will +also want to ensure that the {\bf your-email-address} is replaced by your +email address, and finally, you will also need to ensure that the {\bf +/home/bacula/bin/log} points to the file where you want to log all messages. + +With the above Messages resource, you will be notified by email of every Job +that ran, all the output will be appended to the {\bf log} file you specify, +all output will be directed to the console program, and all mount messages +will be emailed to you. Note, some messages will be sent to multiple +destinations. + +The form of the mailcommand is a bit complicated, but it allows you to +distinguish whether the Job terminated in error or terminated normally. Please +see the +\ilink{Mail Command}{mailcommand} section of the Messages +Resource chapter of this manual for the details of the substitution characters +used above. + +Once you are totally comfortable with Bacula as I am, or if you have a large +number of nightly Jobs as I do (eight), you will probably want to change the +{\bf Mail} command to {\bf Mail On Error} which will generate an email message +only if the Job terminates in error. If the Job terminates normally, no email +message will be sent, but the output will still be appended to the log file as +well as sent to the Console program. + +\section{Getting Email Notification to Work} +\label{email} +\index[general]{Work!Getting Email Notification to } +\index[general]{Getting Email Notification to Work } + +The section above describes how to get email notification of job status. +Occasionally, however, users have problems receiving any email at all. In that +case, the things to check are the following: + +\begin{itemize} +\item Ensure that you have a valid email address specified on your {\bf Mail} + record in the Director's Messages resource. The email address should be fully + qualified. Simply using {\bf root} generally will not work, rather you should +use {\bf root@localhost} or better yet your full domain. +\item Ensure that you do not have a {\bf Mail} record in the Storage daemon's + or File daemon's configuration files. The only record you should have is {\bf + director}: + +\footnotesize +\begin{verbatim} + director = director-name = all + +\end{verbatim} +\normalsize + +\item If all else fails, try replacing the {\bf mailcommand} with + + \footnotesize +\begin{verbatim} +mailcommand = "mail -s test your@domain.com" +\end{verbatim} +\normalsize + +\item Once the above is working, assuming you want to use {\bf bsmtp}, submit + the desired bsmtp command by hand and ensure that the email is delivered, + then put that command into {\bf Bacula}. 
Small differences in things such as +the parenthesis around the word Bacula can make a big difference to some +bsmtp programs. For example, you might start simply by using: + +\footnotesize +\begin{verbatim} +mailcommand = "/home/bacula/bin/bsmtp -f \"root@localhost\" %r" +\end{verbatim} +\normalsize + +\end{itemize} + +\section{Getting Notified that Bacula is Running} +\label{JobNotification} +\index[general]{Running!Getting Notified that Bacula is } +\index[general]{Getting Notified that Bacula is Running } + +If like me, you have setup Bacula so that email is sent only when a Job has +errors, as described in the previous section of this chapter, inevitably, one +day, something will go wrong and {\bf Bacula} can stall. This could be because +Bacula crashes, which is vary rare, or more likely the network has caused {\bf +Bacula} to {\bf hang} for some unknown reason. + +To avoid this, you can use the {\bf RunAfterJob} command in the Job resource +to schedule a Job nightly, or weekly that simply emails you a message saying +that Bacula is still running. For example, I have setup the following Job in +my Director's configuration file: + +\footnotesize +\begin{verbatim} +Schedule { + Name = "Watchdog" + Run = Level=Full sun-sat at 6:05 +} +Job { + Name = "Watchdog" + Type = Admin + Client=Watchdog + FileSet="Verify Set" + Messages = Standard + Storage = DLTDrive + Pool = Default + Schedule = "Watchdog" + RunAfterJob = "/home/kern/bacula/bin/watchdog %c %d" +} +Client { + Name = Watchdog + Address = rufus + FDPort = 9102 + Catalog = Verify + Password = "" + File Retention = 1day + Job Retention = 1 month + AutoPrune = yes +} +\end{verbatim} +\normalsize + +Where I established a schedule to run the Job nightly. The Job itself is type +{\bf Admin} which means that it doesn't actually do anything, and I've defined +a FileSet, Pool, Storage, and Client, all of which are not really used (and +probably don't need to be specified). The key aspect of this Job is the +command: + +\footnotesize +\begin{verbatim} + RunAfterJob = "/home/kern/bacula/bin/watchdog %c %d" +\end{verbatim} +\normalsize + +which runs my "watchdog" script. As an example, I have added the Job codes +\%c and \%d which will cause the Client name and the Director's name to be +passed to the script. For example, if the Client's name is {\bf Watchdog} and +the Director's name is {\bf main-dir} then referencing \$1 in the script would +get {\bf Watchdog} and referencing \$2 would get {\bf main-dir}. In this case, +having the script know the Client and Director's name is not really useful, +but in other situations it may be. + +You can put anything in the watchdog script. In my case, I like to monitor the +size of my catalog to be sure that {\bf Bacula} is really pruning it. The +following is my watchdog script: + +\footnotesize +\begin{verbatim} +#!/bin/sh +cd /home/kern/mysql/var/bacula +du . 
* | +/home/kern/bacula/bin/bsmtp \ + -f "\(Bacula\) abuse@whitehouse.com" -h mail.yyyy.com \ + -s "Bacula running" abuse@whitehouse.com +\end{verbatim} +\normalsize + +If you just wish to send yourself a message, you can do it with: + +\footnotesize +\begin{verbatim} +#!/bin/sh +cd /home/kern/mysql/var/bacula +/home/kern/bacula/bin/bsmtp \ + -f "\(Bacula\) abuse@whitehouse.com" -h mail.yyyy.com \ + -s "Bacula running" abuse@whitehouse.com </volume-list + exit 0 +\end{verbatim} +\normalsize + +so that the whole case looks like: + +\footnotesize +\begin{verbatim} + list) +# +# commented out lines + cat /volume-list + exit 0 + ;; +\end{verbatim} +\normalsize + +where you replace \lt{}absolute-path\gt{} with the full path to the +volume-list file. Then using the console, you enter the following command: + +\footnotesize +\begin{verbatim} + label barcodes +\end{verbatim} +\normalsize + +and Bacula will proceed to mount the autochanger Volumes in the list and label +them with the Volume names you have supplied. Bacula will think that the list +was provided by the autochanger barcodes, but in reality, it was you who +supplied the \lt{}barcodes\gt{}. + +If it seems to work, when it finishes, enter: + +\footnotesize +\begin{verbatim} + list volumes +\end{verbatim} +\normalsize + +and you should see all the volumes nicely created. + +\section{Backing Up Portables Using DHCP} +\label{DNS} +\index[general]{DHCP!Backing Up Portables Using } +\index[general]{Backing Up Portables Using DHCP } + +You may want to backup laptops or portables that are not always connected to +the network. If you are using DHCP to assign an IP address to those machines +when they connect, you will need to use the Dynamic Update capability of DNS +to assign a name to those machines that can be used in the Address field of +the Client resource in the Director's conf file. + +\section{Going on Vacation} +\label{Vacation} +\index[general]{Vacation!Going on } +\index[general]{Going on Vacation } + +At some point, you may want to be absent for a week or two and you want to +make sure Bacula has enough tape left so that the backups will complete. You +start by doing a {\bf list volumes} in the Console program: + +\footnotesize +\begin{verbatim} +list volumes + +Using default Catalog name=BackupDB DB=bacula +Pool: Default ++---------+---------------+-----------+-----------+----------------+- +| MediaId | VolumeName | MediaType | VolStatus | VolBytes | ++---------+---------------+-----------+-----------+----------------+- +| 23 | DLT-30Nov02 | DLT8000 | Full | 54,739,278,128 | +| 24 | DLT-21Dec02 | DLT8000 | Full | 56,331,524,629 | +| 25 | DLT-11Jan03 | DLT8000 | Full | 67,863,514,895 | +| 26 | DLT-02Feb03 | DLT8000 | Full | 63,439,314,216 | +| 27 | DLT-03Mar03 | DLT8000 | Full | 66,022,754,598 | +| 28 | DLT-04Apr03 | DLT8000 | Full | 60,792,559,924 | +| 29 | DLT-28Apr03 | DLT8000 | Full | 62,072,494,063 | +| 30 | DLT-17May03 | DLT8000 | Full | 65,901,767,839 | +| 31 | DLT-07Jun03 | DLT8000 | Used | 56,558,490,015 | +| 32 | DLT-28Jun03 | DLT8000 | Full | 64,274,871,265 | +| 33 | DLT-19Jul03 | DLT8000 | Full | 64,648,749,480 | +| 34 | DLT-08Aug03 | DLT8000 | Full | 64,293,941,255 | +| 35 | DLT-24Aug03 | DLT8000 | Append | 9,999,216,782 | ++---------+---------------+-----------+-----------+----------------+ +\end{verbatim} +\normalsize + +Note, I have truncated the output for presentation purposes. 
What is +significant, is that I can see that my current tape has almost 10 Gbytes of +data, and that the average amount of data I get on my tapes is about 60 +Gbytes. So if I go on vacation now, I don't need to worry about tape capacity +(at least not for short absences). + +Equally significant is the fact that I did go on vacation the 28th of June +2003, and when I did the {\bf list volumes} command, my current tape at that +time, DLT-07Jun03 MediaId 31, had 56.5 Gbytes written. I could see that the +tape would fill shortly. Consequently, I manually marked it as {\bf Used} and +replaced it with a fresh tape that I labeled as DLT-28Jun03, thus assuring +myself that the backups would all complete without my intervention. + +\section{Exclude Files on Windows Regardless of Case} +\label{Case} +\index[general]{Exclude Files on Windows Regardless of Case} +% TODO: should this be put in the win32 chapter? +% TODO: should all these tips be placed in other chapters? + +This tip was submitted by Marc Brueckner who wasn't sure of the case of some +of his files on Win32, which is case insensitive. The problem is that Bacula +thinks that {\bf /UNIMPORTANT FILES} is different from {\bf /Unimportant +Files}. Marc was aware that the file exclusion permits wild-cards. So, he +specified: + +\footnotesize +\begin{verbatim} +"/[Uu][Nn][Ii][Mm][Pp][Oo][Rr][Tt][Aa][Nn][Tt] [Ff][Ii][Ll][Ee][Ss]" +\end{verbatim} +\normalsize + +As a consequence, the above exclude works for files of any case. + +Please note that this works only in Bacula Exclude statement and not in +Include. + +\section{Executing Scripts on a Remote Machine} +\label{RemoteExecution} +\index[general]{Machine!Executing Scripts on a Remote } +\index[general]{Executing Scripts on a Remote Machine } + +This tip also comes from Marc Brueckner. (Note, this tip is probably outdated +by the addition of {\bf ClientRunBeforJob} and {\bf ClientRunAfterJob} Job +records, but the technique still could be useful.) First I thought the "Run +Before Job" statement in the Job-resource is for executing a script on the +remote machine (the machine to be backed up). (Note, this is possible as mentioned +above by using {\bf ClientRunBeforJob} and {\bf ClientRunAfterJob}). +It could be useful to execute +scripts on the remote machine e.g. for stopping databases or other services +while doing the backup. (Of course I have to start the services again when the +backup has finished) I found the following solution: Bacula could execute +scripts on the remote machine by using ssh. The authentication is done +automatically using a private key. First you have to generate a keypair. I've +done this by: + +\footnotesize +\begin{verbatim} +ssh-keygen -b 4096 -t dsa -f Bacula_key +\end{verbatim} +\normalsize + +This statement may take a little time to run. It creates a public/private key +pair with no passphrase. You could save the keys in /etc/bacula. Now you have +two new files : Bacula\_key which contains the private key and Bacula\_key.pub +which contains the public key. + +Now you have to append the Bacula\_key.pub file to the file authorized\_keys +in the \textbackslash{}root\textbackslash{}.ssh directory of the remote +machine. Then you have to add (or uncomment) the line + +\footnotesize +\begin{verbatim} +AuthorizedKeysFile %h/.ssh/authorized_keys +\end{verbatim} +\normalsize + +to the sshd\_config file on the remote machine. Where the \%h stands for the +home-directory of the user (root in this case). 
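+
+If you prefer not to edit {\bf authorized\_keys} by hand, the public key can
+usually be appended from the Bacula machine in one step. This is only a
+sketch: it assumes the keys were stored in {\bf /etc/bacula} and uses the
+same example address (192.168.1.1) as the configuration shown further below;
+you will be asked for the remote root password this one time.
+
+\footnotesize
+\begin{verbatim}
+ssh-copy-id -i /etc/bacula/Bacula_key.pub root@192.168.1.1
+
+# or, where ssh-copy-id is not available:
+cat /etc/bacula/Bacula_key.pub | ssh root@192.168.1.1 \
+  "mkdir -p ~/.ssh && chmod 700 ~/.ssh && cat >> ~/.ssh/authorized_keys"
+\end{verbatim}
+\normalsize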
+ +Assuming that your sshd is already running on the remote machine, you can now +enter the following on the machine where Bacula runs: + +\footnotesize +\begin{verbatim} +ssh -i Bacula_key -l root "ls -la" +\end{verbatim} +\normalsize + +This should execute the "ls -la" command on the remote machine. + +Now you could add lines like the following to your Director's conf file: + +\footnotesize +\begin{verbatim} +... +Run Before Job = ssh -i /etc/bacula/Bacula_key 192.168.1.1 \ + "/etc/init.d/database stop" +Run After Job = ssh -i /etc/bacula/Bacula_key 192.168.1.1 \ + "/etc/init.d/database start" +... +\end{verbatim} +\normalsize + +Even though Bacula version 1.32 and later has a ClientRunBeforeJob, the ssh method still +could be useful for updating all the Bacula clients on several remote machines +in a single script. + +\section{Recycling All Your Volumes} +\label{recycle} +\index[general]{Recycling All Your Volumes } +\index[general]{Volumes!Recycling All Your } + +This tip comes from Phil Stracchino. + +If you decide to blow away your catalog and start over, the simplest way to +re-add all your prelabeled tapes with a minimum of fuss (provided you don't +care about the data on the tapes) is to add the tape labels using the console +{\bf add} command, then go into the catalog and manually set the VolStatus of +every tape to {\bf Recycle}. + +The SQL command to do this is very simple, either use your vendor's +command line interface (mysql, postgres, sqlite, ...) or use the sql +command in the Bacula console: + +\footnotesize +\begin{verbatim} +update Media set VolStatus='Recycle'; +\end{verbatim} +\normalsize + +Bacula will then ignore the data already stored on the tapes and just re-use +each tape without further objection. + +\section{Backing up ACLs on ext3 or XFS filesystems} +\label{ACLs} +\index[general]{Filesystems!Backing up ACLs on ext3 or XFS } +\index[general]{Backing up ACLs on ext3 or XFS filesystems } + +This tip comes from Volker Sauer. + +Note, this tip was given prior to implementation of ACLs in Bacula (version +1.34.5). It is left here because dumping/displaying ACLs can still be useful +in testing/verifying that Bacula is backing up and restoring your ACLs +properly. Please see the +\ilink{aclsupport}{ACLSupport} FileSet option in the +configuration chapter of this manual. + +For example, you could dump the ACLs to a file with a script similar to the +following: + +\footnotesize +\begin{verbatim} +#!/bin/sh +BACKUP_DIRS="/foo /bar" +STORE_ACL=/root/acl-backup +umask 077 +for i in $BACKUP_DIRS; do + cd $i /usr/bin/getfacl -R --skip-base .>$STORE_ACL/${i//\//_} +done +\end{verbatim} +\normalsize + +Then use Bacula to backup {\bf /root/acl-backup}. + +The ACLs could be restored using Bacula to the {\bf /root/acl-backup} file, +then restored to your system using: + +\footnotesize +\begin{verbatim} +setfacl --restore/root/acl-backup +\end{verbatim} +\normalsize + +\section{Total Automation of Bacula Tape Handling} +\label{automate} +\index[general]{Handling!Total Automation of Bacula Tape } +\index[general]{Total Automation of Bacula Tape Handling } + +This tip was provided by Alexander Kuehn. + +\elink{Bacula}{\url{http://www.bacula.org/}} is a really nice backup program except +that the manual tape changing requires user interaction with the bacula +console. + +Fortunately I can fix this. +NOTE!!! This suggestion applies for people who do *NOT* have tape autochangers +and must change tapes manually.!!!!! 
+ +Bacula supports a variety of tape changers through the use of mtx-changer +scripts/programs. This highly flexible approach allowed me to create +\elink{this shell script}{\url{http://www.bacula.org/rel-manual/mtx-changer.txt}} which does the following: +% TODO: We need to include this in book appendix and point to it. +% TODO: +Whenever a new tape is required it sends a mail to the operator to insert the +new tape. Then it waits until a tape has been inserted, sends a mail again to +say thank you and let's bacula continue its backup. +So you can schedule and run backups without ever having to log on or see the +console. +To make the whole thing work you need to create a Device resource which looks +something like this ("Archive Device", "Maximum Changer Wait", "Media +Type" and "Label media" may have different values): + +\footnotesize +\begin{verbatim} +Device { + Name=DDS3 + Archive Device = # use yours not mine! ;)/dev/nsa0 + Changer Device = # not really required/dev/nsa0 + Changer Command = "# use this (maybe change the path)! + /usr/local/bin/mtx-changer %o %a %S" + Maximum Changer Wait = 3d # 3 days in seconds + AutomaticMount = yes; # mount on start + AlwaysOpen = yes; # keep device locked + Media Type = DDS3 # it's just a name + RemovableMedia = yes; # + Offline On Unmount = Yes; # keep this too + Label media = Yes; # +} +\end{verbatim} +\normalsize + +As the script has to emulate the complete wisdom of a mtx-changer it has an +internal "database" containing where which tape is stored, you can see this on +the following line: + +\footnotesize +\begin{verbatim} +labels="VOL-0001 VOL-0002 VOL-0003 VOL-0004 VOL-0005 VOL-0006 +VOL-0007 VOL-0008 VOL-0009 VOL-0010 VOL-0011 VOL-0012" +\end{verbatim} +\normalsize + +The above should be all on one line, and it effectively tells Bacula that +volume "VOL-0001" is located in slot 1 (which is our lowest slot), that +volume "VOL-0002" is located in slot 2 and so on.. +The script also maintains a logfile (/var/log/mtx.log) where you can monitor +its operation. + +\section{Running Concurrent Jobs} +\label{ConcurrentJobs} +\index[general]{Jobs!Running Concurrent} +\index[general]{Running Concurrent Jobs} +\index[general]{Concurrent Jobs} + +Bacula can run multiple concurrent jobs, but the default configuration files +do not enable it. Using the {\bf Maximum Concurrent Jobs} directive, you +can configure how many and which jobs can be run simultaneously. +The Director's default value for {\bf Maximum Concurrent Jobs} is "1". + +To initially setup concurrent jobs you need to define {\bf Maximum Concurrent Jobs} in +the Director's configuration file (bacula-dir.conf) in the +Director, Job, Client, and Storage resources. + +Additionally the File daemon, and the Storage daemon each have their own +{\bf Maximum Concurrent Jobs} directive that sets the overall maximum +number of concurrent jobs the daemon will run. The default for both the +File daemon and the Storage daemon is "20". + +For example, if you want two different jobs to run simultaneously backing up +the same Client to the same Storage device, they will run concurrently only if +you have set {\bf Maximum Concurrent Jobs} greater than one in the Director +resource, the Client resource, and the Storage resource in bacula-dir.conf. + +We recommend that you read the \ilink{Data +Spooling}{SpoolingChapter} of this manual first, then test your multiple +concurrent backup including restore testing before you put it into +production. 
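+
+Once you have raised the limits as shown in the stripped-down configuration
+below, a simple way to verify that jobs really run side by side is to start
+the same job twice from the Console and then look at the Director status.
+This is only a sketch; {\bf NightlySave} is the job name used in the example
+that follows:
+
+\footnotesize
+\begin{verbatim}
+run job=NightlySave yes
+run job=NightlySave yes
+status dir
+\end{verbatim}
+\normalsize
+
+Both jobs should be listed as running. If the second one is shown as waiting
+on a resource, then one of the four {\bf Maximum Concurrent Jobs} settings is
+most likely still at its default value of one.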
+ +Below is a super stripped down bacula-dir.conf file showing you the four +places where the the file must be modified to allow the same job {\bf +NightlySave} to run up to four times concurrently. The change to the Job +resource is not necessary if you want different Jobs to run at the same time, +which is the normal case. + +\footnotesize +\begin{verbatim} +# +# Bacula Director Configuration file -- bacula-dir.conf +# +Director { + Name = rufus-dir + Maximum Concurrent Jobs = 4 + ... +} +Job { + Name = "NightlySave" + Maximum Concurrent Jobs = 4 + Client = rufus-fd + Storage = File + ... +} +Client { + Name = rufus-fd + Maximum Concurrent Jobs = 4 + ... +} +Storage { + Name = File + Maximum Concurrent Jobs = 4 + ... +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/de/problems/translate_images.pl b/docs/manuals/de/problems/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/de/problems/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. +# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. + $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. + my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. +# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. 
No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. 
+ if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/de/problems/update_version b/docs/manuals/de/problems/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/de/problems/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/problems/update_version.in b/docs/manuals/de/problems/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/de/problems/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/problems/version.tex b/docs/manuals/de/problems/version.tex new file mode 100644 index 00000000..82d910aa --- /dev/null +++ b/docs/manuals/de/problems/version.tex @@ -0,0 +1 @@ +2.3.6 (04 November 2007) diff --git a/docs/manuals/de/problems/version.tex.in b/docs/manuals/de/problems/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/de/problems/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/de/utility/Makefile b/docs/manuals/de/utility/Makefile new file mode 100644 index 00000000..7136d1b6 --- /dev/null +++ b/docs/manuals/de/utility/Makefile @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=utility + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . 
+ dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Utility Programs" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Utilit*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/utility/Makefile.in b/docs/manuals/de/utility/Makefile.in new file mode 100644 index 00000000..7136d1b6 --- /dev/null +++ b/docs/manuals/de/utility/Makefile.in @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=utility + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . 
+ @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Utility Programs" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Utilit*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/de/utility/bimagemgr-chapter.tex b/docs/manuals/de/utility/bimagemgr-chapter.tex new file mode 100644 index 00000000..6b6239d0 --- /dev/null +++ b/docs/manuals/de/utility/bimagemgr-chapter.tex @@ -0,0 +1,149 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\section{bimagemgr} +\label{bimagemgr} +\index[general]{Bimagemgr } + +{\bf bimagemgr} ist ein Hilfsmittel f\"{u}r diejenigen, die Ihre Backups auf +Festplatten-Volumes speichern und diese Volumes auf CDR brennen wollen. +Es hat eine Web-basierte Bedienoberfl\"{a}che und ist in Perl programmiert. +Es wird benutzt, um zu kontrollieren, wann die Notwendigkeit besteht, eine +Volume-Datei auf eine CD zu brennen. 
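The two Makefiles just above (Makefile and Makefile.in for manuals/de/utility) drive the LaTeX build of the German utility manual. As a rough sketch, assuming a configured tree and the paths used in this patch, a print build of that manual looks like this:

    cd docs/manuals/de/utility
    ./update_version    # rewrite version.tex from version.tex.in via the sed script that do_echo builds
    make tex            # latex + makeindex + latex, as in the tex: target above
    make dvipdf         # convert the resulting utility.dvi to utility.pdf

The update_version step is what keeps the manual's printed version string in sync with src/version.h; the tex target already calls it, so running it by hand is only useful for checking the version substitution on its own.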
+Es ben\"{o}tigt: + +\begin{itemize} +\item Einen Web-Server der auf derselben Maschine wie Bacula l\"{a}uft +\item Einen auf dem Bacula-Server installierten und konfigurierten CD-Rekorder +\item Das cdr-tools-Paket muss installiert sein +\item perl, perl-DBI Modul, und entweder das DBD-MySQL, DBD-SQLite oder DBD-PostgreSQL Modul +\end{itemize} + +DVD-Brenner werden von bimagemgr zur Zeit nicht unterst\"{u}tzt, das ist aber f\"{u}r +zuk\"{u}nftige Versionen geplant. + +\subsection{bimagemgr Installation} +\index[general]{bimagemgr!Installation } +\index[general]{bimagemgr Installation } + +Installation aus dem tar.gz: +% TODO: use itemized list for this? +1. Pr\"{u}fen und anpassen des Makefile, um es auf Ihre Computer-Konfiguration abzustimmen. +2. Editieren der Datei config.pm ,um sie auf Ihre Konfiguration abzustimmen. +3. F\"{u}hren Sie 'make install' als root aus. +4. Passen Sie in Ihrer httpd.conf das Timeout an. Der Web-Server darf die Verbindung nicht schliessen, +solange der Brennvorgang nicht abgeschlossen ist. Der ben\"{o}tigte Wert, den Sie als Timeout +konfigurieren m\"{u}ssen, h\"{a}ngt von der Geschwindigkeit Ihres CD-Brenners ab, oder ob Sie \"{u}ber das Netzwerk brennen. In den meisten F\"{a}llen reichen 1000 Sekunden als Timeout. Den httpd neu starten. +5. Stellen Sie sicher, dass das Kommando cdrecord als "setuid root" installiert ist. +% TODO: I am pretty sure cdrecord can be used without setuid root +% TODO: as long as devices are setup correctly + +Installation eines rpm-Paketes: +% TODO: use itemized list for this? +1. Installieren Sie das rpm-Paket f\"{u}r Ihre Plattform. +2. Editieren Sie die Datei /cgi-bin/config.pm, um sie an Ihre Konfiguration abzupassen. +3. Passen Sie in Ihrer httpd.conf das Timeout an. Der Web-Server darf die Verbindung nicht schliessen, +solange der Brennvorgang nicht abgeschlossen ist. Der ben\"{o}tigte Wert, den Sie als Timeout +konfigurieren m\"{u}ssen, h\"{a}ngt von der Geschwindigkeit Ihres CD-Brenners ab, oder ob Sie \"{u}ber das Netzwerk brennen. In den meisten F\"{a}llen reichen 1000 Sekunden als Timeout. Den httpd neu starten. +4. Stellen Sie sicher, dass das Kommando cdrecord als "setuid root" installiert ist. + +Zugriff auf die Volume-Dateien: +Die Volume-Dateien haben standardm\"{a}{\ss}ig die Zugriffsrechte 640 gesetzt +und k\"{o}nnen nur von Benutzer root gelesen werden. +Die empfohlene Methode ist die folgende (das funktioniert nur, wenn bacula und bimagemgr +auf demselben Computer laufen wie der Web-Server): + +F\"{u}r Bacula-Versionen 1.34 oder 1.36 installiert aus dem tar.gz - +% TODO: use itemized list for this? +1. Erstellen Sie eine neu Gruppe namens bacula und f\"{u}gen Sie den Benutzer apache dieser Gruppe +hinzu (bei RedHat und Mandrake, bei SuSE ist es der Benutzer wwwrun, bei debian www-data) +2. \"{A}ndern Sie den Eigent\"{u}mer aller Volume-Dateien auf root.bacula. +3. Passen Sie das Script /etc/init.d/bacula an und setzen Sie SD\_USER=root und SD\_GROUP=bacula. +Starten Sie Bacula neu. + +Anmerkung: Schritt Nr. 3 sollte auch in /etc/init.d/bacula-sd gemacht werden, +aber die Dateien aus Bacula-Versionen vor 1.36 unterst\"{u}tzen dies nicht. +In diesem Fall kann es n\"{o}tig sein den Computer neu zu starten, +um '/etc/bacula/bacula restart' auszuf\"{u}hren. + +F\"{u}r Bacula-Versionen 1.38 installiert aus dem tar.gz +% TODO: use itemized list for this? +1. 
Ihr configure-Aufruf sollte dies beinhalten: +% TODO: fix formatting here + --with-dir-user=bacula + --with-dir-group=bacula + --with-sd-user=bacula + --with-sd-group=disk + --with-fd-user=root + --with-fd-group=bacula +2. F\"{u}gen Sie den Benutzer apache der Gruppe bacula hinzu +(bei RedHat und Mandrake, bei SuSE ist es der Benutzer wwwrun, bei debian www-data) +3. Kontrollieren/\"{A}ndern Sie den Eigent\"{u}mer aller Volume-Dateien auf root.bacula + +F\"{u}r Bacul-Versionen 1.36 oder 1.38 mit rpm installiert - +% TODO: use itemized list for this? +1. F\"{u}gen Sie den Benutzer apache der Gruppe bacula hinzu +(bei RedHat und Mandrake, bei SuSE ist es der Benutzer wwwrun, bei debian www-data) +2. Kontrollieren/\"{A}ndern Sie den Eigent\"{u}mer aller Volume-Dateien auf root.bacula + +Wenn bimagemgr mit einem rpm-Paket Version gr\"{o}{\ss}er 1.38.9 installiert wird, +wird der Web-Server-Benutzer automatisch der Gruppe bacula hinzugef\"{u}gt. +Stellen Sie sicher, dass Sie die Datei config.pm nach der Installation anpassen. + +bimagemgr kann jetzt alle Volume-Dateien lesen, aber sie sind nicht durch alle Benutzer lesbar. + +Wenn Sie bimagemgr auf einen anderen Computer installieren (nicht empfohlen), +m\"{u}ssen Sie die Zugriffsrechte aller Volume-Dateien auf 644 \"{a}ndern, +damit Sie \"{u}ber nfs oder andere Mittel darauf zugreifen k\"{o}nnen. +Beachten Sie, dass bei diesem Vorgehen die Volume-Dateien f\"{u}r alle Benutzer lesbar sind +und Sie den Schutz der Dateien anders sicherstellen. + +\subsection{bimagemgr Benutzung} +\index[general]{bimagemgr!Benutzung } +\index[general]{bimagemgr Benutzung } + +Rufen Sie das Programm mit Ihrem Web-Browser auf, z.B. {\tt http://localhost/cgi-bin/bimagemgr.pl}, +dann sollten Sie eine Darstellung \"{a}hnlich der unten im Bild 1 abgebildeten sehen. +% TODO: use tex to say figure number +Das Programm wird die Bacula-Datenbank abfragen und alle Volume-Dateien mit dem Datum +des letzten Schreibvorgangs und dem Zeitpunkt darstellen, wo das Volume zum letzten +Mal auf CD gebrannt wurde. Wenn ein Volume auf CD gebrannt werden muss (letzter Schreibvorgang +ist neuer als der letzte Brennvorgang), wird ein "Brennen"-Knopf in der rechten Spalte angezeigt. + +\addcontentsline{lof}{figure}{Bacula CD Image Manager} +\includegraphics{./bimagemgr1.eps} \\Figure 1 +% TODO: use tex to say figure number + +Legen Sie eine leere CD in Ihren CD-Brenner und klicken Sie auf den "Brennen"-Knopf. +Dann \"{o}ffnet sich ein PopUp-Fenster, wie im Bild 2, das den Brennvorgang anzeigt. +% TODO: use tex to say figure number + +\addcontentsline{lof}{figure}{Bacula CD Image Brennfortschritt-Fenster} +\includegraphics{./bimagemgr2.eps} \\Figure 2 +% TODO: use tex to say figure number + +Wenn der Brennvorgang abgeschlo{\ss}en ist, zeigt das PopUp-Fenster die Ausgaben von cdrecord +an (siehe Bild 3). +% TODO: use tex to say figure number +Schlie{\ss}en Sie das PopUp-Fenster und laden Sie die Hauptseite neu. +Das Datum des letzten Brennvorgangs wird aktualisiert und der "Brennen"-Knopf verschwindet. +Sollte das Brennen fehlgeschlagen sein, k\"{o}nnen Sie das Datum des letzten Brennvorgangs +zur\"{u}cksetzen, indem Sie auf den Link "Reset" des Volumes klicken. + +\addcontentsline{lof}{figure}{Bacula CD Image Brennergebnis} +\includegraphics{./bimagemgr3.eps} \\Figure 3 +% TODO: use tex to say figure number + +In der untersten Zeile des Hauptfensters sind zwei weitere Kn\"{o}pfe, +mit "Burn Catalog" und "Blank CDRW" beschriftet. 
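To make the access setup described above concrete, here is a rough, distribution-dependent sketch of what it amounts to for a tar.gz install; the web user name (apache here, wwwrun on SuSE, www-data on Debian) and the volume path /var/bacula/volumes are assumptions to be replaced with your own values:

    groupadd bacula                          # create the group if it does not already exist
    usermod -a -G bacula apache              # let the web server user read the volumes via group permissions
    chown root:bacula /var/bacula/volumes/*  # volumes stay owned by root, group bacula, mode 640
    # in httpd.conf, raise Timeout (e.g. Timeout 1000) so long burns are not cut off; then restart httpd

For 1.38 built from source, the equivalent is to pick the owner and group through the ./configure --with-*-user and --with-*-group switches listed above and then simply add the web server user to the bacula group.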
+"Burn Catalog" schreibt eine Kopie Ihrer Katalog-Datenbank auf eine CD. +Falls Sie CDRW-Medien benutzen, k\"{o}nnen Sie mit "Blank CDRW" ein Medium l\"{o}schen +bevor Sie es wiederverwenden. +Regelm\"{a}ssiges speichern Ihrer Volume-Dateien und Ihrer Katalog-Datenbank mit bimagemgr auf CD's +stellt sicher, dass Sie jederzeit im Falle eines Datenverlustes auf Ihrem Bacula-Server +diesen einfach wiederherstellen k\"{o}nnen. diff --git a/docs/manuals/de/utility/check_tex.pl b/docs/manuals/de/utility/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/de/utility/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. 
Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/de/utility/do_echo b/docs/manuals/de/utility/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/de/utility/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/de/utility/fdl.tex b/docs/manuals/de/utility/fdl.tex new file mode 100644 index 00000000..b46cd990 --- /dev/null +++ b/docs/manuals/de/utility/fdl.tex @@ -0,0 +1,485 @@ +% TODO: maybe get rid of centering + +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} + +\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. 
+Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. 
+ +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. 
+ +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] 
+ Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. 
If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. 
+Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." 
line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/de/utility/fix_tex.pl b/docs/manuals/de/utility/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/de/utility/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . 
'}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/de/utility/index.perl b/docs/manuals/de/utility/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/de/utility/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... 
+# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. +# Saves the index ref and title. 
+# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. + $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. + # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. + s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
<DD>'.$index unless ($index =~ /^\s*<D(T|D)>/);
+    $index_number++;  # KEC.
+    if ($SHORT_INDEX) {
+      print "(compact version with Legend)";
+      local($num) = ( $index =~ s/\<D/<D/g );
+      if ($num > 50 ) {
+        s/$idx_mark/$preindex<HR><P>\n$index\n<\/DL>$preindex/o;
+      } else {
+        s/$idx_mark/$preindex\n$index\n<\/DL>/o;
+      }
+    } else {
+      s/$idx_mark/<DL COMPACT>
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. 
In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/de/utility/latex2html-init.pl b/docs/manuals/de/utility/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/de/utility/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/de/utility/progs.tex b/docs/manuals/de/utility/progs.tex new file mode 100644 index 00000000..9187970d --- /dev/null +++ b/docs/manuals/de/utility/progs.tex @@ -0,0 +1,1332 @@ +%% +%% + +\chapter{Volume Utility Tools} +\label{_UtilityChapter} +\index[general]{Volume Utility Tools} +\index[general]{Tools!Volume Utility} + +This document describes the utility programs written to aid Bacula users and +developers in dealing with Volumes external to Bacula. + +\section{Specifying the Configuration File} +\index[general]{Specifying the Configuration File} + +Starting with version 1.27, each of the following programs requires a valid +Storage daemon configuration file (actually, the only part of the +configuration file that these programs need is the {\bf Device} resource +definitions). This permits the programs to find the configuration parameters +for your archive device (generally a tape drive). By default, they read {\bf +bacula-sd.conf} in the current directory, but you may specify a different +configuration file using the {\bf -c} option. + + +\section{Specifying a Device Name For a Tape} +\index[general]{Tape!Specifying a Device Name For a} +\index[general]{Specifying a Device Name For a Tape} + +Each of these programs require a {\bf device-name} where the Volume can be +found. In the case of a tape, this is the physical device name such as {\bf +/dev/nst0} or {\bf /dev/rmt/0ubn} depending on your system. For the program to +work, it must find the identical name in the Device resource of the +configuration file. See below for specifying Volume names. + +Please note that if you have Bacula running and you ant to use +one of these programs, you will either need to stop the Storage daemon, or +{\bf unmount} any tape drive you want to use, otherwise the drive +will {\bf busy} because Bacula is using it. + + +\section{Specifying a Device Name For a File} +\index[general]{File!Specifying a Device Name For a} +\index[general]{Specifying a Device Name For a File} + +If you are attempting to read or write an archive file rather than a tape, the +{\bf device-name} should be the full path to the archive location including +the filename. The filename (last part of the specification) will be stripped +and used as the Volume name, and the path (first part before the filename) +must have the same entry in the configuration file. So, the path is equivalent +to the archive device name, and the filename is equivalent to the volume name. + + +\section{Specifying Volumes} +\index[general]{Volumes!Specifying} +\index[general]{Specifying Volumes} + +In general, you must specify the Volume name to each of the programs below +(with the exception of {\bf btape}). The best method to do so is to specify a +{\bf bootstrap} file on the command line with the {\bf -b} option. 
As part of +the bootstrap file, you will then specify the Volume name or Volume names if +more than one volume is needed. For example, suppose you want to read tapes +{\bf tape1} and {\bf tape2}. First construct a {\bf bootstrap} file named say, +{\bf list.bsr} which contains: + +\footnotesize +\begin{verbatim} +Volume=test1|test2 +\end{verbatim} +\normalsize + +where each Volume is separated by a vertical bar. Then simply use: + +\footnotesize +\begin{verbatim} +./bls -b list.bsr /dev/nst0 +\end{verbatim} +\normalsize + +In the case of Bacula Volumes that are on files, you may simply append volumes +as follows: + +\footnotesize +\begin{verbatim} +./bls /tmp/test1\|test2 +\end{verbatim} +\normalsize + +where the backslash (\textbackslash{}) was necessary as a shell escape to +permit entering the vertical bar (|). + +And finally, if you feel that specifying a Volume name is a bit complicated +with a bootstrap file, you can use the {\bf -V} option (on all programs except +{\bf bcopy}) to specify one or more Volume names separated by the vertical bar +(|). For example, + +\footnotesize +\begin{verbatim} +./bls -V Vol001 /dev/nst0 +\end{verbatim} +\normalsize + +You may also specify an asterisk (*) to indicate that the program should +accept any volume. For example: + +\footnotesize +\begin{verbatim} +./bls -V* /dev/nst0 +\end{verbatim} +\normalsize + +\section{bls} +\label{bls} +\index[general]{bls} +\index[general]{program!bls} + +{\bf bls} can be used to do an {\bf ls} type listing of a {\bf Bacula} tape or +file. It is called: + +\footnotesize +\begin{verbatim} +Usage: bls [options] + -b specify a bootstrap file + -c specify a config file + -d specify debug level + -e exclude list + -i include list + -j list jobs + -k list blocks + (no j or k option) list saved files + -L dump label + -p proceed inspite of errors + -v be verbose + -V specify Volume names (separated by |) + -? print this message +\end{verbatim} +\normalsize + +For example, to list the contents of a tape: + +\footnotesize +\begin{verbatim} +./bls -V Volume-name /dev/nst0 +\end{verbatim} +\normalsize + +Or to list the contents of a file: + +\footnotesize +\begin{verbatim} +./bls /tmp/Volume-name +or +./bls -V Volume-name /tmp +\end{verbatim} +\normalsize + +Note that, in the case of a file, the Volume name becomes the filename, so in +the above example, you will replace the {\bf xxx} with the name of the volume +(file) you wrote. + +Normally if no options are specified, {\bf bls} will produce the equivalent +output to the {\bf ls -l} command for each file on the tape. Using other +options listed above, it is possible to display only the Job records, only the +tape blocks, etc. 
For example: + +\footnotesize +\begin{verbatim} + +./bls /tmp/File002 +bls: butil.c:148 Using device: /tmp +drwxrwxr-x 3 k k 4096 02-10-19 21:08 /home/kern/bacula/k/src/dird/ +drwxrwxr-x 2 k k 4096 02-10-10 18:59 /home/kern/bacula/k/src/dird/CVS/ +-rw-rw-r-- 1 k k 54 02-07-06 18:02 /home/kern/bacula/k/src/dird/CVS/Root +-rw-rw-r-- 1 k k 16 02-07-06 18:02 /home/kern/bacula/k/src/dird/CVS/Repository +-rw-rw-r-- 1 k k 1783 02-10-10 18:59 /home/kern/bacula/k/src/dird/CVS/Entries +-rw-rw-r-- 1 k k 97506 02-10-18 21:07 /home/kern/bacula/k/src/dird/Makefile +-rw-r--r-- 1 k k 3513 02-10-18 21:02 /home/kern/bacula/k/src/dird/Makefile.in +-rw-rw-r-- 1 k k 4669 02-07-06 18:02 /home/kern/bacula/k/src/dird/README-config +-rw-r--r-- 1 k k 4391 02-09-14 16:51 /home/kern/bacula/k/src/dird/authenticate.c +-rw-r--r-- 1 k k 3609 02-07-07 16:41 /home/kern/bacula/k/src/dird/autoprune.c +-rw-rw-r-- 1 k k 4418 02-10-18 21:03 /home/kern/bacula/k/src/dird/bacula-dir.conf +... +-rw-rw-r-- 1 k k 83 02-08-31 19:19 /home/kern/bacula/k/src/dird/.cvsignore +bls: Got EOF on device /tmp +84 files found. +\end{verbatim} +\normalsize + +\subsection{Listing Jobs} +\index[general]{Listing Jobs with bls} +\index[general]{bls!Listing Jobs} + +If you are listing a Volume to determine what Jobs to restore, normally the +{\bf -j} option provides you with most of what you will need as long as you +don't have multiple clients. For example, + +\footnotesize +\begin{verbatim} +./bls -j -V Test1 -c stored.conf DDS-4 +bls: butil.c:258 Using device: "DDS-4" for reading. +11-Jul 11:54 bls: Ready to read from volume "Test1" on device "DDS-4" (/dev/nst0). +Volume Record: File:blk=0:1 SessId=4 SessTime=1121074625 JobId=0 DataLen=165 +Begin Job Session Record: File:blk=0:2 SessId=4 SessTime=1121074625 JobId=1 Level=F Type=B +Begin Job Session Record: File:blk=0:3 SessId=5 SessTime=1121074625 JobId=5 Level=F Type=B +Begin Job Session Record: File:blk=0:6 SessId=3 SessTime=1121074625 JobId=2 Level=F Type=B +Begin Job Session Record: File:blk=0:13 SessId=2 SessTime=1121074625 JobId=4 Level=F Type=B +End Job Session Record: File:blk=0:99 SessId=3 SessTime=1121074625 JobId=2 Level=F Type=B + Files=168 Bytes=1,732,978 Errors=0 Status=T +End Job Session Record: File:blk=0:101 SessId=2 SessTime=1121074625 JobId=4 Level=F Type=B + Files=168 Bytes=1,732,978 Errors=0 Status=T +End Job Session Record: File:blk=0:108 SessId=5 SessTime=1121074625 JobId=5 Level=F Type=B + Files=168 Bytes=1,732,978 Errors=0 Status=T +End Job Session Record: File:blk=0:109 SessId=4 SessTime=1121074625 JobId=1 Level=F Type=B + Files=168 Bytes=1,732,978 Errors=0 Status=T +11-Jul 11:54 bls: End of Volume at file 1 on device "DDS-4" (/dev/nst0), Volume "Test1" +11-Jul 11:54 bls: End of all volumes. +\end{verbatim} +\normalsize + +shows a full save followed by two incremental saves. + +Adding the {\bf -v} option will display virtually all information that is +available for each record: + +\subsection{Listing Blocks} +\index[general]{Listing Blocks with bls} +\index[general]{bls!Listing Blocks} + +Normally, except for debugging purposes, you will not need to list Bacula +blocks (the "primitive" unit of Bacula data on the Volume). However, you can +do so with: + +\footnotesize +\begin{verbatim} +./bls -k /tmp/File002 +bls: butil.c:148 Using device: /tmp +Block: 1 size=64512 +Block: 2 size=64512 +... 
+Block: 65 size=64512 +Block: 66 size=19195 +bls: Got EOF on device /tmp +End of File on device +\end{verbatim} +\normalsize + +By adding the {\bf -v} option, you can get more information, which can be +useful in knowing what sessions were written to the volume: + +\footnotesize +\begin{verbatim} +./bls -k -v /tmp/File002 +Volume Label: +Id : Bacula 0.9 mortal +VerNo : 10 +VolName : File002 +PrevVolName : +VolFile : 0 +LabelType : VOL_LABEL +LabelSize : 147 +PoolName : Default +MediaType : File +PoolType : Backup +HostName : +Date label written: 2002-10-19 at 21:16 +Block: 1 blen=64512 First rec FI=VOL_LABEL SessId=1 SessTim=1035062102 Strm=0 rlen=147 +Block: 2 blen=64512 First rec FI=6 SessId=1 SessTim=1035062102 Strm=DATA rlen=4087 +Block: 3 blen=64512 First rec FI=12 SessId=1 SessTim=1035062102 Strm=DATA rlen=5902 +Block: 4 blen=64512 First rec FI=19 SessId=1 SessTim=1035062102 Strm=DATA rlen=28382 +... +Block: 65 blen=64512 First rec FI=83 SessId=1 SessTim=1035062102 Strm=DATA rlen=1873 +Block: 66 blen=19195 First rec FI=83 SessId=1 SessTim=1035062102 Strm=DATA rlen=2973 +bls: Got EOF on device /tmp +End of File on device +\end{verbatim} +\normalsize + +Armed with the SessionId and the SessionTime, you can extract just about +anything. + +If you want to know even more, add a second {\bf -v} to the command line to +get a dump of every record in every block. + +\footnotesize +\begin{verbatim} +./bls -k -v -v /tmp/File002 +bls: block.c:79 Dump block 80f8ad0: size=64512 BlkNum=1 + Hdrcksum=b1bdfd6d cksum=b1bdfd6d +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=VOL_LABEL Strm=0 len=147 p=80f8b40 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=SOS_LABEL Strm=-7 len=122 p=80f8be7 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=1 Strm=UATTR len=86 p=80f8c75 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=2 Strm=UATTR len=90 p=80f8cdf +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=3 Strm=UATTR len=92 p=80f8d4d +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=3 Strm=DATA len=54 p=80f8dbd +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=3 Strm=MD5 len=16 p=80f8e07 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=4 Strm=UATTR len=98 p=80f8e2b +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=4 Strm=DATA len=16 p=80f8ea1 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=4 Strm=MD5 len=16 p=80f8ec5 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=5 Strm=UATTR len=96 p=80f8ee9 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=5 Strm=DATA len=1783 p=80f8f5d +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=5 Strm=MD5 len=16 p=80f9668 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=UATTR len=95 p=80f968c +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=DATA len=32768 p=80f96ff +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=DATA len=32768 p=8101713 +bls: block.c:79 Dump block 80f8ad0: size=64512 BlkNum=2 + Hdrcksum=9acc1e7f cksum=9acc1e7f +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=contDATA len=4087 p=80f8b40 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=DATA len=31970 p=80f9b4b +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=MD5 len=16 p=8101841 +... +\end{verbatim} +\normalsize + +\section{bextract} +\label{bextract} +\index[general]{Bextract} +\index[general]{program!bextract} + +If you find yourself using {\bf bextract}, you probably have done +something wrong. For example, if you are trying to recover a file +but are having problems, please see the \ilink {Restoring When Things Go +Wrong}{database_restore} section of the Restore chapter of this manual. 
+ +Normally, you will restore files by running a {\bf Restore} Job from the {\bf +Console} program. However, {\bf bextract} can be used to extract a single file +or a list of files from a Bacula tape or file. In fact, {\bf bextract} can be +a useful tool to restore files to an empty system assuming you are able to +boot, you have statically linked {\bf bextract} and you have an appropriate +{\bf bootstrap} file. + +Please note that some of the current limitations of bextract are: + +\begin{enumerate} +\item It cannot restore access control lists (ACL) that have been + backed up along with the file data. +\item It cannot restore Win32 non-portable streams (typically default). +\item It cannot restore encrypted files. +\item The command line length is relatively limited, + which means that you cannot enter a huge number of volumes. If you need to + enter more volumes than the command line supports, please use a bootstrap + file (see below). +\end{enumerate} + + +It is called: + +\footnotesize +\begin{verbatim} + +Usage: bextract [-d debug_level] + -b specify a bootstrap file + -dnn set debug level to nn + -e exclude list + -i include list + -p proceed inspite of I/O errors + -V specify Volume names (separated by |) + -? print this message +\end{verbatim} +\normalsize + +where {\bf device-name} is the Archive Device (raw device name or full +filename) of the device to be read, and {\bf directory-to-store-files} is a +path prefix to prepend to all the files restored. + +NOTE: On Windows systems, if you specify a prefix of say d:/tmp, any file that +would have been restored to {\bf c:/My Documents} will be restored to {\bf +d:/tmp/My Documents}. That is, the original drive specification will be +stripped. If no prefix is specified, the file will be restored to the original +drive. + +\subsection{Extracting with Include or Exclude Lists} +\index[general]{Lists!Extracting with Include or Exclude} +\index[general]{Extracting with Include or Exclude Lists} + +Using the {\bf -e} option, you can specify a file containing a list of files +to be excluded. Wildcards can be used in the exclusion list. This option will +normally be used in conjunction with the {\bf -i} option (see below). Both the +{\bf -e} and the {\bf -i} options may be specified at the same time as the +{\bf -b} option. The bootstrap filters will be applied first, then the include +list, then the exclude list. + +Likewise, and probably more importantly, with the {\bf -i} option, you can +specify a file that contains a list (one file per line) of files and +directories to include to be restored. The list must contain the full filename +with the path. If you specify a path name only, all files and subdirectories +of that path will be restored. If you specify a line containing only the +filename (e.g. {\bf my-file.txt}) it probably will not be extracted because +you have not specified the full path. + +For example, if the file {\bf include-list} contains: + +\footnotesize +\begin{verbatim} +/home/kern/bacula +/usr/local/bin +\end{verbatim} +\normalsize + +Then the command: + +\footnotesize +\begin{verbatim} +./bextract -i include-list -V Volume /dev/nst0 /tmp +\end{verbatim} +\normalsize + +will restore from the Bacula archive {\bf /dev/nst0} all files and directories +in the backup from {\bf /home/kern/bacula} and from {\bf /usr/local/bin}. The +restored files will be placed in a file of the original name under the +directory {\bf /tmp} (i.e. /tmp/home/kern/bacula/... and +/tmp/usr/local/bin/...). 
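+
+Likewise, an exclude list can be combined with the include list shown
+above. As an illustrative sketch only (the wild-card patterns and the
+file name {\bf exclude-list} below are hypothetical, not part of any
+standard installation), such a file might contain:
+
+\footnotesize
+\begin{verbatim}
+*.o
+/home/kern/bacula/tmp/*
+\end{verbatim}
+\normalsize
+
+and both lists could then be passed to {\bf bextract} together:
+
+\footnotesize
+\begin{verbatim}
+./bextract -i include-list -e exclude-list -V Volume /dev/nst0 /tmp
+\end{verbatim}
+\normalsize
+
+Because the exclude list is applied last, files matching those patterns
+would be skipped even though they lie within the included directories.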
+ +\subsection{Extracting With a Bootstrap File} +\index[general]{File!Extracting With a Bootstrap} +\index[general]{Extracting With a Bootstrap File} + +The {\bf -b} option is used to specify a {\bf bootstrap} file containing the +information needed to restore precisely the files you want. Specifying a {\bf +bootstrap} file is optional but recommended because it gives you the most +control over which files will be restored. For more details on the {\bf +bootstrap} file, please see +\ilink{Restoring Files with the Bootstrap File}{BootstrapChapter} +chapter of this document. Note, you may also use a bootstrap file produced by +the {\bf restore} command. For example: + +\footnotesize +\begin{verbatim} +./bextract -b bootstrap-file /dev/nst0 /tmp +\end{verbatim} +\normalsize + +The bootstrap file allows detailed specification of what files you want +restored (extracted). You may specify a bootstrap file and include and/or +exclude files at the same time. The bootstrap conditions will first be +applied, and then each file record seen will be compared to the include and +exclude lists. + +\subsection{Extracting From Multiple Volumes} +\index[general]{Volumes!Extracting From Multiple} +\index[general]{Extracting From Multiple Volumes} + +If you wish to extract files that span several Volumes, you can specify the +Volume names in the bootstrap file or you may specify the Volume names on the +command line by separating them with a vertical bar. See the section above +under the {\bf bls} program entitled {\bf Listing Multiple Volumes} for more +information. The same techniques apply equally well to the {\bf bextract} +program or read the \ilink{Bootstrap}{BootstrapChapter} +chapter of this document. + +\section{bscan} +\label{bscan} +\index[general]{bscan} +\index[general]{program!bscan} + +If you find yourself using this program, you have probably done something +wrong. For example, the best way to recover a lost or damaged Bacula +database is to reload the database by using the bootstrap file that +was written when you saved it (default bacula-dir.conf file). + +The {\bf bscan} program can be used to re-create a database (catalog) +records from the backup information written to one or more Volumes. +This is normally +needed only if one or more Volumes have been pruned or purged from your +catalog so that the records on the Volume are no longer in the catalog, or +for Volumes that you have archived. + +With some care, it can also be used to synchronize your existing catalog with +a Volume. Although we have never seen a case of bscan damaging a +catalog, since bscan modifies your catalog, we recommend that +you do a simple ASCII backup of your database before running {\bf bscan} just +to be sure. See \ilink{Compacting Your Database}{CompactingMySQL} for +the details of making a copy of your database. + +{\bf bscan} can also be useful in a disaster recovery situation, after the +loss of a hard disk, if you do not have a valid {\bf bootstrap} file for +reloading your system, or if a Volume has been recycled but not overwritten, +you can use {\bf bscan} to re-create your database, which can then be used to +{\bf restore} your system or a file to its previous state. 
+
+It is called:
+
+\footnotesize
+\begin{verbatim}
+
+Usage: bscan [options]
+   -b bootstrap specify a bootstrap file
+   -c specify configuration file
+   -d set debug level to nn
+   -m update media info in database
+   -n specify the database name (default bacula)
+   -u specify database user name (default bacula)
+   -P specify database password (default none)
+   -h specify database host (default NULL)
+   -p proceed inspite of I/O errors
+   -r list records
+   -s synchronize or store in database
+   -v verbose
+   -V specify Volume names (separated by |)
+   -w specify working directory (default from conf file)
+   -? print this message
+\end{verbatim}
+\normalsize
+
+If you are using MySQL or PostgreSQL, there is no need to supply a working
+directory since in that case, bscan knows where the databases are. However, if
+you have provided security on your database, you may need to supply the
+database name ({\bf -n} option), the user name ({\bf -u} option), and/or the
+password ({\bf -P} option).
+
+NOTE: before {\bf bscan} can work, it needs at least a bare bones valid
+database. If your database exists but some records are missing because
+they were pruned, then you are all set. If your database was lost or
+destroyed, then you must first ensure that you have the SQL program running
+(MySQL or PostgreSQL), then you must create the Bacula database (normally
+named bacula), and you must create the Bacula tables using the scripts in
+the {\bf cats} directory. This is explained in the
+\ilink{Installation}{CreateDatabase} chapter of the manual. Finally, before
+scanning into an empty database, you must start and stop the Director with
+the appropriate bacula-dir.conf file so that it can create the Client and
+Storage records which are not stored on the Volumes. Without these
+records, scanning is unable to connect the Job records to the proper
+client.
+
+Forgetting for the moment the extra complications of a full rebuild of
+your catalog, let's suppose that you did a backup to Volumes "Vol001"
+and "Vol002", then sometime later all records of one or both of those
+Volumes were pruned or purged from the
+database. By using {\bf bscan} you can recreate the catalog entries for
+those Volumes and then use the {\bf restore} command in the Console to restore
+whatever you want. A command something like:
+
+\footnotesize
+\begin{verbatim}
+bscan -c bacula-sd.conf -v -V Vol001\|Vol002 /dev/nst0
+\end{verbatim}
+\normalsize
+
+will give you an idea of what is going to happen without changing
+your catalog. Of course, you may need to change the path to the Storage
+daemon's conf file, the Volume name, and your tape (or disk) device name. This
+command must read the entire tape, so if it has a lot of data, it may take a
+long time, and thus you might want to immediately use the command listed
+below. Note, if you are writing to a disk file, replace the device name with
+the path to the directory that contains the Volumes. This must correspond to
+the Archive Device in the conf file.
+
+Then to actually write or store the records in the catalog, add the {\bf -s}
+option as follows:
+
+\footnotesize
+\begin{verbatim}
+ bscan -s -m -c bacula-sd.conf -v -V Vol001\|Vol002 /dev/nst0
+\end{verbatim}
+\normalsize
+
+When writing to the database, if bscan finds existing records, it will
+generally either update them if something is wrong or leave them alone. Thus
+if the Volumes you are scanning are all or partially in the catalog already, no
+harm will be done to that existing data. Any missing data will simply be
+added.
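+
+Because the {\bf -s} option writes to the catalog, this is also the point
+at which to make the simple ASCII copy of the database recommended above.
+As a minimal sketch, assuming a MySQL catalog named {\bf bacula} owned by
+the database user {\bf bacula} (adjust the names to your installation, and
+use the equivalent pg_dump invocation for PostgreSQL), the dump could be
+made with:
+
+\footnotesize
+\begin{verbatim}
+mysqldump -u bacula -p bacula > bacula-catalog-before-bscan.sql
+\end{verbatim}
+\normalsize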
+ +If you have multiple tapes, you should scan them with: + +\footnotesize +\begin{verbatim} + bscan -s -m -c bacula-sd.conf -v -V Vol001\|Vol002\|Vol003 /dev/nst0 +\end{verbatim} +\normalsize + +Since there is a limit on the command line length (511 bytes) accepted +by {\bf bscan}, if you have too many Volumes, you will need to manually +create a bootstrap file. See the \ilink{Bootstrap}{BootstrapChapter} +chapter of this manual for more details, in particular the section +entitled \ilink{Bootstrap for bscan}{bscanBootstrap}. + +You should, always try to specify the tapes in the order they are written. +However, bscan can handle scanning tapes that are not sequential. Any +incomplete records at the end of the tape will simply be ignored in that +case. If you are simply repairing an existing catalog, this may be OK, but +if you are creating a new catalog from scratch, it will leave your database +in an incorrect state. If you do not specify all necessary Volumes on a +single bscan command, bscan will not be able to correctly restore the +records that span two volumes. In other words, it is much better to +specify two or three volumes on a single bscan command rather than run +bscan two or three times, each with a single volume. + + +Note, the restoration process using bscan is not identical to the original +creation of the catalog data. This is because certain data such as Client +records and other non-essential data such +as volume reads, volume mounts, etc is not stored on the Volume, and thus is +not restored by bscan. The results of bscanning are, however, perfectly valid, +and will permit restoration of any or all the files in the catalog using the +normal Bacula console commands. If you are starting with an empty catalog +and expecting bscan to reconstruct it, you may be a bit disappointed, but +at a minimum, you must ensure that your bacula-dir.conf file is the same +as what it previously was -- that is, it must contain all the appropriate +Client resources so that they will be recreated in your new database {\bf +before} running bscan. Normally when the Director starts, it will recreate +any missing Client records in the catalog. Another problem you will have +is that even if the Volumes (Media records) are recreated in the database, +they will not have their autochanger status and slots properly set. As a +result, you will need to repair that by using the {\bf update slots} +command. There may be other considerations as well. Rather than +bscanning, you should always attempt to recover you previous catalog +backup. + + +\subsection{Using bscan to Compare a Volume to an existing Catalog} +\index[general]{Catalog!Using bscan to Compare a Volume to an existing} +\index[general]{Using bscan to Compare a Volume to an existing Catalog} + +If you wish to compare the contents of a Volume to an existing catalog without +changing the catalog, you can safely do so if and only if you do {\bf not} +specify either the {\bf -m} or the {\bf -s} options. However, at this time +(Bacula version 1.26), the comparison routines are not as good or as thorough +as they should be, so we don't particularly recommend this mode other than for +testing. + +\subsection{Using bscan to Recreate a Catalog from a Volume} +\index[general]{Volume!Using bscan to Recreate a Catalog from a Volume} +\index[general]{Using bscan to Recreate a Catalog from a Volume} + +This is the mode for which {\bf bscan} is most useful. 
You can either {\bf
+bscan} into a freshly created catalog, or directly into your existing catalog
+(after having made an ASCII copy as described above). Normally, you should
+start with a freshly created catalog that contains no data.
+
+Starting with a single Volume named {\bf TestVolume1}, you run a command such
+as:
+
+\footnotesize
+\begin{verbatim}
+./bscan -V TestVolume1 -v -s -m -c bacula-sd.conf /dev/nst0
+\end{verbatim}
+\normalsize
+
+If there is more than one volume, simply append it to the first one, separating
+it with a vertical bar. You may need to precede the vertical bar with a
+backslash to escape it from the shell -- e.g. {\bf
+TestVolume1\textbackslash{}|TestVolume2}. The {\bf -v} option was added for
+verbose output (this can be omitted if desired). The {\bf -s} option
+tells {\bf bscan} to store information in the database. The physical device
+name {\bf /dev/nst0} is specified after all the options.
+
+For example, after having done a full backup of a directory, then two
+incrementals, I reinitialized the SQLite database as described above, and
+using the bootstrap.bsr file noted above, I entered the following command:
+
+\footnotesize
+\begin{verbatim}
+./bscan -b bootstrap.bsr -v -s -c bacula-sd.conf /dev/nst0
+\end{verbatim}
+\normalsize
+
+which produced the following output:
+
+\footnotesize
+\begin{verbatim}
+bscan: bscan.c:182 Using Database: bacula, User: bacula
+bscan: bscan.c:673 Created Pool record for Pool: Default
+bscan: bscan.c:271 Pool type "Backup" is OK.
+bscan: bscan.c:632 Created Media record for Volume: TestVolume1
+bscan: bscan.c:298 Media type "DDS-4" is OK.
+bscan: bscan.c:307 VOL_LABEL: OK for Volume: TestVolume1
+bscan: bscan.c:693 Created Client record for Client: Rufus
+bscan: bscan.c:769 Created new JobId=1 record for original JobId=2
+bscan: bscan.c:717 Created FileSet record "Kerns Files"
+bscan: bscan.c:819 Updated Job termination record for new JobId=1
+bscan: bscan.c:905 Created JobMedia record JobId 1, MediaId 1
+bscan: Got EOF on device /dev/nst0
+bscan: bscan.c:693 Created Client record for Client: Rufus
+bscan: bscan.c:769 Created new JobId=2 record for original JobId=3
+bscan: bscan.c:708 Fileset "Kerns Files" already exists.
+bscan: bscan.c:819 Updated Job termination record for new JobId=2
+bscan: bscan.c:905 Created JobMedia record JobId 2, MediaId 1
+bscan: Got EOF on device /dev/nst0
+bscan: bscan.c:693 Created Client record for Client: Rufus
+bscan: bscan.c:769 Created new JobId=3 record for original JobId=4
+bscan: bscan.c:708 Fileset "Kerns Files" already exists.
+bscan: bscan.c:819 Updated Job termination record for new JobId=3
+bscan: bscan.c:905 Created JobMedia record JobId 3, MediaId 1
+bscan: Got EOF on device /dev/nst0
+bscan: bscan.c:652 Updated Media record at end of Volume: TestVolume1
+bscan: bscan.c:428 End of Volume. VolFiles=3 VolBlocks=57 VolBytes=10,027,437
+\end{verbatim}
+\normalsize
+
+The key points to note are that {\bf bscan} prints a line when each major
+record is created. Due to the volume of output, it does not print a line for
+each file record unless you supply the {\bf -v} option twice or more on the
+command line.
+
+In the case of a Job record, the new JobId will not normally be the same as
+the original JobId. For example, for the first JobId above, the new JobId is
+1, but the original JobId is 2. This is nothing to be concerned about as it is
+the normal nature of databases. {\bf bscan} will keep everything straight.
+ +Although {\bf bscan} claims that it created a Client record for Client: Rufus +three times, it was actually only created the first time. This is normal. + +You will also notice that it read an end of file after each Job (Got EOF on +device ...). Finally the last line gives the total statistics for the bscan. + +If you had added a second {\bf -v} option to the command line, Bacula would +have been even more verbose, dumping virtually all the details of each Job +record it encountered. + +Now if you start Bacula and enter a {\bf list jobs} command to the console +program, you will get: + +\footnotesize +\begin{verbatim} ++-------+----------+------------------+------+-----+----------+----------+---------+ +| JobId | Name | StartTime | Type | Lvl | JobFiles | JobBytes | JobStat | ++-------+----------+------------------+------+-----+----------+----------+---------+ +| 1 | kernsave | 2002-10-07 14:59 | B | F | 84 | 4180207 | T | +| 2 | kernsave | 2002-10-07 15:00 | B | I | 15 | 2170314 | T | +| 3 | kernsave | 2002-10-07 15:01 | B | I | 33 | 3662184 | T | ++-------+----------+------------------+------+-----+----------+----------+---------+ +\end{verbatim} +\normalsize + +which corresponds virtually identically with what the database contained +before it was re-initialized and restored with bscan. All the Jobs and Files +found on the tape are restored including most of the Media record. The Volume +(Media) records restored will be marked as {\bf Full} so that they cannot be +rewritten without operator intervention. + +It should be noted that {\bf bscan} cannot restore a database to the exact +condition it was in previously because a lot of the less important information +contained in the database is not saved to the tape. Nevertheless, the +reconstruction is sufficiently complete, that you can run {\bf restore} +against it and get valid results. + +An interesting aspect of restoring a catalog backup using {\bf bscan} is +that the backup was made while Bacula was running and writing to a tape. At +the point the backup of the catalog is made, the tape Bacula is writing to +will have say 10 files on it, but after the catalog backup is made, there +will be 11 files on the tape Bacula is writing. This there is a difference +between what is contained in the backed up catalog and what is actually on +the tape. If after restoring a catalog, you attempt to write on the same +tape that was used to backup the catalog, Bacula will detect the difference +in the number of files registered in the catalog compared to what is on the +tape, and will mark the tape in error. + +There are two solutions to this problem. The first is possibly the simplest +and is to mark the volume as Used before doing any backups. The second is +to manually correct the number of files listed in the Media record of the +catalog. This procedure is documented elsewhere in the manual and involves +using the {\bf update volume} command in {\bf bconsole}. + +\subsection{Using bscan to Correct the Volume File Count} +\index[general]{Using bscan to Correct the Volume File Count} +\index[general]{Count!Using bscan to Correct the Volume File Count} + +If the Storage daemon crashes during a backup Job, the catalog will not be +properly updated for the Volume being used at the time of the crash. This +means that the Storage daemon will have written say 20 files on the tape, but +the catalog record for the Volume indicates only 19 files. + +Bacula refuses to write on a tape that contains a different number of files +from what is in the catalog. 
To correct this situation, you may run a {\bf +bscan} with the {\bf -m} option (but {\bf without} the {\bf -s} option) to +update only the final Media record for the Volumes read. + +\subsection{After bscan} +\index[general]{After bscan} +\index[general]{Bscan!After} + +If you use {\bf bscan} to enter the contents of the Volume into an existing +catalog, you should be aware that the records you entered may be immediately +pruned during the next job, particularly if the Volume is very old or had been +previously purged. To avoid this, after running {\bf bscan}, you can manually +set the volume status (VolStatus) to {\bf Read-Only} by using the {\bf update} +command in the catalog. This will allow you to restore from the volume without +having it immediately purged. When you have restored and backed up the data, +you can reset the VolStatus to {\bf Used} and the Volume will be purged from +the catalog. + +\section{bcopy} +\label{bcopy} +\index[general]{Bcopy} +\index[general]{program!bcopy} + +The {\bf bcopy} program can be used to copy one {\bf Bacula} archive file to +another. For example, you may copy a tape to a file, a file to a tape, a file +to a file, or a tape to a tape. For tape to tape, you will need two tape +drives. (a later version is planned that will buffer it to disk). In the +process of making the copy, no record of the information written to the new +Volume is stored in the catalog. This means that the new Volume, though it +contains valid backup data, cannot be accessed directly from existing catalog +entries. If you wish to be able to use the Volume with the Console restore +command, for example, you must first bscan the new Volume into the catalog. + +\subsection{bcopy Command Options} +\index[general]{Options!bcopy Command} +\index[general]{Bcopy Command Options} + +\footnotesize +\begin{verbatim} +Usage: bcopy [-d debug_level] + -b bootstrap specify a bootstrap file + -c specify configuration file + -dnn set debug level to nn + -i specify input Volume names (separated by |) + -o specify output Volume names (separated by |) + -p proceed inspite of I/O errors + -v verbose + -w dir specify working directory (default /tmp) + -? print this message +\end{verbatim} +\normalsize + +By using a {\bf bootstrap} file, you can copy parts of a Bacula archive file +to another archive. + +One of the objectives of this program is to be able to recover as much data as +possible from a damaged tape. However, the current version does not yet have +this feature. + +As this is a new program, any feedback on its use would be appreciated. In +addition, I only have a single tape drive, so I have never been able to test +this program with two tape drives. + +\section{btape} +\label{btape} +\index[general]{Btape} +\index[general]{program!btape} + +This program permits a number of elementary tape operations via a tty command +interface. It works only with tapes and not with other kinds of Bacula +storage media (DVD, File, ...). The {\bf test} command, described below, +can be very useful for testing older tape drive compatibility problems. +Aside from initial testing of tape drive compatibility with {\bf Bacula}, +{\bf btape} will be mostly used by developers writing new tape drivers. + +{\bf btape} can be dangerous to use with existing {\bf Bacula} tapes because +it will relabel a tape or write on the tape if so requested regardless that +the tape may contain valuable data, so please be careful and use it only on +blank tapes. 
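+
+As a purely illustrative sketch (the configuration file path and the
+device name {\bf /dev/nst0} are assumptions that must match your own
+Storage daemon setup), {\bf btape} might be started on a scratch tape
+with:
+
+\footnotesize
+\begin{verbatim}
+./btape -c bacula-sd.conf /dev/nst0
+\end{verbatim}
+\normalsize
+
+after which the commands described below can be entered at its
+interactive prompt.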
+ +To work properly, {\bf btape} needs to read the Storage daemon's configuration +file. As a default, it will look for {\bf bacula-sd.conf} in the current +directory. If your configuration file is elsewhere, please use the {\bf -c} +option to specify where. + +The physical device name must be specified on the command line, and this +same device name must be present in the Storage daemon's configuration file +read by {\bf btape} + +\footnotesize +\begin{verbatim} +Usage: btape + -b specify bootstrap file + -c set configuration file to file + -d set debug level to nn + -p proceed inspite of I/O errors + -s turn off signals + -v be verbose + -? print this message. +\end{verbatim} +\normalsize + +\subsection{Using btape to Verify your Tape Drive} +\index[general]{Using btape to Verify your Tape Drive} +\index[general]{Drive!Using btape to Verify your Tape} + +An important reason for this program is to ensure that a Storage daemon +configuration file is defined so that Bacula will correctly read and write +tapes. + +It is highly recommended that you run the {\bf test} command before running +your first Bacula job to ensure that the parameters you have defined for your +storage device (tape drive) will permit {\bf Bacula} to function properly. You +only need to mount a blank tape, enter the command, and the output should be +reasonably self explanatory. Please see the +\ilink{Tape Testing}{TapeTestingChapter} Chapter of this manual for +the details. + +\subsection{btape Commands} +\index[general]{Btape Commands} +\index[general]{Commands!btape} + +The full list of commands are: + +\footnotesize +\begin{verbatim} + Command Description + ======= =========== + autochanger test autochanger + bsf backspace file + bsr backspace record + cap list device capabilities + clear clear tape errors + eod go to end of Bacula data for append + eom go to the physical end of medium + fill fill tape, write onto second volume + unfill read filled tape + fsf forward space a file + fsr forward space a record + help print this command + label write a Bacula label to the tape + load load a tape + quit quit btape + rawfill use write() to fill tape + readlabel read and print the Bacula tape label + rectest test record handling functions + rewind rewind the tape + scan read() tape block by block to EOT and report + scanblocks Bacula read block by block to EOT and report + status print tape status + test General test Bacula tape functions + weof write an EOF on the tape + wr write a single Bacula block + rr read a single record + qfill quick fill command +\end{verbatim} +\normalsize + +The most useful commands are: + +\begin{itemize} +\item test -- test writing records and EOF marks and reading them back. +\item fill -- completely fill a volume with records, then write a few records + on a second volume, and finally, both volumes will be read back. + This command writes blocks containing random data, so your drive will + not be able to compress the data, and thus it is a good test of + the real physical capacity of your tapes. +\item readlabel -- read and dump the label on a Bacula tape. +\item cap -- list the device capabilities as defined in the configuration + file and as perceived by the Storage daemon. + \end{itemize} + +The {\bf readlabel} command can be used to display the details of a Bacula +tape label. This can be useful if the physical tape label was lost or damaged. + + +In the event that you want to relabel a {\bf Bacula}, you can simply use the +{\bf label} command which will write over any existing label. 
However, please +note for labeling tapes, we recommend that you use the {\bf label} command in +the {\bf Console} program since it will never overwrite a valid Bacula tape. + +\section{Other Programs} +\index[general]{Programs!Other} +\index[general]{Other Programs} + +The following programs are general utility programs and in general do not need +a configuration file nor a device name. + +\section{bsmtp} +\label{bsmtp} +\index[general]{Bsmtp} +\index[general]{program!bsmtp} + +{\bf bsmtp} is a simple mail transport program that permits more flexibility +than the standard mail programs typically found on Unix systems. It can even +be used on Windows machines. + +It is called: + +\footnotesize +\begin{verbatim} +Usage: bsmtp [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...] + -c set the Cc: field + -dnn set debug level to nn + -f set the From: field + -h use mailhost:port as the bsmtp server + -l limit the lines accepted to nn + -s set the Subject: field + -? print this message. +\end{verbatim} +\normalsize + +If the {\bf -f} option is not specified, {\bf bsmtp} will use your userid. If +the option {\bf -h} is not specified {\bf bsmtp} will use the value in the environment +variable {\bf bsmtpSERVER} or if there is none {\bf localhost}. By default +port 25 is used. + +If a line count limit is set with the {\bf -l} option, {\bf bsmtp} will +not send an email with a body text exceeding that number of lines. This +is especially useful for large restore job reports where the list of +files restored might produce very long mails your mail-server would +refuse or crash. However, be aware that you will probably suppress the +job report and any error messages unless you check the log file written +by the Director (see the messages resource in this manual for details). + + +{\bf recipients} is a space separated list of email recipients. + +The body of the email message is read from standard input. + +An example of the use of {\bf bsmtp} would be to put the following statement +in the {\bf Messages} resource of your {\bf bacula-dir.conf} file. Note, these +commands should appear on a single line each. + +\footnotesize +\begin{verbatim} + mailcommand = "/home/bacula/bin/bsmtp -h mail.domain.com -f \"\(Bacula\) %r\" + -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "/home/bacula/bin/bsmtp -h mail.domain.com -f \"\(Bacula\) %r\" + -s \"Bacula: Intervention needed for %j\" %r" +\end{verbatim} +\normalsize + +Where you replace {\bf /home/bacula/bin} with the path to your {\bf Bacula} +binary directory, and you replace {\bf mail.domain.com} with the fully +qualified name of your bsmtp (email) server, which normally listens on port +25. For more details on the substitution characters (e.g. \%r) used in the +above line, please see the documentation of the +\ilink{ MailCommand in the Messages Resource}{mailcommand} +chapter of this manual. + +It is HIGHLY recommended that you test one or two cases by hand to make sure +that the {\bf mailhost} that you specified is correct and that it will accept +your email requests. Since {\bf bsmtp} always uses a TCP connection rather +than writing in the spool file, you may find that your {\bf from} address is +being rejected because it does not contain a valid domain, or because your +message is caught in your spam filtering rules. Generally, you should specify +a fully qualified domain name in the {\bf from} field, and depending on +whether your bsmtp gateway is Exim or Sendmail, you may need to modify the +syntax of the from part of the message. 
Please test. + +When running {\bf bsmtp} by hand, you will need to terminate the message by +entering a ctl-d in column 1 of the last line. +% TODO: is "column" the correct terminology for this? + +If you are getting incorrect dates (e.g. 1970) and you are +running with a non-English language setting, you might try adding +a LANG=''en\_US'' immediately before the bsmtp call. + +\section{dbcheck} +\label{dbcheck} +\index[general]{Dbcheck} +\index[general]{program!dbcheck} +{\bf dbcheck} is a simple program that will search for logical +inconsistencies in the Bacula tables in your database, and optionally fix them. +It is a database maintenance routine, in the sense that it can +detect and remove unused rows, but it is not a database repair +routine. To repair a database, see the tools furnished by the +database vendor. Normally dbcheck should never need to be run, +but if Bacula has crashed or you have a lot of Clients, Pools, or +Jobs that you have removed, it could be useful. + +The {\bf dbcheck} program can be found in +the {\bf \lt{}bacula-source\gt{}/src/tools} directory of the source +distribution. Though it is built with the make process, it is not normally +"installed". + +It is called: + +\footnotesize +\begin{verbatim} +Usage: dbcheck [-c config] [-C catalog name] [-d debug_level] + [] + -b batch mode + -C catalog name in the director conf file + -c director conf filename + -dnn set debug level to nn + -f fix inconsistencies + -v verbose + -? print this message +\end{verbatim} +\normalsize + +If the {\bf -c} option is given with the Director's conf file, there is no +need to enter any of the command line arguments, in particular the working +directory as dbcheck will read them from the file. + +If the {\bf -f} option is specified, {\bf dbcheck} will repair ({\bf fix}) the +inconsistencies it finds. Otherwise, it will report only. + +If the {\bf -b} option is specified, {\bf dbcheck} will run in batch mode, and +it will proceed to examine and fix (if -f is set) all programmed inconsistency +checks. If the {\bf -b} option is not specified, {\bf dbcheck} will enter +interactive mode and prompt with the following: + +\footnotesize +\begin{verbatim} +Hello, this is the database check/correct program. +Please select the function you want to perform. + 1) Toggle modify database flag + 2) Toggle verbose flag + 3) Repair bad Filename records + 4) Repair bad Path records + 5) Eliminate duplicate Filename records + 6) Eliminate duplicate Path records + 7) Eliminate orphaned Jobmedia records + 8) Eliminate orphaned File records + 9) Eliminate orphaned Path records + 10) Eliminate orphaned Filename records + 11) Eliminate orphaned FileSet records + 12) Eliminate orphaned Client records + 13) Eliminate orphaned Job records + 14) Eliminate all Admin records + 15) Eliminate all Restore records + 16) All (3-15) + 17) Quit +Select function number: +\end{verbatim} +\normalsize + +By entering 1 or 2, you can toggle the modify database flag (-f option) and +the verbose flag (-v). It can be helpful and reassuring to turn off the modify +database flag, then select one or more of the consistency checks (items 3 +through 9) to see what will be done, then toggle the modify flag on and re-run +the check. + +The inconsistencies examined are the following: + +\begin{itemize} +\item Duplicate filename records. This can happen if you accidentally run two + copies of Bacula at the same time, and they are both adding filenames + simultaneously. It is a rare occurrence, but will create an inconsistent + database. 
If this is the case, you will receive error messages during Jobs
+ warning of duplicate database records. If you are not getting these error
+ messages, there is no reason to run this check.
+\item Repair bad Filename records. This checks and corrects filenames that
+ have a trailing slash. They should not.
+\item Repair bad Path records. This checks and corrects path names that do
+ not have a trailing slash. They should.
+\item Duplicate path records. This can happen if you accidentally run two
+ copies of Bacula at the same time, and they are both adding path names
+ simultaneously. It is a rare occurrence, but will create an inconsistent
+ database. See the item above for why this occurs and how you know it is
+ happening.
+\item Orphaned JobMedia records. This happens when a Job record is deleted
+ (perhaps by a user-issued SQL statement), but the corresponding JobMedia
+ records (one for each Volume used in the Job) were not deleted. Normally, this
+ should not happen, and even if it does, these records generally do not take
+ much space in your database. However, by running this check, you can
+ eliminate any such orphans.
+\item Orphaned File records. This happens when a Job record is deleted
+ (perhaps by a user-issued SQL statement), but the corresponding File records
+ (one for each file saved in the Job) were not deleted. Note, searching for
+ these records can be {\bf very} time-consuming (i.e. it may take hours) for a
+ large database. Normally this should not happen as Bacula takes care to
+ prevent it. Just the same, this check can remove any orphaned File records.
+ It is recommended that you run this once a year since orphaned File records
+ can take a large amount of space in your database. You might
+ want to ensure that you have indexes on JobId, FilenameId, and
+ PathId for the File table in your catalog before running this
+ command.
+\item Orphaned Path records. This condition happens any time a directory is
+ deleted from your system and all associated Job records have been purged.
+ During standard purging (or pruning) of Job records, Bacula does not check
+ for orphaned Path records. As a consequence, over a period of time, old
+ unused Path records will tend to accumulate and use space in your database.
+ This check will eliminate them. It is recommended that you run this
+ check at least once a year.
+\item Orphaned Filename records. This condition happens any time a file is
+ deleted from your system and all associated Job records have been purged.
+ This can happen quite frequently, as a large number of files are
+ created and then deleted. In addition, if you do a system update or
+ delete an entire directory, there can be a very large number of Filename
+ records that remain in the catalog but are no longer used.
+
+ During standard purging (or pruning) of Job records, Bacula does not check
+ for orphaned Filename records. As a consequence, over a period of time, old
+ unused Filename records will accumulate and use space in your database. This
+ check will eliminate them. It is strongly recommended that you run this check
+ at least once a year, and for large databases (more than 200 Megabytes), it is
+ probably better to run this once every six months.
+\item Orphaned Client records. These records can remain in the database long
+ after you have removed a client.
+\item Orphaned Job records. If no client is defined for a job or you do not
+ run a job for a long time, you can accumulate old job records. This option
+ allows you to remove jobs that are not attached to any client (and are thus
+ useless).
+\item All Admin records. This command will remove all Admin records,
+ regardless of their age.
+\item All Restore records. This command will remove all Restore records,
+ regardless of their age.
+\end{itemize}
+
+By the way, I personally run dbcheck only when I have messed up
+my database due to a bug in developing Bacula code, so normally
+you should never need to run dbcheck in spite of the
+recommendations given above, which are given so that users don't
+waste their time running dbcheck too often.
+
+\section{bregex}
+\label{bregex}
+\index[general]{bregex}
+\index[general]{program!bregex}
+
+{\bf bregex} is a simple program that will allow you to test
+regular expressions against a file of data. This can be useful
+because the regex libraries on most systems differ, and in
+addition, regular expressions can be complicated.
+
+{\bf bregex} is found in the src/tools directory and it is
+normally installed with your system binaries. To run it, use:
+
+\begin{verbatim}
+Usage: bregex [-d debug_level] -f <data-file>
+       -f          specify file of data to be matched
+       -l          suppress line numbers
+       -n          print lines that do not match
+       -?          print this message.
+\end{verbatim}
+
+The \lt{}data-file\gt{} is a filename that contains lines
+of data to be matched (or not) against one or more patterns.
+When the program is run, it will prompt you for a regular
+expression pattern, then apply it one line at a time against
+the data in the file. Each line that matches will be printed
+preceded by its line number. You will then be prompted again
+for another pattern.
+
+Enter an empty line for a pattern to terminate the program. You
+can print only lines that do not match by using the -n option,
+and you can suppress printing of line numbers with the -l option.
+
+This program can be useful for testing regular expressions to be
+applied against a list of filenames.
+
+\section{bwild}
+\label{bwild}
+\index[general]{bwild}
+\index[general]{program!bwild}
+
+{\bf bwild} is a simple program that will allow you to test
+wild-card expressions against a file of data.
+
+{\bf bwild} is found in the src/tools directory and it is
+normally installed with your system binaries. To run it, use:
+
+\begin{verbatim}
+Usage: bwild [-d debug_level] -f <data-file>
+       -f          specify file of data to be matched
+       -l          suppress line numbers
+       -n          print lines that do not match
+       -?          print this message.
+\end{verbatim}
+
+The \lt{}data-file\gt{} is a filename that contains lines
+of data to be matched (or not) against one or more patterns.
+When the program is run, it will prompt you for a wild-card
+pattern, then apply it one line at a time against
+the data in the file. Each line that matches will be printed
+preceded by its line number. You will then be prompted again
+for another pattern.
+
+Enter an empty line for a pattern to terminate the program. You
+can print only lines that do not match by using the -n option,
+and you can suppress printing of line numbers with the -l option.
+
+This program can be useful for testing wild-card expressions to be
+applied against a list of filenames.
+
+\section{testfind}
+\label{testfind}
+\index[general]{Testfind}
+\index[general]{program!testfind}
+
+{\bf testfind} permits listing of files using the same search engine that is
+used for the {\bf Include} resource in Job resources. 
Note, much of the +functionality of this program (listing of files to be included) is present in +the +\ilink{estimate command}{estimate} in the Console program. + +The original use of testfind was to ensure that Bacula's file search engine +was correct and to print some statistics on file name and path length. +However, you may find it useful to see what bacula would do with a given {\bf +Include} resource. The {\bf testfind} program can be found in the {\bf +\lt{}bacula-source\gt{}/src/tools} directory of the source distribution. +Though it is built with the make process, it is not normally "installed". + +It is called: + +\footnotesize +\begin{verbatim} +Usage: testfind [-d debug_level] [-] [pattern1 ...] + -a print extended attributes (Win32 debug) + -dnn set debug level to nn + - read pattern(s) from stdin + -? print this message. +Patterns are used for file inclusion -- normally directories. +Debug level>= 1 prints each file found. +Debug level>= 10 prints path/file for catalog. +Errors are always printed. +Files/paths truncated is a number with len> 255. +Truncation is only in the catalog. +\end{verbatim} +\normalsize + +Where a pattern is any filename specification that is valid within an {\bf +Include} resource definition. If none is specified, {\bf /} (the root +directory) is assumed. For example: + +\footnotesize +\begin{verbatim} +./testfind /bin +\end{verbatim} +\normalsize + +Would print the following: + +\footnotesize +\begin{verbatim} +Dir: /bin +Reg: /bin/bash +Lnk: /bin/bash2 -> bash +Lnk: /bin/sh -> bash +Reg: /bin/cpio +Reg: /bin/ed +Lnk: /bin/red -> ed +Reg: /bin/chgrp +... +Reg: /bin/ipcalc +Reg: /bin/usleep +Reg: /bin/aumix-minimal +Reg: /bin/mt +Lnka: /bin/gawk-3.1.0 -> /bin/gawk +Reg: /bin/pgawk +Total files : 85 +Max file length: 13 +Max path length: 5 +Files truncated: 0 +Paths truncated: 0 +\end{verbatim} +\normalsize + +Even though {\bf testfind} uses the same search engine as {\bf Bacula}, each +directory to be listed, must be entered as a separate command line entry or +entered one line at a time to standard input if the {\bf -} option was +specified. + +Specifying a debug level of one (i.e. {\bf -d1}) on the command line will +cause {\bf testfind} to print the raw filenames without showing the Bacula +internal file type, or the link (if any). Debug levels of 10 or greater cause +the filename and the path to be separated using the same algorithm that is +used when putting filenames into the Catalog database. diff --git a/docs/manuals/de/utility/rpm-faq.tex b/docs/manuals/de/utility/rpm-faq.tex new file mode 100644 index 00000000..1e37cc59 --- /dev/null +++ b/docs/manuals/de/utility/rpm-faq.tex @@ -0,0 +1,394 @@ +%% +%% + +\chapter{Bacula RPM Packaging FAQ} +\label{RpmFaqChapter} +\index[general]{FAQ!Bacula\textsuperscript{\textregistered} - RPM Packaging } +\index[general]{Bacula\textsuperscript{\textregistered} - RPM Packaging FAQ } + +\begin{enumerate} +\item + \ilink{How do I build Bacula for platform xxx?}{faq1} +\item + \ilink{How do I control which database support gets built?}{faq2} + +\item + \ilink{What other defines are used?}{faq3} +\item + \ilink{I'm getting errors about not having permission when I try to build the + packages. Do I need to be root?}{faq4} +\item + \ilink{I'm building my own rpms but on all platforms and compiles I get an + unresolved dependency for something called + /usr/afsws/bin/pagsh.}{faq5} +\item + \ilink{I'm building my own rpms because you don't publish for my platform. 
+  Can I get my packages released to sourceforge for other people to use?}{faq6}
+\item
+  \ilink{Is there an easier way than sorting out all these command line options?}{faq7}
+\item
+  \ilink{I just upgraded from 1.36.x to 1.38.x and now my director daemon won't start. It appears to start but dies silently and I get a "connection refused" error when starting the console. What is wrong?}{faq8}
+\item
+  \ilink{There are a lot of rpm packages. Which packages do I need for what?}{faq9}
+\end{enumerate}
+
+\section{Answers}
+\index[general]{Answers }
+
+\begin{enumerate}
+\item
+  \label{faq1}
+  {\bf How do I build Bacula for platform xxx?}
+  The bacula spec file contains defines to build for several platforms:
+  Red Hat 7.x (rh7), Red Hat 8.0 (rh8), Red Hat 9 (rh9), Fedora Core (fc1,
+  fc3, fc4, fc5, fc6, fc7), Whitebox Enterprise Linux 3.0 (wb3), Red Hat Enterprise Linux
+  (rhel3, rhel4, rhel5), Mandrake 10.x (mdk), Mandriva 2006.x (mdv), CentOS (centos3, centos4, centos5),
+  Scientific Linux (sl3, sl4, sl5) and SuSE (su9, su10, su102, su103). The
+  package build is controlled by a mandatory define set at the beginning of
+  the file. These defines basically just control the dependency information
+  that gets coded into the finished rpm package as well
+  as any special configure options required. The platform define may be edited
+  in the spec file directly (by default all defines are set to 0 or "not set").
+  For example, to build the Red Hat 7.x package find the line in the spec file
+  which reads
+
+\footnotesize
+\begin{verbatim}
+  %define rh7 0
+
+\end{verbatim}
+\normalsize
+
+and edit it to read
+
+\footnotesize
+\begin{verbatim}
+  %define rh7 1
+
+\end{verbatim}
+\normalsize
+
+Alternatively, you may pass the define on the command line when calling rpmbuild:
+
+\footnotesize
+\begin{verbatim}
+  rpmbuild -ba --define "build_rh7 1" bacula.spec
+  rpmbuild --rebuild --define "build_rh7 1" bacula-x.x.x-x.src.rpm
+
+\end{verbatim}
+\normalsize
+
+\item
+  \label{faq2}
+  {\bf How do I control which database support gets built?}
+  Another mandatory build define controls which database support is compiled,
+  one of build\_sqlite, build\_mysql or build\_postgresql. To get the MySQL
+  package and support, either change the
+
+\footnotesize
+\begin{verbatim}
+  %define mysql 0
+  OR
+  %define mysql4 0
+  OR
+  %define mysql5 0
+
+\end{verbatim}
+\normalsize
+
+to
+
+\footnotesize
+\begin{verbatim}
+  %define mysql 1
+  OR
+  %define mysql4 1
+  OR
+  %define mysql5 1
+
+\end{verbatim}
+\normalsize
+
+in the spec file directly or pass it to rpmbuild on the command line:
+
+\footnotesize
+\begin{verbatim}
+  rpmbuild -ba --define "build_rh7 1" --define "build_mysql 1" bacula.spec
+  rpmbuild -ba --define "build_rh7 1" --define "build_mysql4 1" bacula.spec
+  rpmbuild -ba --define "build_rh7 1" --define "build_mysql5 1" bacula.spec
+
+\end{verbatim}
+\normalsize
+
+\item
+  \label{faq3}
+  {\bf What other defines are used?}
+  Three other building defines of note are the depkgs\_version, docs\_version and
+  \_rescuever identifiers. These defines are set with each release and must
+  match the version of those sources that are being used to build the packages.
+  You would not ordinarily need to edit these. See also the Build Options section
+  below for other build time options that can be passed on the command line.
+\item
+  \label{faq4}
+  {\bf I'm getting errors about not having permission when I try to build the
+  packages. 
Do I need to be root?} + No, you do not need to be root and, in fact, it is better practice to + build rpm packages as a non-root user. Bacula packages are designed to + be built by a regular user but you must make a few changes on your + system to do this. If you are building on your own system then the + simplest method is to add write permissions for all to the build + directory (/usr/src/redhat/, /usr/src/RPM or /usr/src/packages). + To accomplish this, execute the following command as root: + +\footnotesize +\begin{verbatim} + chmod -R 777 /usr/src/redhat + chmod -R 777 /usr/src/RPM + chmod -R 777 /usr/src/packages + +\end{verbatim} +\normalsize + +If you are working on a shared system where you can not use the method +above then you need to recreate the appropriate above directory tree with all +of its subdirectories inside your home directory. Then create a file named + +{\tt .rpmmacros} + +in your home directory (or edit the file if it already exists) +and add the following line: + +\footnotesize +\begin{verbatim} + %_topdir /home/myuser/redhat + +\end{verbatim} +\normalsize + +Another handy directive for the .rpmmacros file if you wish to suppress the +creation of debug rpm packages is: + +\footnotesize +\begin{verbatim} + %debug_package %{nil} + +\end{verbatim} + +\normalsize + +\item + \label{faq5} + {\bf I'm building my own rpms but on all platforms and compiles I get an + unresolved dependency for something called /usr/afsws/bin/pagsh.} This + is a shell from the OpenAFS (Andrew File System). If you are seeing + this then you chose to include the docs/examples directory in your + package. One of the example scripts in this directory is a pagsh + script. Rpmbuild, when scanning for dependencies, looks at the shebang + line of all packaged scripts in addition to checking shared libraries. + To avoid this do not package the examples directory. If you are seeing this + problem you are building a very old bacula package as the examples have been + removed from the doc packaging. + +\item + \label{faq6} + {\bf I'm building my own rpms because you don't publish for my platform. + Can I get my packages released to sourceforge for other people to use?} Yes, + contributions from users are accepted and appreciated. Please examine the + directory platforms/contrib-rpm in the source code for further information. + +\item + \label{faq7} + {\bf Is there an easier way than sorting out all these command line options?} Yes, + there is a gui wizard shell script which you can use to rebuild the src rpm package. + Look in the source archive for platforms/contrib-rpm/rpm\_wizard.sh. This script will + allow you to specify build options using GNOME dialog screens. It requires zenity. + +\item + \label{faq8} + {\bf I just upgraded from 1.36.x to 1.38.x and now my director daemon +won't start. It appears to start but dies silently and I get a "connection +refused" error when starting the console. What is wrong?} Beginning with +1.38 the rpm packages are configured to run the director and storage +daemons as a non-root user. The file daemon runs as user root and group +bacula, the storage daemon as user bacula and group disk, and the director +as user bacula and group bacula. If you are upgrading you will need to +change some file permissions for things to work. 
Execute the following
+commands as root:
+
+\footnotesize
+\begin{verbatim}
+  chown bacula.bacula /var/bacula/*
+  chown root.bacula /var/bacula/bacula-fd.9102.state
+  chown bacula.disk /var/bacula/bacula-sd.9103.state
+
+\end{verbatim}
+\normalsize
+
+Further, if you are using File storage volumes rather than tapes, those
+files will also need to have ownership set to user bacula and group bacula.
+
+\item
+  \label{faq9}
+  {\bf There are a lot of rpm packages. Which packages do I need for
+what?} For a Bacula server you need to select the package based upon your
+preferred catalog database: one of bacula-mysql, bacula-postgresql or
+bacula-sqlite. If your system does not provide an mtx package you also
+need bacula-mtx to satisfy that dependency. For a client machine you need
+only install bacula-client. Optionally, for either server or client
+machines, you may install a graphical console, bacula-gconsole and/or
+bacula-wxconsole. The Bacula Administration Tool is installed with the
+bacula-bat package. One last package, bacula-updatedb, is required only when
+upgrading a server more than one database revision level.
+
+\item {\bf Support for RHEL3/4/5, CentOS 3/4/5, Scientific Linux 3/4/5 and x86\_64}
+  The examples below show
+  explicit build support for RHEL4 and CentOS 4. Build support
+  for x86\_64 has also been added.
+\end{enumerate}
+
+\footnotesize
+\begin{verbatim}
+Build with one of these three commands:
+
+rpmbuild --rebuild \
+   --define "build_rhel4 1" \
+   --define "build_sqlite 1" \
+   bacula-1.38.3-1.src.rpm
+
+rpmbuild --rebuild \
+   --define "build_rhel4 1" \
+   --define "build_postgresql 1" \
+   bacula-1.38.3-1.src.rpm
+
+rpmbuild --rebuild \
+   --define "build_rhel4 1" \
+   --define "build_mysql4 1" \
+   bacula-1.38.3-1.src.rpm
+
+For CentOS substitute '--define "build_centos4 1"' in place of rhel4.
+For Scientific Linux substitute '--define "build_sl4 1"' in place of rhel4. 
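+
+Put together, an illustrative CentOS 4 build with MySQL 4 support (shown
+only as an example of combining the substitutions described above) would be:
+
+rpmbuild --rebuild \
+   --define "build_centos4 1" \
+   --define "build_mysql4 1" \
+   bacula-1.38.3-1.src.rpm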
+
+For 64 bit support add '--define "build_x86_64 1"'
+\end{verbatim}
+\normalsize
+
+\section{Build Options}
+\index[general]{Build Options}
+The spec file currently supports building on the following platforms:
+\footnotesize
+\begin{verbatim}
+Red Hat builds
+--define "build_rh7 1"
+--define "build_rh8 1"
+--define "build_rh9 1"
+
+Fedora Core build
+--define "build_fc1 1"
+--define "build_fc3 1"
+--define "build_fc4 1"
+--define "build_fc5 1"
+--define "build_fc6 1"
+--define "build_fc7 1"
+
+Whitebox Enterprise build
+--define "build_wb3 1"
+
+Red Hat Enterprise builds
+--define "build_rhel3 1"
+--define "build_rhel4 1"
+--define "build_rhel5 1"
+
+CentOS build
+--define "build_centos3 1"
+--define "build_centos4 1"
+--define "build_centos5 1"
+
+Scientific Linux build
+--define "build_sl3 1"
+--define "build_sl4 1"
+--define "build_sl5 1"
+
+SuSE build
+--define "build_su9 1"
+--define "build_su10 1"
+--define "build_su102 1"
+--define "build_su103 1"
+
+Mandrake 10.x build
+--define "build_mdk 1"
+
+Mandriva build
+--define "build_mdv 1"
+
+MySQL support:
+for mysql 3.23.x support define this
+--define "build_mysql 1"
+if using mysql 4.x define this,
+currently: Mandrake 10.x, Mandriva 2006.0, SuSE 9.x & 10.0, FC4 & RHEL4
+--define "build_mysql4 1"
+if using mysql 5.x define this,
+currently: SuSE 10.1 & FC5
+--define "build_mysql5 1"
+
+PostgreSQL support:
+--define "build_postgresql 1"
+
+Sqlite support:
+--define "build_sqlite 1"
+
+Build the client rpm only in place of one of the above database full builds:
+--define "build_client_only 1"
+
+X86-64 support:
+--define "build_x86_64 1"
+
+Suppress build of bgnome-console:
+--define "nobuild_gconsole 1"
+
+Build the WXWindows console:
+requires wxGTK >= 2.6
+--define "build_wxconsole 1"
+
+Build the Bacula Administration Tool:
+requires QT >= 4.2
+--define "build_bat 1"
+
+Build python scripting support:
+--define "build_python 1"
+
+Modify the Packager tag for third party packages:
+--define "contrib_packager Your Name "
+
+\end{verbatim}
+\normalsize
+
+\section{RPM Install Problems}
+\index[general]{RPM Install Problems}
+In general, the RPMs, once properly built, should install correctly.
+However, when attempting to run the daemons, a number of problems
+can occur:
+\begin{itemize}
+\item [Wrong /var/bacula Permissions]
+  By default, the Director and Storage daemon do not run with
+  root permission. If /var/bacula is owned by root, then it
+  is possible that the Director and the Storage daemon will not
+  be able to access this directory, which is used as the Working
+  Directory. To fix this, the easiest thing to do is:
+\begin{verbatim}
+  chown bacula:bacula /var/bacula
+\end{verbatim}
+  Note: as of 1.38.8 /var/bacula is installed root:bacula with
+  permissions 770.
+\item [The Storage daemon cannot Access the Tape drive]
+  This can happen in some older RPM releases where the Storage
+  daemon ran under userid bacula, group bacula. There are two
+  ways of fixing this: the best is to modify the /etc/init.d/bacula-sd
+  file so that it starts the Storage daemon with group "disk".
+  The second way to fix the problem is to change the permissions
+  of your tape drive (usually /dev/nst0) so that Bacula can access it.
+  You will probably need to change the permissions of the SCSI control
+  device as well, which is usually /dev/sg0. The exact names depend
+  on your configuration; please see the Tape Testing chapter for
+  more information on devices.
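+  As a minimal sketch of the second approach (the device names /dev/nst0
+  and /dev/sg0 are only the common defaults mentioned above; adjust them
+  to your system), granting the bacula group access might look like:
+\begin{verbatim}
+  chgrp bacula /dev/nst0 /dev/sg0
+  chmod 660 /dev/nst0 /dev/sg0
+\end{verbatim}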
+\end{itemize}
+
diff --git a/docs/manuals/de/utility/setup.sm b/docs/manuals/de/utility/setup.sm
new file mode 100644
index 00000000..7c88dc61
--- /dev/null
+++ b/docs/manuals/de/utility/setup.sm
@@ -0,0 +1,23 @@
+/*
+ * html2latex
+ */
+
+available {
+  sun4_sunos.4
+  sun4_solaris.2
+  rs_aix.3
+  rs_aix.4
+  sgi_irix
+}
+
+description {
+  From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX
+}
+
+install {
+  bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex
+  bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag
+  bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag
+  bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag
+  man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1
+}
diff --git a/docs/manuals/de/utility/translate_images.pl b/docs/manuals/de/utility/translate_images.pl
new file mode 100755
index 00000000..c7225118
--- /dev/null
+++ b/docs/manuals/de/utility/translate_images.pl
@@ -0,0 +1,185 @@
+#!/usr/bin/perl -w
+#
+use strict;
+
+# Used to change the names of the image files generated by latex2html from imgxx.png
+# to meaningful names. Provision is made to go either from or to the meaningful names.
+# The meaningful names are obtained from a file called imagename_translations, which
+# is generated by extensions to latex2html in the make_image_file subroutine in
+# bacula.perl.
+
+# Opens the file imagename_translations and reads the contents into a hash.
+# The hash is created with the imgxx.png files as the key if processing TO
+# meaningful filenames, and with the meaningful filenames as the key if
+# processing FROM meaningful filenames.
+# Then opens the html file(s) indicated in the command-line arguments and
+# changes all image references according to the translations described in the
+# above file. Finally, it renames the image files.
+#
+# Original creation: 3-27-05 by Karl Cunningham.
+# Modified 5-21-05 to go FROM and TO meaningful filenames.
+#
+my $TRANSFILE = "imagename_translations";
+my $path;
+
+# Loads the contents of $TRANSFILE file into the hash referenced in the first
+# argument. The hash is loaded to translate old to new if $direction is 0,
+# otherwise it is loaded to translate new to old. In this context, the
+# 'old' filename is the meaningful name, and the 'new' filename is the
+# imgxx.png filename. It is assumed that the old image is the one that
+# latex2html has used as the source to create the imgxx.png filename.
+# The filename extension is taken from the file
+sub read_transfile {
+    my ($trans,$direction) = @_;
+
+    if (!open IN,"<$path$TRANSFILE") {
+        print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n";
+        print "   Image filename translation aborted\n\n";
+        exit 0;
+    }
+
+    while (<IN>) {
+        chomp;
+        my ($new,$old) = split(/\001/);
+
+        # Old filenames will usually have a leading ./ which we don't need.
+        $old =~ s/^\.\///;
+
+        # The filename extension of the old filename must be made to match
+        # the new filename because it indicates the encoding format of the image.
+        my ($ext) = $new =~ /(\.[^\.]*)$/;
+        $old =~ s/\.[^\.]*$/$ext/;
+        if ($direction == 0) {
+            $trans->{$new} = $old;
+        } else {
+            $trans->{$old} = $new;
+        }
+    }
+    close IN;
+}
+
+# Translates the image names in the file given as the first argument, according to
+# the translations in the hash that is given as the second argument. 
+# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. 
+ if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/de/utility/update_version b/docs/manuals/de/utility/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/de/utility/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/utility/update_version.in b/docs/manuals/de/utility/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/de/utility/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/de/utility/utility.tex b/docs/manuals/de/utility/utility.tex new file mode 100644 index 00000000..2efa5cde --- /dev/null +++ b/docs/manuals/de/utility/utility.tex @@ -0,0 +1,79 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Utility Programs} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". 
+} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{progs} +\include{bimagemgr-chapter} +\include{rpm-faq} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] + +\end{document} diff --git a/docs/manuals/de/utility/version.tex b/docs/manuals/de/utility/version.tex new file mode 100644 index 00000000..82d910aa --- /dev/null +++ b/docs/manuals/de/utility/version.tex @@ -0,0 +1 @@ +2.3.6 (04 November 2007) diff --git a/docs/manuals/de/utility/version.tex.in b/docs/manuals/de/utility/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/de/utility/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/en/concepts/catmaintenance.tex b/docs/manuals/en/concepts/catmaintenance.tex deleted file mode 100644 index eeb36b8b..00000000 --- a/docs/manuals/en/concepts/catmaintenance.tex +++ /dev/null @@ -1,762 +0,0 @@ -%% -%% - -\chapter{Catalog Maintenance} -\label{CatMaintenanceChapter} -\index[general]{Maintenance!Catalog } -\index[general]{Catalog Maintenance } - -Without proper setup and maintenance, your Catalog may continue to grow -indefinitely as you run Jobs and backup Files, and/or it may become -very inefficient and slow. How fast the size of your -Catalog grows depends on the number of Jobs you run and how many files they -backup. By deleting records within the database, you can make space available -for the new records that will be added during the next Job. By constantly -deleting old expired records (dates older than the Retention period), your -database size will remain constant. - -If you started with the default configuration files, they already contain -reasonable defaults for a small number of machines (less than 5), so if you -fall into that case, catalog maintenance will not be urgent if you have a few -hundred megabytes of disk space free. Whatever the case may be, some knowledge -of retention periods will be useful. -\label{Retention} - -\section{Setting Retention Periods} -\index[general]{Setting Retention Periods } -\index[general]{Periods!Setting Retention } - -{\bf Bacula} uses three Retention periods: the {\bf File Retention} period, -the {\bf Job Retention} period, and the {\bf Volume Retention} period. Of -these three, the File Retention period is by far the most important in -determining how large your database will become. - -The {\bf File Retention} and the {\bf Job Retention} are specified in each -Client resource as is shown below. The {\bf Volume Retention} period is -specified in the Pool resource, and the details are given in the next chapter -of this manual. - -\begin{description} - -\item [File Retention = \lt{}time-period-specification\gt{}] - \index[dir]{File Retention } - The File Retention record defines the length of time that Bacula will keep -File records in the Catalog database. When this time period expires, and if -{\bf AutoPrune} is set to {\bf yes}, Bacula will prune (remove) File records -that are older than the specified File Retention period. The pruning will -occur at the end of a backup Job for the given Client. Note that the Client -database record contains a copy of the File and Job retention periods, but -Bacula uses the current values found in the Director's Client resource to do -the pruning. 
- -Since File records in the database account for probably 80 percent of the -size of the database, you should carefully determine exactly what File -Retention period you need. Once the File records have been removed from -the database, you will no longer be able to restore individual files -in a Job. However, with Bacula version 1.37 and later, as long as the -Job record still exists, you will be able to restore all files in the -job. - -Retention periods are specified in seconds, but as a convenience, there are -a number of modifiers that permit easy specification in terms of minutes, -hours, days, weeks, months, quarters, or years on the record. See the -\ilink{ Configuration chapter}{Time} of this manual for additional details -of modifier specification. - -The default File retention period is 60 days. - -\item [Job Retention = \lt{}time-period-specification\gt{}] - \index[dir]{Job Retention } - The Job Retention record defines the length of time that {\bf Bacula} -will keep Job records in the Catalog database. When this time period -expires, and if {\bf AutoPrune} is set to {\bf yes} Bacula will prune -(remove) Job records that are older than the specified Job Retention -period. Note, if a Job record is selected for pruning, all associated File -and JobMedia records will also be pruned regardless of the File Retention -period set. As a consequence, you normally will set the File retention -period to be less than the Job retention period. - -As mentioned above, once the File records are removed from the database, -you will no longer be able to restore individual files from the Job. -However, as long as the Job record remains in the database, you will be -able to restore all the files backuped for the Job (on version 1.37 and -later). As a consequence, it is generally a good idea to retain the Job -records much longer than the File records. - -The retention period is specified in seconds, but as a convenience, there -are a number of modifiers that permit easy specification in terms of -minutes, hours, days, weeks, months, quarters, or years. See the \ilink{ -Configuration chapter}{Time} of this manual for additional details of -modifier specification. - -The default Job Retention period is 180 days. - -\item [AutoPrune = \lt{}yes/no\gt{}] - \index[dir]{AutoPrune } - If AutoPrune is set to {\bf yes} (default), Bacula will automatically apply -the File retention period and the Job retention period for the Client at the -end of the Job. - -If you turn this off by setting it to {\bf no}, your Catalog will grow each -time you run a Job. -\end{description} - -\label{CompactingMySQL} -\section{Compacting Your MySQL Database} -\index[general]{Database!Compacting Your MySQL } -\index[general]{Compacting Your MySQL Database } - -Over time, as noted above, your database will tend to grow. I've noticed that -even though Bacula regularly prunes files, {\bf MySQL} does not effectively -use the space, and instead continues growing. To avoid this, from time to -time, you must compact your database. Normally, large commercial database such -as Oracle have commands that will compact a database to reclaim wasted file -space. MySQL has the {\bf OPTIMIZE TABLE} command that you can use, and SQLite -version 2.8.4 and greater has the {\bf VACUUM} command. We leave it to you to -explore the utility of the {\bf OPTIMIZE TABLE} command in MySQL. - -All database programs have some means of writing the database out in ASCII -format and then reloading it. 
Doing so will re-create the database from -scratch producing a compacted result, so below, we show you how you can do -this for MySQL, PostgreSQL and SQLite. - -For a {\bf MySQL} database, you could write the Bacula database as an ASCII -file (bacula.sql) then reload it by doing the following: - -\footnotesize -\begin{verbatim} -mysqldump -f --opt bacula > bacula.sql -mysql bacula < bacula.sql -rm -f bacula.sql -\end{verbatim} -\normalsize - -Depending on the size of your database, this will take more or less time and a -fair amount of disk space. For example, if I cd to the location of the MySQL -Bacula database (typically /opt/mysql/var or something similar) and enter: - -\footnotesize -\begin{verbatim} -du bacula -\end{verbatim} -\normalsize - -I get {\bf 620,644} which means there are that many blocks containing 1024 -bytes each or approximately 635 MB of data. After doing the {\bf mysqldump}, I -had a bacula.sql file that had {\bf 174,356} blocks, and after doing the {\bf -mysql} command to recreate the database, I ended up with a total of {\bf -210,464} blocks rather than the original {\bf 629,644}. In other words, the -compressed version of the database took approximately one third of the space -of the database that had been in use for about a year. - -As a consequence, I suggest you monitor the size of your database and from -time to time (once every six months or year), compress it. - -\label{DatabaseRepair} -\label{RepairingMySQL} -\section{Repairing Your MySQL Database} -\index[general]{Database!Repairing Your MySQL } -\index[general]{Repairing Your MySQL Database } - -If you find that you are getting errors writing to your MySQL database, or -Bacula hangs each time it tries to access the database, you should consider -running MySQL's database check and repair routines. The program you need to -run depends on the type of database indexing you are using. If you are using -the default, you will probably want to use {\bf myisamchk}. For more details -on how to do this, please consult the MySQL document at: -\elink{ -http://www.mysql.com/doc/en/Repair.html} -{http://www.mysql.com/doc/en/Repair.html}. - -If the errors you are getting are simply SQL warnings, then you might try -running dbcheck before (or possibly after) using the MySQL database repair -program. It can clean up many of the orphaned record problems, and certain -other inconsistencies in the Bacula database. - -A typical cause of MySQL database problems is if your partition fills. In -such a case, you will need to create additional space on the partition or -free up some space then repair the database probably using {\bf myisamchk}. -Recently my root partition filled and the MySQL database was corrupted. -Simply running {\bf myisamchk -r} did not fix the problem. However, -the following script did the trick for me: - -\footnotesize -\begin{verbatim} -#!/bin/sh -for i in *.MYD ; do - mv $i x${i} - t=`echo $i | cut -f 1 -d '.' 
-` - mysql bacula <bacula.db -select * from sqlite_master where type='index' and tbl_name='File'; -\end{verbatim} -\normalsize - -If the indexes are not present, especially the JobId index, you can -create them with the following commands: - -\footnotesize -\begin{verbatim} -mysql bacula -CREATE INDEX file_jobid_idx on File (JobId); -CREATE INDEX file_jfp_idx on File (Job, FilenameId, PathId); -\end{verbatim} -\normalsize - - - -\label{CompactingPostgres} -\section{Compacting Your PostgreSQL Database} -\index[general]{Database!Compacting Your PostgreSQL } -\index[general]{Compacting Your PostgreSQL Database } - -Over time, as noted above, your database will tend to grow. I've noticed that -even though Bacula regularly prunes files, PostgreSQL has a {\bf VACUUM} -command that will compact your database for you. Alternatively you may want to -use the {\bf vacuumdb} command, which can be run from a cron job. - -All database programs have some means of writing the database out in ASCII -format and then reloading it. Doing so will re-create the database from -scratch producing a compacted result, so below, we show you how you can do -this for PostgreSQL. - -For a {\bf PostgreSQL} database, you could write the Bacula database as an -ASCII file (bacula.sql) then reload it by doing the following: - -\footnotesize -\begin{verbatim} -pg_dump -c bacula > bacula.sql -cat bacula.sql | psql bacula -rm -f bacula.sql -\end{verbatim} -\normalsize - -Depending on the size of your database, this will take more or less time and a -fair amount of disk space. For example, you can {\bf cd} to the location of -the Bacula database (typically /usr/local/pgsql/data or possible -/var/lib/pgsql/data) and check the size. - -There are certain PostgreSQL users who do not recommend the above -procedure. They have the following to say: -PostgreSQL does not -need to be dumped/restored to keep the database efficient. A normal -process of vacuuming will prevent the database from every getting too -large. If you want to fine-tweak the database storage, commands such -as VACUUM FULL, REINDEX, and CLUSTER exist specifically to keep you -from having to do a dump/restore. - -Finally, you might want to look at the PostgreSQL documentation on -this subject at -\elink{http://www.postgresql.org/docs/8.1/interactive/maintenance.html} -{http://www.postgresql.org/docs/8.1/interactive/maintenance.html}. - -\section{Compacting Your SQLite Database} -\index[general]{Compacting Your SQLite Database } -\index[general]{Database!Compacting Your SQLite } - -First please read the previous section that explains why it is necessary to -compress a database. SQLite version 2.8.4 and greater have the {\bf Vacuum} -command for compacting the database. - -\footnotesize -\begin{verbatim} -cd {\bf working-directory} -echo 'vacuum;' | sqlite bacula.db -\end{verbatim} -\normalsize - -As an alternative, you can use the following commands, adapted to your system: - - -\footnotesize -\begin{verbatim} -cd {\bf working-directory} -echo '.dump' | sqlite bacula.db > bacula.sql -rm -f bacula.db -sqlite bacula.db < bacula.sql -rm -f bacula.sql -\end{verbatim} -\normalsize - -Where {\bf working-directory} is the directory that you specified in the -Director's configuration file. Note, in the case of SQLite, it is necessary to -completely delete (rm) the old database before creating a new compressed -version. 
- -\section{Migrating from SQLite to MySQL} -\index[general]{MySQL!Migrating from SQLite to } -\index[general]{Migrating from SQLite to MySQL } - -You may begin using Bacula with SQLite then later find that you want to switch -to MySQL for any of a number of reasons: SQLite tends to use more disk than -MySQL; when the database is corrupted it is often more catastrophic than -with MySQL or PostgreSQL. -Several users have succeeded in converting from SQLite to MySQL by -exporting the MySQL data and then processing it with Perl scripts -prior to putting it into MySQL. This is, however, not a simple -process. - -\label{BackingUpBacula} -\section{Backing Up Your Bacula Database} -\index[general]{Backing Up Your Bacula Database } -\index[general]{Database!Backing Up Your Bacula } - -If ever the machine on which your Bacula database crashes, and you need to -restore from backup tapes, one of your first priorities will probably be to -recover the database. Although Bacula will happily backup your catalog -database if it is specified in the FileSet, this is not a very good way to do -it, because the database will be saved while Bacula is modifying it. Thus the -database may be in an instable state. Worse yet, you will backup the database -before all the Bacula updates have been applied. - -To resolve these problems, you need to backup the database after all the backup -jobs have been run. In addition, you will want to make a copy while Bacula is -not modifying it. To do so, you can use two scripts provided in the release -{\bf make\_catalog\_backup} and {\bf delete\_catalog\_backup}. These files -will be automatically generated along with all the other Bacula scripts. The -first script will make an ASCII copy of your Bacula database into {\bf -bacula.sql} in the working directory you specified in your configuration, and -the second will delete the {\bf bacula.sql} file. - -The basic sequence of events to make this work correctly is as follows: - -\begin{itemize} -\item Run all your nightly backups -\item After running your nightly backups, run a Catalog backup Job -\item The Catalog backup job must be scheduled after your last nightly backup - -\item You use {\bf RunBeforeJob} to create the ASCII backup file and {\bf - RunAfterJob} to clean up -\end{itemize} - -Assuming that you start all your nightly backup jobs at 1:05 am (and that they -run one after another), you can do the catalog backup with the following -additional Director configuration statements: - -\footnotesize -\begin{verbatim} -# Backup the catalog database (after the nightly save) -Job { - Name = "BackupCatalog" - Type = Backup - Client=rufus-fd - FileSet="Catalog" - Schedule = "WeeklyCycleAfterBackup" - Storage = DLTDrive - Messages = Standard - Pool = Default - # WARNING!!! Passing the password via the command line is insecure. - # see comments in make_catalog_backup for details. - RunBeforeJob = "/home/kern/bacula/bin/make_catalog_backup" - RunAfterJob = "/home/kern/bacula/bin/delete_catalog_backup" - Write Bootstrap = "/home/kern/bacula/working/BackupCatalog.bsr" -} -# This schedule does the catalog. It starts after the WeeklyCycle -Schedule { - Name = "WeeklyCycleAfterBackup - Run = Level=Full sun-sat at 1:10 -} -# This is the backup of the catalog -FileSet { - Name = "Catalog" - Include { - Options { - signature=MD5 - } - File = \lt{}working_directory\gt{}/bacula.sql - } -} -\end{verbatim} -\normalsize - -Be sure to write a bootstrap file as in the above example. 
However, it is preferable -to write or copy the bootstrap file to another computer. It will allow -you to quickly recover the database backup should that be necessary. If -you do not have a bootstrap file, it is still possible to recover your -database backup, but it will be more work and take longer. - - -\label{BackingUpBaculaSecurityConsiderations} -\section{Security considerations} -\index[general]{Backing Up Your Bacula Database - Security Considerations } -\index[general]{Database!Backing Up Your Bacula Database - Security Considerations } - -We provide make\_catalog\_backup as an example of what can be used to backup -your Bacula database. We expect you to take security precautions relevant -to your situation. make\_catalog\_backup is designed to take a password on -the command line. This is fine on machines with only trusted users. It is -not acceptable on machines without trusted users. Most database systems -provide a alternative method, which does not place the password on the -command line. - -The make\_catalog\_backup script contains some warnings about how to use it. Please -read those tips. - -To help you get started, we know PostgreSQL has a password file, -\elink{ -.pgpass}{http://www.postgresql.org/docs/8.2/static/libpq-pgpass.html}, and -we know MySQL has -\elink{ .my.cnf}{http://dev.mysql.com/doc/refman/4.1/en/password-security.html}. - -Only you can decide what is appropriate for your situation. We have provided -you with a starting point. We hope it helps. - - -\label{BackingUPOtherDBs} -\section{Backing Up Third Party Databases} -\index[general]{Backing Up Third Party Databases } -\index[general]{Databases!Backing Up Third Party } - -If you are running a database in production mode on your machine, Bacula will -happily backup the files, but if the database is in use while Bacula is -reading it, you may back it up in an unstable state. - -The best solution is to shutdown your database before backing it up, or use -some tool specific to your database to make a valid live copy perhaps by -dumping the database in ASCII format. I am not a database expert, so I cannot -provide you advice on how to do this, but if you are unsure about how to -backup your database, you might try visiting the Backup Central site, which -has been renamed Storage Mountain (www.backupcentral.com). In particular, -their -\elink{ Free Backup and Recovery -Software}{http://www.backupcentral.com/toc-free-backup-software.html} page has -links to scripts that show you how to shutdown and backup most major -databases. -\label{Size} - -\section{Database Size} -\index[general]{Size!Database } -\index[general]{Database Size } - -As mentioned above, if you do not do automatic pruning, your Catalog will grow -each time you run a Job. Normally, you should decide how long you want File -records to be maintained in the Catalog and set the {\bf File Retention} -period to that time. Then you can either wait and see how big your Catalog -gets or make a calculation assuming approximately 154 bytes for each File -saved and knowing the number of Files that are saved during each backup and -the number of Clients you backup. - -For example, suppose you do a backup of two systems, each with 100,000 files. -Suppose further that you do a Full backup weekly and an Incremental every day, -and that the Incremental backup typically saves 4,000 files. The size of your -database after a month can roughly be calculated as: - -\footnotesize -\begin{verbatim} - Size = 154 * No. 
Systems * (100,000 * 4 + 10,000 * 26) -\end{verbatim} -\normalsize - -where we have assumed four weeks in a month and 26 incremental backups per month. -This would give the following: - -\footnotesize -\begin{verbatim} - Size = 154 * 2 * (100,000 * 4 + 10,000 * 26) -or - Size = 308 * (400,000 + 260,000) -or - Size = 203,280,000 bytes -\end{verbatim} -\normalsize - -So for the above two systems, we should expect to have a database size of -approximately 200 Megabytes. Of course, this will vary according to how many -files are actually backed up. - -Below are some statistics for a MySQL database containing Job records for five -Clients beginning September 2001 through May 2002 (8.5 months) and File -records for the last 80 days. (Older File records have been pruned). For these -systems, only the user files and system files that change are backed up. The -core part of the system is assumed to be easily reloaded from the Red Hat rpms. - - -In the list below, the files (corresponding to Bacula Tables) with the -extension .MYD contain the data records whereas files with the extension .MYI -contain indexes. - -You will note that the File records (containing the file attributes) make up -the large bulk of the number of records as well as the space used (459 Mega -Bytes including the indexes). As a consequence, the most important Retention -period will be the {\bf File Retention} period. A quick calculation shows that -for each File that is saved, the database grows by approximately 150 bytes. - -\footnotesize -\begin{verbatim} - Size in - Bytes Records File - ============ ========= =========== - 168 5 Client.MYD - 3,072 Client.MYI - 344,394,684 3,080,191 File.MYD - 115,280,896 File.MYI - 2,590,316 106,902 Filename.MYD - 3,026,944 Filename.MYI - 184 4 FileSet.MYD - 2,048 FileSet.MYI - 49,062 1,326 JobMedia.MYD - 30,720 JobMedia.MYI - 141,752 1,378 Job.MYD - 13,312 Job.MYI - 1,004 11 Media.MYD - 3,072 Media.MYI - 1,299,512 22,233 Path.MYD - 581,632 Path.MYI - 36 1 Pool.MYD - 3,072 Pool.MYI - 5 1 Version.MYD - 1,024 Version.MYI -\end{verbatim} -\normalsize - -This database has a total size of approximately 450 Megabytes. - -If we were using SQLite, the determination of the total database size would be -much easier since it is a single file, but we would have less insight to the -size of the individual tables as we have in this case. - -Note, SQLite databases may be as much as 50\% larger than MySQL databases due -to the fact that all data is stored as ASCII strings. That is even binary -integers are stored as ASCII strings, and this seems to increase the space -needed. diff --git a/docs/manuals/en/concepts/oldfileset.tex b/docs/manuals/en/concepts/oldfileset.tex deleted file mode 100644 index 43a190fa..00000000 --- a/docs/manuals/en/concepts/oldfileset.tex +++ /dev/null @@ -1,677 +0,0 @@ -%% -%% - -\chapter{The Old FileSet Resource} -\label{OldFileSetChapter} -\label{FileSetResource} -\index[general]{Resource!Old FileSet } -\index[general]{Old FileSet Resource } - -Note, this form of the FileSet resource still works but has been replaced by a -new more flexible form in Bacula version 1.34.3. As a consequence, you are -encouraged to convert to the new form as this one is deprecated and will be -removed in a future version. - -The FileSet resource defines what files are to be included in a backup job. At -least one {\bf FileSet} resource is required. 
It consists of a list of files -or directories to be included, a list of files or directories to be excluded -and the various backup options such as compression, encryption, and signatures -that are to be applied to each file. - -Any change to the list of the included files will cause Bacula to -automatically create a new FileSet (defined by the name and an MD5 checksum of -the Include contents). Each time a new FileSet is created, Bacula will ensure -that the first backup is always a Full save. - -\begin{description} - -\item {\bf FileSet} -\index[dir]{FileSet } -Start of the FileSet records. At least one {\bf FileSet} resource must be -defined. - -\item {\bf Name = \lt{}name\gt{}} -\index[dir]{Name } -The name of the FileSet resource. This record is required. - -\item {\bf Include = \lt{}processing-options\gt{} -\ \ \{ \lt{}file-list\gt{} \} -} -\index[dir]{Include } - -The Include resource specifies the list of files and/or directories to be -included in the backup job. There can be any number of {\bf Include} {\bf -file-list} specifications within the FileSet, each having its own set of {\bf -processing-options}. Normally, the {\bf file-list} consists of one file or -directory name per line. Directory names should be specified without a -trailing slash. Wild-card (or glob matching) does not work when used in an -Include list. It does work in an Exclude list though. Just the same, any -asterisk (*), question mark (?), or left-bracket ([) must be preceded by a -slash (\textbackslash{}\textbackslash{}) if you want it to represent the -literal character. - -You should {\bf always} specify a full path for every directory and file that -you list in the FileSet. In addition, on Windows machines, you should {\bf -always} prefix the directory or filename with the drive specification (e.g. -{\bf c:/xxx}) using Unix directory name separators (forward slash). However, -within an {\bf Exclude} where for some reason the exclude will not work with a -prefixed drive letter. If you want to specify a drive letter in exclusions on -Win32 systems, you can do so by specifying: - -\footnotesize -\begin{verbatim} - Exclude = { /cygdrive/d/archive/Mulberry } -\end{verbatim} -\normalsize - -where in this case, the {\bf /cygdrive/d} \&nsbp; is Cygwin's way of referring -to drives on Win32 (thanks to Mathieu Arnold for this tip). - -Bacula's default for processing directories is to recursively descend in the -directory saving all files and subdirectories. Bacula will not by default -cross file systems (or mount points in Unix parlance). This means that if you -specify the root partition (e.g. {\bf /}), Bacula will save only the root -partition and not any of the other mounted file systems. Similarly on Windows -systems, you must explicitly specify each of the drives you want saved (e.g. -{\bf c:/} and {\bf d:/} ...). In addition, at least for Windows systems, you -will most likely want to enclose each specification within double quotes. The -{\bf df} command on Unix systems will show you which mount points you must -specify to save everything. See below for an example. - -Take special care not to include a directory twice or Bacula will backup the -same files two times wasting a lot of space on your archive device. Including -a directory twice is very easy to do. For example: - -\footnotesize -\begin{verbatim} - Include = { / /usr } -\end{verbatim} -\normalsize - -on a Unix system where /usr is a subdirectory (rather than a mounted -filesystem) will cause /usr to be backed up twice. 
In this case, on Bacula -versions prior to 1.32f-5-09Mar04, due to a bug, you will not be able to -restore hard-linked files that were backed up twice. - -The {\bf \lt{}processing-options\gt{}} list is optional. If specified, it is a list -of {\bf keyword=value} options to be applied to the file-list. Multiple -options may be specified by separating them with spaces. These options are -used to modify the default processing behavior of the files included. Since -there can be multiple {\bf Include} sets, this permits effectively specifying -the desired options (compression, encryption, ...) on a file-by-file basis. -The options may be one of the following: - -\begin{description} - -\item {\bf compression=GZIP} -\index[fd]{compression } -All files saved will be software compressed using the GNU ZIP compression -format. The compression is done on a file-by-file basis by the File daemon. -If there is a problem reading the tape in a single record of a file, it will -at most affect that file and none of the other files on the tape. Normally -this option is {\bf not} needed if you have a modern tape drive as the drive -will do its own compression. However, compression is very important if you -are writing your Volumes to a file, and it can also be helpful if you have a -fast computer but a slow network. - -Specifying {\bf GZIP} uses the default compression level six (i.e. {\bf GZIP} -is identical to {\bf GZIP6}). If you want a different compression level (1 -through 9), you can specify it by appending the level number with no -intervening spaces to {\bf GZIP}. Thus {\bf compression=GZIP1} would give -minimum compression but the fastest algorithm, and {\bf compression=GZIP9} -would give the highest level of compression, but requires more computation. -According to the GZIP documentation, compression levels greater than 6 -generally give very little extra compression but are rather CPU intensive. - -\item {\bf signature=MD5} -\index[fd]{signature } -An MD5 signature will be computed for all files saved. Adding this option -generates about 5\% extra overhead for each file saved. In addition to the -additional CPU time, the MD5 signature adds 16 more bytes per file to your -catalog. We strongly recommend that this option be specified as a default -for all files. - -\item {\bf signature=SHA1} -\index[fd]{signature } -An SHA1 signature will be computed for all files saved. The SHA1 algorithm is purported to -be somewhat slower than the MD5 algorithm, but at the same time it is -significantly better from a cryptographic point of view (i.e. far fewer -collisions, much lower probability of being hacked). It adds four more bytes -than the MD5 signature. We strongly recommend that either this option or MD5 -be specified as a default for all files. Note, only one of the two options -MD5 or SHA1 can be computed for any file.
- -\item {\bf *encryption=\lt{}algorithm\gt{}} -\index[fd]{*encryption } -All files saved will be encrypted using one of the following algorithms (NOT -YET IMPLEMENTED): - -\begin{description} - -\item {\bf *AES} -\index[fd]{*AES } -\end{description} - -\item {\bf verify=\lt{}options\gt{}} -\index[fd]{verify } -The option letters specified are used when running a {\bf Verify -Level=Catalog} job, and may be any combination of the following: - -\begin{description} - -\item {\bf i} -compare the inodes - -\item {\bf p} -compare the permission bits - -\item {\bf n} -compare the number of links - -\item {\bf u} -compare the user id - -\item {\bf g} -compare the group id - -\item {\bf s} -compare the size - -\item {\bf a} -compare the access time - -\item {\bf m} -compare the modification time (st\_mtime) - -\item {\bf c} -compare the change time (st\_ctime) - -\item {\bf s} -report file size decreases - -\item {\bf 5} -compare the MD5 signature - -\item {\bf 1} -compare the SHA1 signature -\end{description} - -A useful set of general options on the {\bf Level=Catalog} verify is {\bf -pins5}, i.e. compare permission bits, inodes, number of links, size, and MD5 -changes. - -\item {\bf onefs=yes|no} -\index[fd]{onefs } -If set to {\bf yes} (the default), {\bf Bacula} will remain on a single file -system. That is, it will not back up file systems that are mounted on a -subdirectory. In this case, you must explicitly list each file system you -want saved. If you set this option to {\bf no}, Bacula will back up all -mounted file systems (i.e. traverse mount points) that are found within the -{\bf FileSet}. Thus, if you have NFS or Samba file systems mounted on a -directory included in your FileSet, they will also be backed up. Normally, it -is preferable to set {\bf onefs=yes} and to explicitly name each file system -you want backed up. See the example below for more details. -\label{portable} - -\item {\bf portable=yes|no} -\index[fd]{portable } -If set to {\bf yes} (default is {\bf no}), the Bacula File daemon will back up -Win32 files in a portable format. By default, this option is set to {\bf -no}, which means that on Win32 systems, the data will be backed up using -Windows API calls and on WinNT/2K/XP, the security and ownership data will be -properly backed up (and restored), but the data format is not portable to -other systems -- e.g. Unix, Win95/98/Me. On Unix systems, this option is -ignored, and unless you have a specific need to have portable backups, we -recommend accepting the default ({\bf no}) so that the maximum information -concerning your files is backed up. - -\item {\bf recurse=yes|no} -\index[fd]{recurse } -If set to {\bf yes} (the default), Bacula will recurse (or descend) into all -subdirectories found unless the directory is explicitly excluded using an -{\bf exclude} definition. If you set {\bf recurse=no}, Bacula will save the -subdirectory entries, but not descend into the subdirectories, and thus will -not save the contents of the subdirectories. Normally, you will want the -default ({\bf yes}). - -\item {\bf sparse=yes|no} -\index[dir]{sparse } -Enable special code that checks for sparse files such as those created by ndbm. The -default is {\bf no}, so no checks are made for sparse files. You may specify -{\bf sparse=yes} even on files that are not sparse files.
No harm will be -done, but there will be a small additional overhead to check for buffers of -all zero, and a small additional amount of space on the output archive will -be used to save the seek address of each non-zero record read. - -{\bf Restrictions:} Bacula reads files in 32K buffers. If the whole buffer is -zero, it will be treated as a sparse block and not written to tape. However, -if any part of the buffer is non-zero, the whole buffer will be written to -tape, possibly including some disk sectors (generally 4096 bytes) that are -all zero. As a consequence, Bacula's detection of sparse blocks is in 32K -increments rather than the system block size. If anyone considers this to be -a real problem, please send in a request for change with the reason. The -sparse code was first implemented in version 1.27. - -If you are not familiar with sparse files, an example is, say, a file where you -wrote 512 bytes at address zero, then 512 bytes at address 1 million. The -operating system will allocate only two blocks, and the empty space or hole -will have nothing allocated. However, when you read the sparse file and read -the addresses where nothing was written, the OS will return all zeros as if -the space were allocated, and if you back up such a file, a lot of space will -be used to write zeros to the volume. Worse yet, when you restore the file, -all the previously empty space will now be allocated, using much more disk -space. By turning on the {\bf sparse} option, Bacula will specifically look -for empty space in the file, and any empty space will not be written to the -Volume, nor will it be restored. The price to pay for this is that Bacula -must search each block it reads before writing it. On a slow system, this may -be important. If you suspect you have sparse files, you should benchmark the -difference or set sparse for only those files that are really sparse. -\label{readfifo} - -\item {\bf readfifo=yes|no} -\index[fd]{readfifo } -If enabled, tells the Client to read the data on a backup and write the data -on a restore to any FIFO (pipe) that is explicitly mentioned in the FileSet. -In this case, you must have a program already running that writes into the -FIFO for a backup or reads from the FIFO on a restore. This can be -accomplished with the {\bf RunBeforeJob} record. If this is not the case, -Bacula will hang indefinitely on reading/writing the FIFO. When this is not -enabled (the default), the Client simply saves the directory entry for the FIFO. - -\item {\bf mtimeonly=yes|no} -\index[dir]{mtimeonly } -If enabled, tells the Client that the selection of files during Incremental -and Differential backups should be based only on the st\_mtime value in the -stat() packet. The default is {\bf no}, which means that the selection of -files to be backed up will be based on both the st\_mtime and the st\_ctime -values. In general, it is not recommended to use this option. - -\item {\bf keepatime=yes|no} -\index[dir]{keepatime } -The default is {\bf no}. When enabled, Bacula will reset the st\_atime -(access time) field of files that it backs up to their value prior to the -backup. This option is not generally recommended as there are very few -programs that use st\_atime, and the backup overhead is increased because of -the additional system call necessary to reset the times. (I'm not sure this -works on Win32.) -\end{description} - -{\bf \lt{}file-list\gt{}} is a space-separated list of filenames and/or -directory names.
To include names containing spaces, enclose the name in -double quotes. The list may span multiple lines; in fact, it is normally good -practice to specify each filename on a separate line. - -There are a number of special cases when specifying files or directories in a -{\bf file-list}. They are: - -\begin{itemize} -\item Any file-list item preceded by an at-sign (@) is assumed to be a -filename containing a list of files, which is read when the configuration -file is parsed during Director startup. Note that the file is read on the -Director's machine and not on the Client. -\item Any file-list item beginning with a vertical bar (|) is assumed to be a -program. This program will be executed on the Director's machine at the time -the Job starts (not when the Director reads the configuration file), and any -output from that program will be assumed to be a list of files or -directories, one per line, to be included. This allows you to have a job that, -for example, includes all the local partitions even if you change the -partitioning by adding a disk. In general, you will need to prefix your -command or commands with a {\bf sh -c} so that they are invoked by a shell. -This will not be the case if you are invoking a script as in the second -example below. Also, you must take care to escape wild-cards and ensure that -any spaces in your command are escaped as well. If you use single quotes -(') within a double quote ("), Bacula will treat everything between the -single quotes as one field, so it will not be necessary to escape the spaces. -In general, getting all the quotes and escapes correct is a real pain, as you -can see in the next example. As a consequence, it is often easier to put -everything in a file, and simply use the file name within Bacula. In that case -the {\bf sh -c} will not be necessary, provided the first line of the file is - {\bf \#!/bin/sh}. - -As an example: - -\footnotesize -\begin{verbatim} - -Include = signature=SHA1 { - "|sh -c 'df -l | grep \"^/dev/hd[ab]\" | grep -v \".*/tmp\" \ - | awk \"{print \\$6}\"'" -} -\end{verbatim} -\normalsize - -will produce a list of all the local partitions on a Red Hat Linux system. -Note, the above line was split, but it should normally be written on one line. -Quoting is a real problem because you must quote for Bacula, which consists of -preceding every \textbackslash{} and every " with a \textbackslash{}, and -you must also quote for the shell command. In the end, it is probably easier -just to execute a small file with: - -\footnotesize -\begin{verbatim} -Include = signature=MD5 { - "|my_partitions" -} -\end{verbatim} -\normalsize - -where my\_partitions has: - -\footnotesize -\begin{verbatim} -#!/bin/sh -df -l | grep "^/dev/hd[ab]" | grep -v ".*/tmp" \ - | awk "{print \$6}" -\end{verbatim} -\normalsize - -If the vertical bar (|) is preceded by a backslash as in \textbackslash{}|, -the program will be executed on the Client's machine instead of on the -Director's machine -- (this is implemented but not tested, and very likely -will not work on Windows). -\item Any file-list item preceded by a less-than sign (\lt{}) will be taken -to be a file. This file will be read on the Director's machine at the time -the Job starts, and the data will be assumed to be a list of directories or -files, one per line, to be included. This feature allows you to modify the -external file and change what will be saved without stopping and restarting -Bacula as would be necessary if using the @ modifier noted above.
- -If you precede the less-than sign (\lt{}) with a backslash as in -\textbackslash{}\lt{}, the file-list will be read on the Client machine -instead of on the Director's machine (implemented but not tested). -\item If you explicitly specify a block device such as {\bf /dev/hda1}, then -Bacula (starting with version 1.28) will assume that this is a raw partition -to be backed up. In this case, you are strongly urged to specify a {\bf -sparse=yes} include option; otherwise, you will save the whole partition -rather than just the actual data that the partition contains. For example: - -\footnotesize -\begin{verbatim} -Include = signature=MD5 sparse=yes { - /dev/hd6 -} -\end{verbatim} -\normalsize - -will back up the data in device /dev/hd6. - -Ludovic Strappazon has pointed out that this feature can be used to back up a -full Microsoft Windows disk. Simply boot into the system using a Linux Rescue -disk, then load a statically linked Bacula as described in the -\ilink{ Disaster Recovery Using Bacula}{RescueChapter} chapter of -this manual. Then simply save the whole disk partition. In the case of a -disaster, you can then restore the desired partition. -\item If you explicitly specify a FIFO device name (created with mkfifo), and -you add the {\bf readfifo=yes} option, Bacula will read the FIFO -and back its data up to the Volume. For example: - -\footnotesize -\begin{verbatim} -Include = signature=SHA1 readfifo=yes { - /home/abc/fifo -} -\end{verbatim} -\normalsize - -if {\bf /home/abc/fifo} is a fifo device, Bacula will open the fifo, read it, -and store all data thus obtained on the Volume. Please note, you must have a -process on the system that is writing into the fifo, or Bacula will hang, and -after one minute of waiting, it will go on to the next file. The data read -can be anything since Bacula treats it as a stream. - -This feature can be an excellent way to do a "hot" backup of a very large -database. You can use the {\bf RunBeforeJob} to create the fifo and to start -a program that dynamically reads your database and writes it to the fifo. -Bacula will then write it to the Volume. - -During the restore operation, the inverse is true: after Bacula creates the -fifo, if there was any data stored with it (no need to explicitly list it or -add any options), that data will be written back to the fifo. As a -consequence, if any such FIFOs exist in the fileset to be restored, you must -ensure that there is a reader program, or Bacula will block, and after one -minute, Bacula will time out the write to the fifo and move on to the next -file. -\end{itemize} - -The Exclude resource specifies the list of files and/or directories to be -excluded from the backup job. The {\bf \lt{}file-list\gt{}} is a comma- or -space-separated list of filenames and/or directory names. To exclude names -containing spaces, enclose the name in double quotes. Most often each -filename is on a separate line. - -For exclusions on Windows systems, do not include a leading drive letter such -as {\bf c:}. This does not work. Any filename preceded by an at-sign (@) is -assumed to be a filename on the Director's machine containing a list of files.
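-As a purely illustrative sketch (the list file name below is hypothetical and not part of the original text), such an Exclude read from a file on the Director's machine might be written as: - -\footnotesize -\begin{verbatim} - Exclude = { @/etc/bacula/exclude.list } -\end{verbatim} -\normalsize - -where /etc/bacula/exclude.list would contain one file or directory name per line.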
- -\end{description} - -The following is an example of a valid FileSet resource definition: - -\footnotesize -\begin{verbatim} -FileSet { - Name = "Full Set" - Include = compression=GZIP signature=SHA1 sparse=yes { - @/etc/backup.list - } - Include = { - /root/myfile - /usr/lib/another_file - } - Exclude = { *.o } -} -\end{verbatim} -\normalsize - -Note, in the above example, all the files contained in /etc/backup.list will -be compressed with GZIP compression, an SHA1 signature will be computed on the -file's contents (its data), and sparse file handling will apply. - -The two files /root/myfile and /usr/lib/another\_file will also be saved but -without any options. In addition, all files with the extension {\bf .o} will -be excluded from the file set (i.e. from the backup). - -Suppose you want to save everything except {\bf /tmp} on your system. Doing a -{\bf df} command, you get the following output: - -\footnotesize -\begin{verbatim} -[kern@rufus k]$ df -Filesystem 1k-blocks Used Available Use% Mounted on -/dev/hda5 5044156 439232 4348692 10% / -/dev/hda1 62193 4935 54047 9% /boot -/dev/hda9 20161172 5524660 13612372 29% /home -/dev/hda2 62217 6843 52161 12% /rescue -/dev/hda8 5044156 42548 4745376 1% /tmp -/dev/hda6 5044156 2613132 2174792 55% /usr -none 127708 0 127708 0% /dev/shm -//minimatou/c$ 14099200 9895424 4203776 71% /mnt/mmatou -lmatou:/ 1554264 215884 1258056 15% /mnt/matou -lmatou:/home 2478140 1589952 760072 68% /mnt/matou/home -lmatou:/usr 1981000 1199960 678628 64% /mnt/matou/usr -lpmatou:/ 995116 484112 459596 52% /mnt/pmatou -lpmatou:/home 19222656 2787880 15458228 16% /mnt/pmatou/home -lpmatou:/usr 2478140 2038764 311260 87% /mnt/pmatou/usr -deuter:/ 4806936 97684 4465064 3% /mnt/deuter -deuter:/home 4806904 280100 4282620 7% /mnt/deuter/home -deuter:/files 44133352 27652876 14238608 67% /mnt/deuter/files -\end{verbatim} -\normalsize - -Now, if you specify only {\bf /} in your Include list, Bacula will only save -the Filesystem {\bf /dev/hda5}. To save all file systems except {\bf /tmp} -without including any of the Samba or NFS mounted systems, and explicitly -excluding /tmp, /proc, .journal, and .autofsck, which you will not want -saved and restored, you can use the following: - -\footnotesize -\begin{verbatim} -FileSet { - Name = Everything - Include = { - / - /boot - /home - /rescue - /usr - } - Exclude = { - /proc - /tmp - .journal - .autofsck - } -} -\end{verbatim} -\normalsize - -Since /tmp is on its own filesystem and it was not explicitly named in the -Include list, it is not really needed in the exclude list. It is better to -list it in the Exclude list for clarity, and in case the disks are changed so -that it is no longer in its own partition. - -Please be aware that allowing Bacula to traverse or change file systems can be -{\bf very} dangerous. For example, with the following: - -\footnotesize -\begin{verbatim} -FileSet { - Name = "Bad example" - Include = onefs=no { - /mnt/matou - } -} -\end{verbatim} -\normalsize - -you will be backing up an NFS-mounted partition ({\bf /mnt/matou}), and since -{\bf onefs} is set to {\bf no}, Bacula will traverse file systems. However, if -{\bf /mnt/matou} has the current machine's file systems mounted, as is often -the case, you will get yourself into a recursive loop and the backup will -never end.
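-As a minimal illustrative sketch (the FileSet name below is hypothetical and not part of the original text), the same mount point can be saved without traversing any file systems mounted beneath it by leaving {\bf onefs} at its default of {\bf yes}: - -\footnotesize -\begin{verbatim} -FileSet { - Name = "Safer example" - Include = onefs=yes { - /mnt/matou - } -} -\end{verbatim} -\normalsize - -With this definition, Bacula stays on the file system containing {\bf /mnt/matou}, so any file systems mounted below it must be listed explicitly if you want them saved.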
- -The following FileSet definition will back up a raw partition: - -\footnotesize -\begin{verbatim} -FileSet { - Name = "RawPartition" - Include = sparse=yes { - /dev/hda2 - } -} -\end{verbatim} -\normalsize - -Note, in backing up and restoring a raw partition, you should ensure that no -other process, including the system, is writing to that partition. As a -precaution, you are strongly urged to ensure that the raw partition is not -mounted or is mounted read-only. If necessary, this can be done using the {\bf -RunBeforeJob} record. - -\section{Additional Considerations for Using Excludes on Windows} -\index[general]{Additional Considerations for Using Excludes on Windows } -\index[general]{Windows!Additional Considerations for Using Excludes on } - -For exclude lists to work correctly on Windows, you must observe the following -rules: - -\begin{itemize} -\item Filenames are case sensitive, so you must use the correct case. -\item To exclude a directory, you must not have a trailing slash on the -directory name. -\item If you have spaces in your filename, you must enclose the entire name -in double-quote characters ("). Trying to use a backslash before the space -will not work. -\item You must not precede the excluded file or directory with a drive letter -(such as {\bf c:}), otherwise it will not work. -\end{itemize} - -Thanks to Thiago Lima for summarizing the above items for us. If you are -having difficulties getting includes or excludes to work, you might want to -try using the {\bf estimate job=xxx listing} command documented in the -\ilink{Console chapter}{console.tex#estimate} of this manual. -\label{win32} - -\section{Windows Considerations for FileSets} -\index[general]{FileSets!Windows Considerations for } -\index[general]{Windows Considerations for FileSets } - -If you are entering Windows file names, the directory path may be preceded by -the drive and a colon (as in c:). However, the path separators must be -specified in Unix convention (i.e. forward slash (/)). If you wish to include -a quote in a file name, precede the quote with a backslash -(\textbackslash{}\textbackslash{}). For example, you might use the following -for a Windows machine to back up the "My Documents" directory: - -\footnotesize -\begin{verbatim} -FileSet { - Name = "Windows Set" - Include = { - "c:/My Documents" - } - Exclude = { *.obj *.exe } -} -\end{verbatim} -\normalsize - -When using exclusion on Windows, do not use a drive prefix (i.e. {\bf c:}) as -it will prevent the exclusion from working. However, if you need to specify a -drive letter in exclusions on Win32 systems, you can do so by specifying: - -\footnotesize -\begin{verbatim} - Exclude = { /cygdrive/d/archive/Mulberry } -\end{verbatim} -\normalsize - -where in this case, the {\bf /cygdrive/d} is Cygwin's way of referring to -drive {\bf d:} (thanks to Mathieu Arnold for this tip). - -\section{A Windows Example FileSet} -\index[general]{FileSet!Windows Example } -\index[general]{Windows Example FileSet } - -The following example was contributed by Phil Stracchino: - -\footnotesize -\begin{verbatim} -This is my Windows 2000 fileset: -FileSet { - Name = "Windows 2000 Full Set" - Include = signature=MD5 { - c:/ - } -# Most of these files are excluded not because we don't want -# them, but because Win2K won't allow them to be backed up -# except via proprietary Win32 API calls.
- Exclude = { - "/Documents and Settings/*/Application Data/*/Profiles/*/*/ - Cache/*" - "/Documents and Settings/*/Local Settings/Application Data/ - Microsoft/Windows/[Uu][Ss][Rr][Cc][Ll][Aa][Ss][Ss].*" - "/Documents and Settings/*/[Nn][Tt][Uu][Ss][Ee][Rr].*" - "/Documents and Settings/*/Cookies/*" - "/Documents and Settings/*/Local Settings/History/*" - "/Documents and Settings/*/Local Settings/ - Temporary Internet Files/*" - "/Documents and Settings/*/Local Settings/Temp/*" - "/WINNT/CSC" - "/WINNT/security/logs/scepol.log" - "/WINNT/system32/config/*" - "/WINNT/msdownld.tmp/*" - "/WINNT/Internet Logs/*" - "/WINNT/$Nt*Uninstall*" - "/WINNT/Temp/*" - "/temp/*" - "/tmp/*" - "/pagefile.sys" - } -} -\end{verbatim} -\normalsize - -Note, three lines of the above Exclude were split to fit on the document -page; each should be written on a single line in real use. diff --git a/docs/manuals/en/problems/faq.css b/docs/manuals/en/problems/faq.css deleted file mode 100644 index d1824aff..00000000 --- a/docs/manuals/en/problems/faq.css +++ /dev/null @@ -1,30 +0,0 @@ -/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ -.MATH { font-family: "Century Schoolbook", serif; } -.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } -.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } - -/* implement both fixed-size and relative sizes */ -SMALL.XTINY { font-size : xx-small } -SMALL.TINY { font-size : x-small } -SMALL.SCRIPTSIZE { font-size : smaller } -SMALL.FOOTNOTESIZE { font-size : small } -SMALL.SMALL { } -BIG.LARGE { } -BIG.XLARGE { font-size : large } -BIG.XXLARGE { font-size : x-large } -BIG.HUGE { font-size : larger } -BIG.XHUGE { font-size : xx-large } - -/* heading styles */ -H1 { } -H2 { } -H3 { } -H4 { } -H5 { } - -/* mathematics styles */ -DIV.displaymath { } /* math displays */ -TD.eqno { } /* equation-number cells */ - - -/* document-specific styles come next */ diff --git a/docs/manuals/en/utility/faq.css b/docs/manuals/en/utility/faq.css deleted file mode 100644 index d1824aff..00000000 --- a/docs/manuals/en/utility/faq.css +++ /dev/null @@ -1,30 +0,0 @@ -/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ -.MATH { font-family: "Century Schoolbook", serif; } -.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } -.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } - -/* implement both fixed-size and relative sizes */ -SMALL.XTINY { font-size : xx-small } -SMALL.TINY { font-size : x-small } -SMALL.SCRIPTSIZE { font-size : smaller } -SMALL.FOOTNOTESIZE { font-size : small } -SMALL.SMALL { } -BIG.LARGE { } -BIG.XLARGE { font-size : large } -BIG.XXLARGE { font-size : x-large } -BIG.HUGE { font-size : larger } -BIG.XHUGE { font-size : xx-large } - -/* heading styles */ -H1 { } -H2 { } -H3 { } -H4 { } -H5 { } - -/* mathematics styles */ -DIV.displaymath { } /* math displays */ -TD.eqno { } /* equation-number cells */ - - -/* document-specific styles come next */