From: Kern Sibbald Date: Thu, 2 Jun 2005 16:23:05 +0000 (+0000) Subject: Add base for German manual X-Git-Tag: Release-1.38.0~398 X-Git-Url: https://git.sur5r.net/?a=commitdiff_plain;h=e348ba6f03cf4e6fcf78821f810af9a7f611379a;p=bacula%2Fdocs Add base for German manual --- diff --git a/docs/manual-de/.cvsignore b/docs/manual-de/.cvsignore new file mode 100644 index 00000000..ccafca61 --- /dev/null +++ b/docs/manual-de/.cvsignore @@ -0,0 +1,41 @@ +imagename_translations +developers +developers.html +developers.pdf +bacula.html +bacula.pdf +bacula +*.aux +*.png +*.idx +*.eps +*.jpg +*.dvi +*.out +*.log +*.toc +*.lof +*.ilg +*.dvi +*.css +*.lot +*.cdx +*.ddx +*.fdx +*.sdx +*.cnd +*.dnd +*.fnd +*.ind +*.snd +WARNINGS +internals.pl +index.html +labels.pl +1 +2 +images.pl +*.linked.tex +images.tex +developersi.tex +baculai-*.tex diff --git a/docs/manual-de/Makefile b/docs/manual-de/Makefile new file mode 100644 index 00000000..cc6da2f1 --- /dev/null +++ b/docs/manual-de/Makefile @@ -0,0 +1,184 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make links +# make all +# make web +# make html +# make pdf +# +# or simply +# +# make +# + +IMAGES=../images + +# bacula -- special case below +# running -- special case below +# developers -- special case below + +# +# Note, these are all parts of the manual not in any +# particular order (mostly alphabetic). The order they +# appear in the manual is defined in bacula.tex +# +# Note also that certain of these chapters are in the +# bacula.tex manual, and others in the developers.tex +# manual. 
+# +MANUAL = \ + autochangers bootstrap bugs catalog \ + catmaintenance configure consoleconf console critical \ + daemonprotocol dirdconf director disk faq filedconf file \ + firewalls gpl install internaldb kaboom lesser \ + license messagesres monitorconf mysql oldfileset pools \ + porting postgresql progs projects quickstart recycling \ + regression rescuefloppy rescue restore rpm-faq \ + security spooling sqlite state storage \ + storedconf strategies stunnel \ + requirements supportedoses supporteddrives tapetesting \ + thanks tips vars verify win32 \ + daemonprotocol gui-interface \ + supportedchangers \ + md5 mediaformat mempool netprotocol porting smartall + + +first_rule: bacula + +bacula: tex web html dvipdf + +# Note, assume bacula manual was built first +# I've removed devhtml so that the resulting files +# (images) do not conflict with the main manual +developers: devtex devweb devpdf + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @cp -fp ${IMAGES}/hires/*.eps . + @touch baculai-dir.tex baculai-fd.tex baculai-sd.tex \ + baculai-console.tex baculai-general.tex + -latex -interaction=batchmode bacula.tex + makeindex bacula.idx -o bacula.ind 2>/dev/null + makeindex bacula.ddx -o bacula.dnd >/dev/null 2>/dev/null + makeindex bacula.fdx -o bacula.fnd >/dev/null 2>/dev/null + makeindex bacula.sdx -o bacula.snd >/dev/null 2>/dev/null + makeindex bacula.cdx -o bacula.cnd >/dev/null 2>/dev/null + -latex -interaction=batchmode bacula.tex + @rm -f *.eps *.old + + +devtex: + @cp -fp ${IMAGES}/hires/*.eps . + touch developers.idx developersi-general.tex + -latex -interaction=batchmode developers.tex + makeindex developers.idx >/dev/null 2>/dev/null + -latex -interaction=batchmode developers.tex + @rm -f *.eps *.old + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . 
+ dvipdfm -p a4 bacula.dvi +# Rename for loading on Web site + mv bacula.pdf dev-bacula.pdf + @rm -f *.eps *.old + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf bacula.dvi bacula.pdf + @rm -f *.eps *.old + + +devpdf: + @echo "Making developers pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf developers.dvi developers.pdf + @rm -f *.eps *.old + +devpdfm: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 developers.dvi + @rm -f *.eps *.old + +html: + @echo "Making html" + @./translate_images.pl --from_meaningful_names bacula.html + @cp -fp ${IMAGES}/*.eps . + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl bacula >/dev/null + @echo + @./translate_images.pl --to_meaningful_names bacula.html + @rm -f *.eps *.gif *.jpg + +devhtml: + @echo "Making developers html" + @cp -fp ${IMAGES}/*.eps . + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + developers >/dev/null + ./translate_images.pl developers.html + @rm -f *.eps *.gif *.jpg *.old + + +web: + @echo "Making web" + @mkdir -p bacula + @./translate_images.pl --from_meaningful_names bacula/Bacula_Users_Guide.html + @rm -rf bacula/*.html + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps *.txt bacula + @rm -f bacula/next.eps bacula/next.png bacula/prev.eps bacula/prev.png bacula/up.eps bacula/up.png + latex2html -split 4 -local_icons -t "Bacula User's Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent bacula >/dev/null + @echo + @./translate_images.pl --to_meaningful_names bacula/Bacula_Users_Guide.html + @cp -f bacula/Bacula_Freque_Asked_Questi.html bacula/faq.html + @rm -f *.eps *.gif *.jpg bacula/*.eps *.old + +devweb: + @echo "Making developers web" + @mkdir -p developers + @rm -f developers/* + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png developers/ + @rm -f developers/next.eps developers/next.png developers/prev.eps developers/prev.png developers/up.eps developers/up.png + latex2html -split 5 -local_icons -t "Developer's Guide" -long_titles 4 \ + -contents_in_nav -toc_stars -white -notransparent developers >/dev/null + ./translate_images.pl developers/Developers_Guide.html + @cp -f developers/Developers_Guide.html developers/index.html + @rm -f *.eps *.gif *.jpg developers/*.eps *.old + +texcheck: + ./check_tex.pl bacula.tex + ./check_tex.pl developers.tex + +main_configs: + pic2graph -density 100 main_configs.png + +clean: + @rm -f 1 2 3 + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.pdf *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f images.pl labels.pl internals.pl + @rm -rf bacula developers + @rm -f baculai-dir.tex baculai-fd.tex baculai-sd.tex \ + baculai-console.tex baculai-general.tex images.tex developersi.tex + + +distclean: clean + @rm -f bacula.html bacula.pdf developers.html developers.pdf diff --git a/docs/manual-de/ansi-labels.tex b/docs/manual-de/ansi-labels.tex new file mode 100644 index 00000000..ee8664b9 --- /dev/null +++ b/docs/manual-de/ansi-labels.tex @@ -0,0 +1,50 @@ + +\section*{ANSI and IBM Tape Labels} +\label{_ChapterStart62} +\index[general]{ANSI and IBM Tape Labels} +\index[general]{Labels!Tape} +\addcontentsline{toc}{section}{ANSI and IBM Tape Labels} + +Bacula can support ANSI or IBM tape labels as long as you +enable it. In fact, with the proper configuration, you can +force Bacula to require ANSI or IBM labels. + +Bacula can create an ANSI or IBM label, but if Check Labels is +enabled (see below), Bacula will look for an existing label, and +if it is found, it will keep the label. 
Consequently, you +can label the tapes with programs other than Bacula, and Bacula +will recognize and support them. + +Even though Bacula will recognize and write ANSI and IBM labels, +it always writes its own tape labels as well. + +When using ANSI or IBM tape labeling, you must keep your Volume +names to a maximum of 6 characters. + + +\subsection*{Director Pool Directive} +\addcontentsline{toc}{section}{Director Pool Directive} + +\begin{description} +\item [ Label Type = ANSI | IBM | Bacula] + This directive is implemented in the Director Pool resource and in the SD Device + resource. If it is specified in the SD Device resource, it will take + precedence over the value passed from the Director to the SD. +\end{description} + +\subsection*{Storage Daemon Device Directives} +\addcontentsline{toc}{section}{Storage Daemon Device Directives} + +\begin{description} +\item [ Label Type = ANSI | IBM | Bacula] + This directive is implemented in the Director Pool resource and in the SD Device + resource. If it is specified in the the SD Device resource, it will take + precedence over the value passed from the Director to the SD. + +\item [Check Labels = yes | no] + This directive is implemented in the the SD Device resource. If you intend + to read ANSI or IBM labels, this *must* be set. Even if the volume is + not ANSI labeled, you can set this to yes, and Bacula will check the + label type. +\end{description} + diff --git a/docs/manual-de/autochangerres.tex b/docs/manual-de/autochangerres.tex new file mode 100644 index 00000000..a178a98f --- /dev/null +++ b/docs/manual-de/autochangerres.tex @@ -0,0 +1,68 @@ +\subsection*{Autochanger Resource} +\index[sd]{Autochanger Resource } +\index[sd]{Resource!Autochanger } +\addcontentsline{toc}{subsection}{Autochanger Resource} + +The Autochanger resource serves to group one or more Device resources +into one unit called an autochanger in Bacula (referred to +as a tape library by autochanger manufacturers). 
+ +\begin{description} +\item [Name = \lt{}Autochanger-Name\gt{}] + \index[sd]{Name} + Specifies the Name of the Autochanger. This name is used in + the Director's Storage definition to refer to the autochanger. + This directive is required. + +\item [Device = \lt{}Device-name1, device-name2, ...\gt{}] + Specifies the names of the Device resource that corresponds + to the autochanger drive. If you have a multiple drive + autochanger, you must specify multiple Device names, each + one referring to a Device resource that contains a the + Drive Index specification that corresponds to the drive + number. You may specify multiple device names on + a single line separated by commas, and/or you may specify + multiple Device directives. + This directive is required. + +\item [Changer Device = {\it name-string}] + \index[sd]{Changer Device} + The specified {\bf name-string} gives the system file name of the autochanger + device name. If specified in this resource, the Changer Device name + is not needed in the Device resource. If it is specified in the Device + resource (see above), it will take precidence over one specified in + the Autochanger resource. + +\item [Changer Command = {\it name-string}] + \index[sd]{Changer Command } + The {\bf name-string} specifies an external program to be called that will + automatically change volumes as required by {\bf Bacula}. Most frequently, + you will specify the Bacula supplied {\bf mtx-changer} script as follows. + If it is specified here, it need not be specified in the Device + resource. If it is specified in the Device resource, it will take + precidence over the one specified in the Autochanger resource. 
+ +\end{description} + +The following is an example of a valid Autochanger resource definition: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "DDS-4-changer" + Device = DDS-4-1, DDS-4-2 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} +Device { + Name = "DDS-4-1" + Drive Index = 0 + ... +} +Device { + Name = "DDS-4-2" + Drive Index = 1 + ... +} +\end{verbatim} +\normalsize diff --git a/docs/manual-de/autochangers.tex b/docs/manual-de/autochangers.tex new file mode 100644 index 00000000..5fb372f4 --- /dev/null +++ b/docs/manual-de/autochangers.tex @@ -0,0 +1,849 @@ +%% +%% + +\section*{Autochangers Support} +\label{_ChapterStart18} +\index[general]{Support!Autochangers } +\index[general]{Autochangers Support } +\addcontentsline{toc}{section}{Autochangers Support} + +\subsection*{Autochangers -- General} +\index[general]{General!Autochangers -- } +\index[general]{Autochangers -- General } +\addcontentsline{toc}{subsection}{Autochangers -- General} + +Bacula provides autochanger support for reading +and writing tapes. In order to work with an autochanger, Bacula requires three +things, each of which is explained in more detail after this list: + +\begin{itemize} +\item A script that actually controls the autochanger according to commands + sent by Bacula. We furnish such a script that works with {\bf mtx} found in + the {\bf depkgs} distribution. This script works only with single drive + autochangers. +\item That each Volume (tape) to be used must be defined in the Catalog and + have a Slot number assigned to it so that Bacula knows where the Volume is in + the autochanger. This is generally done with the {\bf label} command. See + below for more details. +\item Modifications to your Storage daemon's Device configuration resource to + identify that the device is a changer, as well as a few other parameters. 
+\item You should also modify your Storage resource definition in the + Director's configuration file so that you are automatically prompted for the + Slot when labeling a Volume. +\end{itemize} + +In version 1.37, there is a new \ilink{Autochanger +resource}{AutochangerRes} that permits you to group Device resources thus +creating a multi-drive autochanger. + +Bacula uses its own {\bf mtx-changer} script to interface with a program +that actually does the tape changing. Thus in principle, {\bf mtx-changer} can +be adapted to function with any autochanger program. The current version of +{\bf mtx-changer} works with the {\bf mtx} program. However, FreeBSD users have +provided a script in the {\bf examples} directory that allows Bacula to use +the {\bf chio} program. + +Bacula also supports autochangers with barcode +readers. This support includes two new Console commands: {\bf label barcodes} +and {\bf update slots}. For more details on these commands, see the ``Barcode +Support'' section below. + +Current Bacula autochanger support does not include cleaning, stackers, or +silos. However, under certain conditions, you may be able to make Bacula +work with stackers (gravity feed and such). Support for multi-drive +autochangers requires the \ilink{Autochanger resource}{AutochangerRes} +introduced in version 1.37. This resource is also recommended for single +drive autochangers. + +In principle, if {\bf mtx} will operate your changer correctly, then it is +just a question of adapting the {\bf mtx-changer} script (or selecting one +already adapted) for proper interfacing. You can find a list of autochangers +supported by {\bf mtx} at the following link: +\elink{http://mtx.badtux.net/compatibility.php} +{http://mtx.badtux.net/compatibility.php}. +The home page for the {\bf mtx} project can be found at: +\elink{http://mtx.badtux.net/}{http://mtx.badtux.net/}. 
+ +If you are having troubles, please use the {\bf auto} command in the {\bf +btape} program to test the functioning of your autochanger with Bacula. When +Bacula is running, please remember that for many distributions (e.g. FreeBSD, +Debian, ...) the Storage daemon runs as {\bf bacula.tape} rather than {\bf +root.root}, so you will need to ensure that the Storage daemon has sufficient +permissions to access the autochanger. + +\label{SCSI devices} +\subsection*{Knowing What SCSI Devices You Have} +\index[general]{Have!Knowing What SCSI Devices You } +\index[general]{Knowing What SCSI Devices You Have } +\index[general]{SCSI devices} +\index[general]{devices!SCSI} +\addcontentsline{toc}{subsection}{Knowing What SCSI Devices You Have} + +Under Linux, you can + +\footnotesize +\begin{verbatim} +cat /proc/scsi/scsi +\end{verbatim} +\normalsize + +to see what SCSI devices you have available. You can also: + +\footnotesize +\begin{verbatim} +cat /proc/scsi/sg/device_hdr /proc/scsi/sg/devices +\end{verbatim} +\normalsize + +to find out how to specify their control address ({\bf /dev/sg0} for the +first, {\bf /dev/sg1} for the second, ...) on the {\bf Changer Device = } +Bacula directive. + +Under FreeBSD, you can use: + +\footnotesize +\begin{verbatim} +camcontrol devlist +\end{verbatim} +\normalsize + +To list the SCSI devices as well as the {\bf /dev/passn} that you will use on +the Bacula {\bf Changer Device = } directive. + +\label{scripts} + +\subsection*{Example Scripts} +\index[general]{Scripts!Example } +\index[general]{Example Scripts } +\addcontentsline{toc}{subsection}{Example Scripts} + +Please read the sections below so that you understand how autochangers work +with Bacula. Although we supply a default {\bf mtx-changer} script, your +autochanger may require some additional changes. 
If you want to see examples +of configuration files and scripts, please look in the {\bf +\lt{}bacula-src\gt{}/examples/devices} directory where you will find an +example {\bf HP-autoloader.conf} Bacula Device resource, and several {\bf +mtx-changer} scripts that have been modified to work with different +autochangers. + +\label{Slots} + +\subsection*{Slots} +\index[general]{Slots } +\addcontentsline{toc}{subsection}{Slots} + +To properly address autochangers, Bacula must know which Volume is in each +{\bf slot} of the autochanger. Slots are where the changer cartridges reside +when not loaded into the drive. Bacula numbers these slots from one to the +number of cartridges contained in the autochanger. + +Bacula will not automatically use a Volume in your autochanger unless it is +labeled and the slot number is stored in the catalog and the Volume is marked +as InChanger. For each Volume in your +changer, you will, using the Console program, assign a slot. This information +is kept in {\bf Bacula's} catalog database along with the other data for the +volume. If no slot is given, or the slot is set to zero, Bacula will not +attempt to use the autochanger even if all the necessary configuration records +are present. In addition, the console {\bf mount} command does not cause +Bacula to operate the autochanger, it only tells Bacula to read any tape that +may be in the drive. + +You can check if the Slot number and InChanger flag are set by doing a: +\begin{verbatim} +list Volumes +\end{verbatim} + +in the Console program. + +\label{mult} + +\subsection*{Multiple Devices} +\index[general]{Devices!Multiple } +\index[general]{Multiple Devices } +\addcontentsline{toc}{subsection}{Multiple Devices} + +Some autochangers have more than one read/write device (drive). The +new +\ilink{Autochanger resource}{AutochangerRes} introduced in version +1.37 permits you to group Device resources, where each device +represents a drive. 
The Director may still reference the Devices (drives) +directly, but doing so, bypasses the proper functioning of the +drives together. Instead, the Director (in the Storage resource) +should reference the Autochanger resource name. Doing so permits +the Storage daemon to ensure that only one drive uses the mtx-changer +script at a time, and also that two drives don't reference the +same Volume. + +Multi-drive requires the use of the {\bf +Drive Index} directive in the Device resource of the Storage daemon's +configuration file. Drive numbers or the Device Index are numbered beginning +at zero, which is the default. To use the second Drive in an autochanger, you +need to define a second Device resource and set the Drive Index to one for +that device. In general, the second device will have the same {\bf Changer +Device} (control channel) as the first drive, but a different {\bf Archive +Device}. + +The current version (1.37.16) of Bacula does not coordinate between the two +drives, so you must make sure that Bacula doesn't attempt to mount the same +Volume on both drives at the same time. There are a number of ways to do this. +One is to use different pools for each drive. + +\label{ConfigRecords} +\subsection*{Device Configuration Records} +\index[general]{Records!Device Configuration } +\index[general]{Device Configuration Records } +\addcontentsline{toc}{subsection}{Device Configuration Records} + +Configuration of autochangers within Bacula is done in the Device resource of +the Storage daemon. Four records: {\bf Autochanger}, {\bf Changer Device}, +{\bf Changer Command}, and {\bf Maximum Changer Wait} control how Bacula uses +the autochanger. + +These four records, permitted in {\bf Device} resources, are described in +detail below. Note, however, that the {\bf Changer Device} and the +{\bf Changer Command} directives are not needed in the Device resource +if they are present in the {\bf Autochanger} resource. 
+ +\begin{description} + +\item [Autochanger = {\it Yes|No} ] + \index[sd]{Autochanger } + The {\bf Autochanger} record specifies that the current device is or is not +an autochanger. The default is {\bf no}. + +\item [Changer Device = \lt{}device-name\gt{}] + \index[sd]{Changer Device } + In addition to the Archive Device name, you must specify a {\bf Changer +Device} name. This is because most autochangers are controlled through a +different device than is used for reading and writing the cartridges. For +example, on Linux, one normally uses the generic SCSI interface for +controlling the autochanger, but the standard SCSI interface for reading and +writing the tapes. On Linux, for the {\bf Archive Device = /dev/nst0}, you +would typically have {\bf Changer Device = /dev/sg0}. Note, some of the more +advanced autochangers will locate the changer device on {\bf /dev/sg1}. Such +devices typically have several drives and a large number of tapes. + +On FreeBSD systems, the changer device will typically be on {\bf /dev/pass0} +through {\bf /dev/passn}. + +On Solaris, the changer device will typically be some file under {\bf +/dev/rdsk}. + +\item [Changer Command = \lt{}command\gt{}] + \index[sd]{Changer Command } + This record is used to specify the external program to call and what +arguments to pass to it. The command is assumed to be a standard program or +shell script that can be executed by the operating system. This command is +invoked each time that Bacula wishes to manipulate the autochanger. 
The +following substitutions are made in the {\bf command} before it is sent to +the operating system for execution: + +\footnotesize +\begin{verbatim} + %% = % + %a = archive device name + %c = changer device name + %d = changer drive index base 0 + %f = Client's name + %j = Job name + %o = command (loaded, load, or unload) + %s = Slot base 0 + %S = Slot base 1 + %v = Volume name + +\end{verbatim} +\normalsize + +An actual example for using {\bf mtx} with the {\bf mtx-changer} script (part +of the Bacula distribution) is: + +\footnotesize +\begin{verbatim} +Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" + +\end{verbatim} +\normalsize + +Where you will need to adapt the {\bf /etc/bacula} to be the actual path on +your system where the mtx-changer script resides. Details of the three +commands currently used by Bacula (loaded, load, unload) as well as the +output expected by Bacula are give in the {\bf Bacula Autochanger Interface} +section below. + +\item [Maximum Changer Wait = \lt{}time\gt{}] + \index[sd]{Maximum Changer Wait } + This record is used to define the maximum amount of time that Bacula + will wait for an autoloader to respond to a command (e.g. load). The + default is set to 120 seconds. If you have a slow autoloader you may + want to set it longer. + +If the autoloader program fails to respond in this time, it will be killed +and Bacula will request operator intervention. + +\item [Drive Index = \lt{}number\gt{}] + \index[sd]{Drive Index } + This record allows you to tell Bacula to use the second or subsequent + drive in an autochanger with multiple drives. Since the drives are + numbered from zero, the second drive is defined by + +\footnotesize +\begin{verbatim} +Device Index = 1 + +\end{verbatim} +\normalsize + +To use the second drive, you need a second Device resource definition in the +Bacula configuration file. See the Multiple Drive section above in this +chapter for more information. 
+\end{description} + +In addition, for proper functioning of the Autochanger, you must +define an Autochanger resource. +\input{autochangerres} + +\label{example} +\subsection*{An Example Configuration File} +\index[general]{Example Configuration File } +\index[general]{File!Example Configuration } +\addcontentsline{toc}{subsection}{Example Configuration File} + +The following two resources implement an autochanger: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "Autochanger" + Device = DDS-4 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} + +Device { + Name = DDS-4 + Media Type = DDS-4 + Archive Device = /dev/nst0 # Normal archive device + Autochanger = yes + LabelMedia = no; + AutomaticMount = yes; + AlwaysOpen = yes; + Mount Anonymous Volumes = no; +} +\end{verbatim} +\normalsize + +where you will adapt the {\bf Archive Device}, the {\bf Changer Device}, and +the path to the {\bf Changer Command} to correspond to the values used on your +system. + + +\label{SpecifyingSlots} +\subsection*{Specifying Slots When Labeling} +\index[general]{Specifying Slots When Labeling } +\index[general]{Labeling!Specifying Slots When } +\addcontentsline{toc}{subsection}{Specifying Slots When Labeling} + +If you add an {\bf Autochanger = yes} record to the Storage resource in your +Director's configuration file, the Bacula Console will automatically prompt +you for the slot number and whether or not the Volume is in the changer when +you {\bf add} or {\bf label} tapes for that Storage device. You must also set +{\bf Autochanger = yes} in the Device resource as we have described above in +order for the autochanger to be used. Please see the +\ilink{Storage Resource}{Autochanger1} in the Director's chapter +and the +\ilink{Device Resource}{Autochanger} in the Storage daemon +chapter for more details on these records. + +Thus all stages of dealing with tapes can be totally automated. 
It is also +possible to set or change the Slot using the {\bf update} command in the +Console and selecting {\bf Volume Parameters} to update. + +Even though all the above configuration statements are specified and correct, +Bacula will attempt to access the autochanger only if a {\bf slot} is non-zero +in the catalog Volume record (with the Volume name). + +If your autochanger has barcode labels, you can label all the Volumes in +your autochanger one after another by using the {\bf label barcodes} command. +For each tape in the changer containing a barcode, Bacula will mount the tape +and then label it with the same name as the barcode. An appropriate Media +record will also be created in the catalog. Any barcode that begins with the +same characters as specified on the ``CleaningPrefix=xxx'' command, will be +treated as a cleaning tape, and will not be labeled. For example with: + +\footnotesize +\begin{verbatim} +Pool { + Name ... + Cleaning Prefix = "CLN" +} +\end{verbatim} +\normalsize + +Any slot containing a barcode of CLNxxxx will be treated as a cleaning tape +and will not be mounted. + +\label{Magazines} + +\subsection*{Dealing with Multiple Magazines} +\index[general]{Dealing with Multiple Magazines } +\index[general]{Magazines!Dealing with Multiple } +\addcontentsline{toc}{subsection}{Dealing with Multiple Magazines} + +If you have several magazines or if you insert or remove cartridges from a +magazine, you will need to notify Bacula of this. By doing so, Bacula will as +a preference, use Volumes that it knows to be in the autochanger before +accessing Volumes that are not in the autochanger. This prevents unneeded +operator intervention. + +If your autochanger has barcodes (machine readable tape labels), the task of +informing Bacula is simple. Every time, you change a magazine, or add or +remove a cartridge from the magazine, simply do + +\footnotesize +\begin{verbatim} +update slots +\end{verbatim} +\normalsize + +in the Console program. 
This will cause Bacula to request the autochanger to +return the current Volume names in the magazine. This will be done without +actually accessing or reading the Volumes because the barcode reader does this +during inventory when the autochanger is first turned on. Bacula will ensure +that any Volumes that are currently marked as being in the magazine are marked +as no longer in the magazine, and the new list of Volumes will be marked as +being in the magazine. In addition, the Slot numbers of the Volumes will be +corrected in Bacula's catalog if they are incorrect (added or moved). + +If you do not have a barcode reader on your autochanger, you have several +alternatives. + +\begin{enumerate} +\item You can manually set the Slot and InChanger flag using the {\bf update + volume} command in the Console (quite painful). +\item You can issue a + + \footnotesize +\begin{verbatim} +update slots scan +\end{verbatim} +\normalsize + + command that will cause Bacula to read the label on each of the cartridges in + the magazine in turn and update the information (Slot, InChanger flag) in the + catalog. This is quite effective but does take time to load each cartridge + into the drive in turn and read the Volume label. +\item You can modify the mtx-changer script so that it simulates an + autochanger with barcodes. See below for more details. + \end{enumerate} + +\label{simulating} + +\subsection*{Simulating Barcodes in your Autochanger} +\index[general]{Autochanger!Simulating Barcodes in your } +\index[general]{Simulating Barcodes in your Autochanger } +\addcontentsline{toc}{subsection}{Simulating Barcodes in your Autochanger} + +You can simulate barcodes in your autochanger by making the {\bf mtx-changer} +script return the same information that an autochanger with barcodes would do. 
+This is done by commenting out the one and only line in the {\bf list)} case, +which is: + +\footnotesize +\begin{verbatim} + ${MTX} -f $ctl status | grep " *Storage Element [0-9]*:.*Full" | awk "{print \$3 \$4}" | sed "s/Full *\(:VolumeTag=\)*//" +\end{verbatim} +\normalsize + +at approximately line 99 by putting a \# in column one of that line, or by +simply deleting it. Then in its place add a new line that prints the contents +of a file. For example: + +\footnotesize +\begin{verbatim} +cat /etc/bacula/changer.volumes +\end{verbatim} +\normalsize + +Be sure to include a full path to the file, which can have any name. The +contents of the file must be of the following format: + +\footnotesize +\begin{verbatim} +1:Volume1 +2:Volume2 +3:Volume3 +... +\end{verbatim} +\normalsize + +Where the 1, 2, 3 are the slot numbers and Volume1, Volume2, ... are the +Volume names in those slots. You can have multiple files that represent the +Volumes in different magazines, and when you change magazines, simply copy the +contents of the correct file into your {\bf /etc/bacula/changer.volumes} file. +There is no need to stop and start Bacula when you change magazines, simply +put the correct data in the file, then run the {\bf update slots} command, and +your autochanger will appear to Bacula to be an autochanger with barcodes. +\label{updateslots} + +\subsection*{The Full Form of the Update Slots Command} +\index[general]{Full Form of the Update Slots Command } +\index[general]{Command!Full Form of the Update Slots } +\addcontentsline{toc}{subsection}{Full Form of the Update Slots Command} + +If you change only one cartridge in the magazine, you may not want to scan all +Volumes, so the {\bf update slots} command (as well as the {\bf update slots +scan} command) has the additional form: + +\footnotesize +\begin{verbatim} +update slots=n1,n2,n3-n4, ... +\end{verbatim} +\normalsize + +where the keyword {\bf scan} can be appended or not. The n1,n2, ... 
represent +Slot numbers to be updated and the form n3-n4 represents a range of Slot +numbers to be updated (e.g. 4-7 will update Slots 4,5,6, and 7). + +This form is particularly useful if you want to do a scan (time expensive) and +restrict the update to one or two slots. + +For example, the command: + +\footnotesize +\begin{verbatim} +update slots=1,6 scan +\end{verbatim} +\normalsize + +will cause Bacula to load the Volume in Slot 1, read its Volume label and +update the Catalog. It will do the same for the Volume in Slot 6. The command: + + +\footnotesize +\begin{verbatim} +update slots=1-3,6 +\end{verbatim} +\normalsize + +will read the barcoded Volume names for slots 1,2,3 and 6 and make the +appropriate updates in the Catalog. If you don't have a barcode reader or have +not modified the mtx-changer script as described above, the above command will +not find any Volume names so will do nothing. +\label{FreeBSD} + +\subsection*{FreeBSD Issues} +\index[general]{Issues!FreeBSD } +\index[general]{FreeBSD Issues } +\addcontentsline{toc}{subsection}{FreeBSD Issues} + +If you are having problems on FreeBSD when Bacula tries to select a tape, and +the message is {\bf Device not configured}, this is because FreeBSD has made +the tape device {\bf /dev/nsa1} disappear when there is no tape mounted in the +autochanger slot. As a consequence, Bacula is unable to open the device. The +solution to the problem is to make sure that some tape is loaded into the tape +drive before starting Bacula. This problem is corrected in Bacula versions +1.32f-5 and later. + +Please see the +\ilink{ Tape Testing}{FreeBSDTapes} chapter of this manual for +{\bf important} information concerning your tape drive before doing the +autochanger testing. 
+\label{AutochangerTesting} + +\subsection*{Testing the Autochanger and Adapting Your mtx-changer Script} +\index[general]{Testing the Autochanger and Adapting Your mtx-changer Script } +\index[general]{Script!Testing the Autochanger and Adapting Your mtx-changer } +\addcontentsline{toc}{subsection}{Testing the Autochanger and Adapting Your +mtx-changer Script} + +Before attempting to use the autochanger with Bacula, it is preferable to +``hand-test'' that the changer works. To do so, we suggest you do the +following commands (assuming that the {\bf mtx-changer} script is installed in +{\bf /etc/bacula/mtx-changer}): + +\begin{description} + +\item [Make sure Bacula is not running.] + \index[sd]{Make sure Bacula is not running. } + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ list \ 0 \ /dev/nst0 \ 0] + \index[sd]{/etc/bacula/mtx-changer \ /dev/sg0 \ list \ 0 \ /dev/nst0 \ 0 + } +This command should print: + +\footnotesize +\begin{verbatim} + 1: + 2: + 3: + ... + +\end{verbatim} +\normalsize + +or one number per line for each slot that is occupied in your changer, and +the number should be terminated by a colon ({\bf :}). If your changer has +barcodes, the barcode will follow the colon. If an error message is printed, +you must resolve the problem (e.g. try a different SCSI control device name +if {\bf /dev/sg0} is incorrect. For example, on FreeBSD systems, the +autochanger SCSI control device is generally {\bf /dev/pass2}. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ slots \ 0 \ /dev/nst0 \ 0] + \index[sd]{/etc/bacula/mtx-changer \ /dev/sg0 \ slots \ 0 \ /dev/nst0 \ 0 + } +This command should return the number of slots in your autochanger. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ unload \ ] + \index[sd]{/etc/bacula/mtx-changer \ /dev/sg0 \ unload \ } + If a tape is loaded, this should cause it to be unloaded. 
+ +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ load \ 3 \ /dev/nst0 \ 0 ] + \index[sd]{/etc/bacula/mtx-changer \ /dev/sg0 \ load \ 3 \ /dev/nst0 \ 0 + } +Assuming you have a tape in slot 3, it will be loaded into the read slot (0). + + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ loaded \ 0 \ /dev/nst0 \ 0] + \index[sd]{/etc/bacula/mtx-changer \ /dev/sg0 \ loaded \ 0 \ /dev/nst0 \ + 0 } +It should print ``3'' + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ unload] + \index[sd]{/etc/bacula/mtx-changer \ /dev/sg0 \ unload } + \end{description} + +Once all the above commands work correctly, assuming that you have the right +{\bf Changer Command} in your configuration, Bacula should be able to operate +the changer. The only remaining area of problems will be if your autoloader +needs some time to get the tape loaded after issuing the command. After the +{\bf mtx-changer} script returns, Bacula will immediately rewind and read the +tape. If Bacula gets rewind I/O errors after a tape change, you will probably +need to insert a {\bf sleep 20} after the {\bf mtx} command, but be careful to +exit the script with a zero status by adding {\bf exit 0} after any additional +commands you add to the script. This is because Bacula checks the return +status of the script, which should be zero if all went well. + +You can test whether or not you need a {\bf sleep} by putting the following +commands into a file and running it as a script: + +\footnotesize +\begin{verbatim} +#!/bin/sh +/etc/bacula/mtx-changer /dev/sg0 unload +/etc/bacula/mtx-changer /dev/sg0 load 3 +mt -f /dev/st0 rewind +mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +If the above script runs, you probably have no timing problems. If it does not +run, start by putting a {\bf sleep 30} or possibly a {\bf sleep 60} in the +script just after the mtx-changer load command. If that works, then you should +move the sleep into the actual {\bf mtx-changer} script so that it will be +effective when Bacula runs. 
+ +A second problem that comes up with a small number of autochangers is that +they need to have the cartridge ejected before it can be removed. If this is +the case, the {\bf load 3} will never succeed regardless of how long you wait. +If this seems to be your problem, you can insert an eject just after the +unload so that the script looks like: + +\footnotesize +\begin{verbatim} +#!/bin/sh +/etc/bacula/mtx-changer /dev/sg0 unload +mt -f /dev/st0 offline +/etc/bacula/mtx-changer /dev/sg0 load 3 +mt -f /dev/st0 rewind +mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +Obviously, if you need the {\bf offline} command, you should move it into the +mtx-changer script ensuring that you save the status of the {\bf mtx} command +or always force an {\bf exit 0} from the script, because Bacula checks the +return status of the script. + +As noted earlier, there are several scripts in {\bf +\lt{}bacula-source\gt{}/examples/devices} that implement the above features, +so they may be a help to you in getting your script to work. + +If Bacula complains ``Rewind error on /dev/nst0. ERR=Input/output error.'' you +most likely need more sleep time in your {\bf mtx-changer} before returning to +Bacula after a load command has been completed. + +\label{using} + +\subsection*{Using the Autochanger} +\index[general]{Using the Autochanger } +\index[general]{Autochanger!Using the } +\addcontentsline{toc}{subsection}{Using the Autochanger} + +Let's assume that you have properly defined the necessary Storage daemon +Device records, and you have added the {\bf Autochanger = yes} record to the +Storage resource in your Director's configuration file. + +Now you fill your autochanger with say six blank tapes. + +What do you do to make Bacula access those tapes? + +One strategy is to prelabel each of the tapes. 
Do so by starting Bacula, then +with the Console program, enter the {\bf label} command: + +\footnotesize +\begin{verbatim} +./console +Connecting to Director rufus:8101 +1000 OK: rufus-dir Version: 1.26 (4 October 2002) +*label +\end{verbatim} +\normalsize + +it will then print something like: + +\footnotesize +\begin{verbatim} +Using default Catalog name=BackupDB DB=bacula +The defined Storage resources are: + 1: Autochanger + 2: File +Select Storage resource (1-2): 1 +\end{verbatim} +\normalsize + +I select the autochanger (1), and it prints: + +\footnotesize +\begin{verbatim} +Enter new Volume name: TestVolume1 +Enter slot (0 for none): 1 +\end{verbatim} +\normalsize + +where I entered {\bf TestVolume1} for the tape name, and slot {\bf 1} for the +slot. It then asks: + +\footnotesize +\begin{verbatim} +Defined Pools: + 1: Default + 2: File +Select the Pool (1-2): 1 +\end{verbatim} +\normalsize + +I select the Default pool. This will be automatically done if you only have a +single pool, then Bacula will proceed to unload any loaded volume, load the +volume in slot 1 and label it. In this example, nothing was in the drive, so +it printed: + +\footnotesize +\begin{verbatim} +Connecting to Storage daemon Autochanger at localhost:9103 ... +Sending label command ... +3903 Issuing autochanger "load slot 1" command. +3000 OK label. Volume=TestVolume1 Device=/dev/nst0 +Media record for Volume=TestVolume1 successfully created. +Requesting mount Autochanger ... +3001 Device /dev/nst0 is mounted with Volume TestVolume1 +You have messages. +* +\end{verbatim} +\normalsize + +You may then proceed to label the other volumes. The messages will change +slightly because Bacula will unload the volume (just labeled TestVolume1) +before loading the next volume to be labeled. + +Once all your Volumes are labeled, Bacula will automatically load them as they +are needed. 
+ +To ``see'' how you have labeled your Volumes, simply enter the {\bf list +volumes} command from the Console program, which should print something like +the following: + +\footnotesize +\begin{verbatim} +*{\bf list volumes} +Using default Catalog name=BackupDB DB=bacula +Defined Pools: + 1: Default + 2: File +Select the Pool (1-2): 1 ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +| MedId | VolName | MedTyp | VolStat | Bites | LstWrt | VolReten | Recyc | Slot | ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +| 1 | TestVol1 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 1 | +| 2 | TestVol2 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 2 | +| 3 | TestVol3 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 3 | +| ... | ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +\end{verbatim} +\normalsize + +\label{Barcodes} + +\subsection*{Barcode Support} +\index[general]{Support!Barcode } +\index[general]{Barcode Support } +\addcontentsline{toc}{subsection}{Barcode Support} + +Bacula provides barcode support with two Console commands, {\bf label +barcodes} and {\bf update slots}. + +The {\bf label barcodes} will cause Bacula to read the barcodes of all the +cassettes that are currently installed in the magazine (cassette holder) using +the {\bf mtx-changer} {\bf list} command. Each cassette is mounted in turn and +labeled with the same Volume name as the barcode. + +The {\bf update slots} command will first obtain the list of cassettes and +their barcodes from {\bf mtx-changer}. Then it will find each volume in turn +in the catalog database corresponding to the barcodes and set its Slot to +correspond to the value just read. If the Volume is not in the catalog, then +nothing will be done. This command is useful for synchronizing Bacula with the +current magazine in case you have changed magazines or in case you have moved +cassettes from one slot to another. 
+ +The {\bf Cleaning Prefix} statement can be used in the Pool resource to define +a Volume name prefix, which if it matches that of the Volume (barcode) will +cause that Volume to be marked with a VolStatus of {\bf Cleaning}. This will +prevent Bacula from attempting to write on the Volume. + +\label{interface} + +\subsection*{Bacula Autochanger Interface} +\index[general]{Interface!Bacula Autochanger } +\index[general]{Bacula Autochanger Interface } +\addcontentsline{toc}{subsection}{Bacula Autochanger Interface} + +Bacula calls the autochanger script that you specify on the {\bf Changer +Device} statement. Normally this script will be the {\bf mtx-changer} script +that we can provide, but it can in fact be any program. The only requirements +are that the ``commands'' that Bacula uses are {\bf loaded}, {\bf load}, {\bf +unload}, {\bf list}, and {\bf slots}. In addition, +each of those commands must return the information in the precise format as +specified below: + +\footnotesize +\begin{verbatim} +- Currently the changer commands used are: + loaded -- returns number of the slot that is loaded in + the drive or 0 if the drive is empty. + load -- loads a specified slot (note, some autochangers + require a 30 second pause after this command) into + the drive. + unload -- unloads the device (returns cassette to its slot). + list -- returns one line for each cassette in the autochanger + in the format :. Where + the {\bf slot} is the non-zero integer representing + the slot number, and {\bf barcode} is the barcode + associated with the cassette if it exists and if you + autoloader supports barcodes. Otherwise the barcode + field is blank. + slots -- returns total number of slots in the autochanger. +\end{verbatim} +\normalsize + +Bacula checks the exit status of the program called, and if it is zero, the +data is accepted. If the exit status is non-zero, Bacula ignores any +information returned and treats the drive as if it is not an autochanger. 
diff --git a/docs/manual-de/bootstrap.tex b/docs/manual-de/bootstrap.tex new file mode 100644 index 00000000..e343b105 --- /dev/null +++ b/docs/manual-de/bootstrap.tex @@ -0,0 +1,388 @@ +%% +%% + +\section*{The Bootstrap File} +\label{_ChapterStart43} +\index[general]{File!Bootstrap } +\index[general]{Bootstrap File } +\addcontentsline{toc}{section}{Bootstrap File} + +The information in this chapter is provided so that you may either create your +own bootstrap files, or so that you can edit a bootstrap file produced by {\bf +Bacula}. However, normally the bootstrap file will be automatically created +for you during the +\ilink{restore}{_ChapterStart13} command in the Console program, or +by using a +\ilink{ Write Bootstrap}{writebootstrap} record in your Backup +Jobs, and thus you will never need to know the details of this file. + +The {\bf bootstrap} file contains ASCII information that permits precise +specification of what files should be restored. It is a relatively compact +form of specifying the information, is human readable, and can be edited with +any text editor. + +\subsection*{File Format} +\index[general]{Format!File } +\index[general]{File Format } +\addcontentsline{toc}{subsection}{File Format} + +The general format of a {\bf bootstrap} file is: + +{\bf \lt{}keyword\gt{}= \lt{}value\gt{}} + +Where each {\bf keyword} and the {\bf value} specify which files to restore. +More precisely the {\bf keyword} and their {\bf values} serve to limit which +files will be restored and thus act as a filter. The absence of a keyword +means that all records will be accepted. + +Blank lines and lines beginning with a pound sign (\#) in the bootstrap file +are ignored. + +There are keywords which permit filtering by Volume, Client, Job, FileIndex, +Session Id, Session Time, ... + +The more keywords that are specified, the more selective the specification of +which files to restore will be. In fact, each keyword is {\bf AND}ed with +other keywords that may be present. 
+ +For example, + +\footnotesize +\begin{verbatim} +Volume = Test-001 +VolSessionId = 1 +VolSessionTime = 108927638 +\end{verbatim} +\normalsize + +directs the Storage daemon (or the {\bf bextract} program) to restore only +those files on Volume Test-001 {\bf AND} having VolumeSessionId equal to one +{\bf AND} having VolumeSession time equal to 108927638. + +The full set of permitted keywords presented in the order in which they are +matched against the Volume records are: + +\begin{description} + +\item [Volume] + \index[fd]{Volume } + The value field specifies what Volume the following commands apply to. Each +Volume specification becomes the current Volume, to which all the following +commands apply until a new current Volume (if any) is specified. If the +Volume name contains spaces, it should be enclosed in quotes. + +\item [Count] + \index[fd]{Count } + The value is the total number of files that will be restored for this Volume. +This allows the Storage daemon to know when to stop reading the Volume. + +\item [VolFile] + \index[fd]{VolFile } + The value is a file number, a list of file numbers, or a range of file +numbers to match on the current Volume. The file number represents +the physical file on the Volume where the data is stored. For a tape volume, +this record is used to position to the correct starting file, and once the +tape is past the last specified file, reading will stop. + +\item [VolBlock] + \index[fd]{VolBlock } + The value is a block number, a list of block numbers, or a range of block +numbers to match on the current Volume. The block number represents +the physical block on the Volume where the data is stored. This record is +currently not used. + +\item [VolSessionTime] + \index[fd]{VolSessionTime } + The value specifies a Volume Session Time to be matched from the current +volume. 
+ +\item [VolSessionId] + \index[fd]{VolSessionId } + The value specifies a VolSessionId, a list of volume session ids, or a range +of volume session ids to be matched from the current Volume. Each +VolSessionId and VolSessionTime pair corresponds to a unique Job that is +backed up on the Volume. + +\item [JobId] + \index[fd]{JobId } + The value specifies a JobId, list of JobIds, or range of JobIds to be +selected from the current Volume. Note, the JobId may not be unique if you +have multiple Directors, or if you have reinitialized your database. The +JobId filter works only if you do not run multiple simultaneous jobs. + +\item [Job] + \index[fd]{Job } + The value specifies a Job name or list of Job names to be matched on the +current Volume. The Job corresponds to a unique VolSessionId and +VolSessionTime pair. However, the Job is perhaps a bit more readable by +humans. Standard regular expressions (wildcards) may be used to match Job +names. The Job filter works only if you do not run multiple simultaneous +jobs. + +\item [Client] + \index[fd]{Client } + The value specifies a Client name or list of Clients to will be matched on +the current Volume. Standard regular expressions (wildcards) may be used to +match Client names. The Client filter works only if you do not run multiple +simultaneous jobs. + +\item [FileIndex] + \index[fd]{FileIndex } + The value specifies a FileIndex, list of FileIndexes, or range of FileIndexes +to be selected from the current Volume. Each file (data) stored on a Volume +within a Session has a unique FileIndex. For each Session, the first file +written is assigned FileIndex equal to one and incremented for each file +backed up. + +This for a given Volume, the triple VolSessionId, VolSessionTime, and +FileIndex uniquely identifies a file stored on the Volume. Multiple copies of +the same file may be stored on the same Volume, but for each file, the triple +VolSessionId, VolSessionTime, and FileIndex will be unique. 
This triple is +stored in the Catalog database for each file. + +\item [Slot] + \index[fd]{Slot } + The value specifies the autochanger slot. There may be only a single {\bf +Slot} specification for each Volume. + +\item [Stream] + \index[fd]{Stream } + The value specifies a Stream, a list of Streams, or a range of Streams to be +selected from the current Volume. Unless you really know what you are doing +(the internals of {\bf Bacula}, you should avoid this specification. + +\item [*JobType] + \index[fd]{*JobType } + Not yet implemented. + +\item [*JobLevel] + \index[fd]{*JobLevel } + Not yet implemented. +\end{description} + +The {\bf Volume} record is a bit special in that it must be the first record. +The other keyword records may appear in any order and any number following a +Volume record. + +Multiple Volume records may be specified in the same bootstrap file, but each +one starts a new set of filter criteria for the Volume. + +In processing the bootstrap file within the current Volume, each filter +specified by a keyword is {\bf AND}ed with the next. Thus, + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine" +FileIndex = 1 +\end{verbatim} +\normalsize + +will match records on Volume {\bf Test-01} {\bf AND} Client records for {\bf +My machine} {\bf AND} FileIndex equal to {\bf one}. + +Multiple occurrences of the same record are {\bf OR}ed together. Thus, + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine" +Client = "Backup machine" +FileIndex = 1 +\end{verbatim} +\normalsize + +will match records on Volume {\bf Test-01} {\bf AND} (Client records for {\bf +My machine} {\bf OR} {\bf Backup machine}) {\bf AND} FileIndex equal to {\bf +one}. + +For integer values, you may supply a range or a list, and for all other values +except Volumes, you may specify a list. A list is equivalent to multiple +records of the same keyword. 
For example, + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine", "Backup machine" +FileIndex = 1-20, 35 +\end{verbatim} +\normalsize + +will match records on Volume {\bf Test-01} {\bf AND} {\bf (}Client records for +{\bf My machine} {\bf OR} {\bf Backup machine}{\bf )} {\bf AND} {\bf +(}FileIndex 1 {\bf OR} 2 {\bf OR} 3 ... {\bf OR} 20 {\bf OR} 35{\bf )}. + +As previously mentioned above, there may be multiple Volume records in the +same bootstrap file. Each new Volume definition begins a new set of filter +conditions that apply to that Volume and will be {\bf OR}ed with any other +Volume definitions. + +As an example, suppose we query for the current set of tapes to restore all +files on Client {\bf Rufus} using the {\bf query} command in the console +program: + +\footnotesize +\begin{verbatim} +Using default Catalog name=MySQL DB=bacula +*query +Available queries: + 1: List Job totals: + 2: List where a file is saved: + 3: List where the most recent copies of a file are saved: + 4: List total files/bytes by Job: + 5: List total files/bytes by Volume: + 6: List last 10 Full Backups for a Client: + 7: List Volumes used by selected JobId: + 8: List Volumes to Restore All Files: +Choose a query (1-8): 8 +Enter Client Name: Rufus ++-------+------------------+------------+-----------+----------+------------+ +| JobId | StartTime | VolumeName | StartFile | VolSesId | VolSesTime | ++-------+------------------+------------+-----------+----------+------------+ +| 154 | 2002-05-30 12:08 | test-02 | 0 | 1 | 1022753312 | +| 202 | 2002-06-15 10:16 | test-02 | 0 | 2 | 1024128917 | +| 203 | 2002-06-15 11:12 | test-02 | 3 | 1 | 1024132350 | +| 204 | 2002-06-18 08:11 | test-02 | 4 | 1 | 1024380678 | ++-------+------------------+------------+-----------+----------+------------+ +\end{verbatim} +\normalsize + +The output shows us that there are four Jobs that must be restored. 
The first +one is a Full backup, and the following three are all Incremental backups. + +The following bootstrap file will restore those files: + +\footnotesize +\begin{verbatim} +Volume=test-02 +VolSessionId=1 +VolSessionTime=1022753312 +Volume=test-02 +VolSessionId=2 +VolSessionTime=1024128917 +Volume=test-02 +VolSessionId=1 +VolSessionTime=1024132350 +Volume=test-02 +VolSessionId=1 +VolSessionTime=1024380678 +\end{verbatim} +\normalsize + +As a final example, assume that the initial Full save spanned two Volumes. The +output from {\bf query} might look like: + +\footnotesize +\begin{verbatim} ++-------+------------------+------------+-----------+----------+------------+ +| JobId | StartTime | VolumeName | StartFile | VolSesId | VolSesTime | ++-------+------------------+------------+-----------+----------+------------+ +| 242 | 2002-06-25 16:50 | File0003 | 0 | 1 | 1025016612 | +| 242 | 2002-06-25 16:50 | File0004 | 0 | 1 | 1025016612 | +| 243 | 2002-06-25 16:52 | File0005 | 0 | 2 | 1025016612 | +| 246 | 2002-06-25 19:19 | File0006 | 0 | 2 | 1025025494 | ++-------+------------------+------------+-----------+----------+------------+ +\end{verbatim} +\normalsize + +and the following bootstrap file would restore those files: + +\footnotesize +\begin{verbatim} +Volume=File0003 +VolSessionId=1 +VolSessionTime=1025016612 +Volume=File0004 +VolSessionId=1 +VolSessionTime=1025016612 +Volume=File0005 +VolSessionId=2 +VolSessionTime=1025016612 +Volume=File0006 +VolSessionId=2 +VolSessionTime=1025025494 +\end{verbatim} +\normalsize + +\subsection*{Automatic Generation of Bootstrap Files} +\index[general]{Files!Automatic Generation of Bootstrap } +\index[general]{Automatic Generation of Bootstrap Files } +\addcontentsline{toc}{subsection}{Automatic Generation of Bootstrap Files} + +One thing that is probably worth knowing: the bootstrap files that are +generated automatically at the end of the job are not as optimized as those +generated by the restore command. 
This is because the ones created at the end
+of the job contain all files written to the Volume for that job.
A bsr file might look like the following: + +\footnotesize +\begin{verbatim} +Volume="Vol001" +VolSessionId=10 +VolSessionTime=1080847820 +\end{verbatim} +\normalsize + +If you know how many files are backed up (on the job report), you can +enormously speed up the selection by adding (let's assume there are 157 +files): + +\footnotesize +\begin{verbatim} +FileIndex=1-157 +Count=157 +\end{verbatim} +\normalsize + +Finally, if you know the File number where the Job starts, you can also cause +bcopy to forward space to the right file without reading every record: + +\footnotesize +\begin{verbatim} +VolFile=20 +\end{verbatim} +\normalsize + +There is nothing magic or complicated about a BSR file. Parsing it and +properly applying it within Bacula *is* magic, but you don't need to worry +about that. + +If you want to see a *real* bsr file, simply fire up the {\bf restore} command +in the console program, select something, then answer no when it prompts to +run the job. Then look at the file {\bf restore.bsr} in your working +directory. diff --git a/docs/manual-de/bugs.tex b/docs/manual-de/bugs.tex new file mode 100644 index 00000000..2235674b --- /dev/null +++ b/docs/manual-de/bugs.tex @@ -0,0 +1,22 @@ +%% +%% + +\section*{Bacula Bugs} +\label{_ChapterStart4} +\index[general]{Bacula Bugs } +\index[general]{Bugs!Bacula } +\addcontentsline{toc}{section}{Bacula Bugs} + +Well fortunately there are not too many bugs, but thanks to Dan Langille, we +have a +\elink{bugs database}{http://bugs.bacula.org} where bugs are reported. +Generally, when a bug is fixed, a patch for the currently released version will +be attached to the bug report. + +The directory {\bf patches} in the current CVS always contains a list of +the patches that have been created for the previously released version +of Bacula. In addition, the file {\bf patches-version-number} in the +{\bf patches} directory contains a summary of each of the patches. 
+ +A ``raw'' list of the current task list and known issues can be found in {\bf +kernstodo} in the main Bacula source directory. diff --git a/docs/manual-de/catalog.tex b/docs/manual-de/catalog.tex new file mode 100644 index 00000000..eebe59bc --- /dev/null +++ b/docs/manual-de/catalog.tex @@ -0,0 +1,929 @@ +%% +%% + +\section*{Catalog Services} +\label{_ChapterStart30} +\index[general]{Services!Catalog } +\index[general]{Catalog Services } +\addcontentsline{toc}{section}{Catalog Services} + +\subsection*{General} +\index[general]{General } +\addcontentsline{toc}{subsection}{General} + +This chapter is intended to be a technical discussion of the Catalog services +and as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. + +The {\bf Bacula Catalog} services consist of the programs that provide the SQL +database engine for storage and retrieval of all information concerning files +that were backed up and their locations on the storage media. + +We have investigated the possibility of using the following SQL engines for +Bacula: Beagle, mSQL, GNU SQL, PostgreSQL, SQLite, Oracle, and MySQL. Each +presents certain problems with either licensing or maturity. At present, we +have chosen for development purposes to use MySQL, PostgreSQL and SQLite. +MySQL was chosen because it is fast, proven to be reliable, widely used, and +actively being developed. MySQL is released under the GNU GPL license. +PostgreSQL was chosen because it is a full-featured, very mature database, and +because Dan Langille did the Bacula driver for it. PostgreSQL is distributed +under the BSD license. SQLite was chosen because it is small, efficient, and +can be directly embedded in {\bf Bacula} thus requiring much less effort from +the system administrator or person building {\bf Bacula}. 
In our testing +SQLite has performed very well, and for the functions that we use, it has +never encountered any errors except that it does not appear to handle +databases larger than 2GBytes. + +The Bacula SQL code has been written in a manner that will allow it to be +easily modified to support any of the current SQL database systems on the +market (for example: mSQL, iODBC, unixODBC, Solid, OpenLink ODBC, EasySoft +ODBC, InterBase, Oracle8, Oracle7, and DB2). + +If you do not specify either {\bf \verb{--{with-mysql} or {\bf \verb{--{with-postgresql} or +{\bf \verb{--{with-sqlite} on the ./configure line, Bacula will use its minimalist +internal database. This database is kept for build reasons but is no longer +supported. Bacula {\bf requires} one of the three databases (MySQL, +PostgreSQL, or SQLite) to run. + +\subsubsection*{Filenames and Maximum Filename Length} +\index[general]{Filenames and Maximum Filename Length } +\index[general]{Length!Filenames and Maximum Filename } +\addcontentsline{toc}{subsubsection}{Filenames and Maximum Filename Length} + +In general, either MySQL, PostgreSQL or SQLite permit storing arbitrary long +path names and file names in the catalog database. In practice, there still +may be one or two places in the Catalog interface code that restrict the +maximum path length to 512 characters and the maximum file name length to 512 +characters. These restrictions are believed to have been removed. Please note, +these restrictions apply only to the Catalog database and thus to your ability +to list online the files saved during any job. All information received and +stored by the Storage daemon (normally on tape) allows and handles arbitrarily +long path and filenames. 
+ +\subsubsection*{Installing and Configuring MySQL} +\index[general]{MySQL!Installing and Configuring } +\index[general]{Installing and Configuring MySQL } +\addcontentsline{toc}{subsubsection}{Installing and Configuring MySQL} + +For the details of installing and configuring MySQL, please see the +\ilink{Installing and Configuring MySQL}{_ChapterStart} chapter of +this manual. + +\subsubsection*{Installing and Configuring PostgreSQL} +\index[general]{PostgreSQL!Installing and Configuring } +\index[general]{Installing and Configuring PostgreSQL } +\addcontentsline{toc}{subsubsection}{Installing and Configuring PostgreSQL} + +For the details of installing and configuring PostgreSQL, please see the +\ilink{Installing and Configuring PostgreSQL}{_ChapterStart10} +chapter of this manual. + +\subsubsection*{Installing and Configuring SQLite} +\index[general]{Installing and Configuring SQLite } +\index[general]{SQLite!Installing and Configuring } +\addcontentsline{toc}{subsubsection}{Installing and Configuring SQLite} + +For the details of installing and configuring SQLite, please see the +\ilink{Installing and Configuring SQLite}{_ChapterStart33} chapter of +this manual. + +\subsubsection*{Internal Bacula Catalog} +\index[general]{Catalog!Internal Bacula } +\index[general]{Internal Bacula Catalog } +\addcontentsline{toc}{subsubsection}{Internal Bacula Catalog} + +Please see the +\ilink{Internal Bacula Database}{_ChapterStart42} chapter of this +manual for more details. + +\subsubsection*{Database Table Design} +\index[general]{Design!Database Table } +\index[general]{Database Table Design } +\addcontentsline{toc}{subsubsection}{Database Table Design} + +All discussions that follow pertain to the MySQL database. The details for the +PostgreSQL and SQLite databases are essentially identical except for that all +fields in the SQLite database are stored as ASCII text and some of the +database creation statements are a bit different. 
The details of the internal +Bacula catalog are not discussed here. + +Because the Catalog database may contain very large amounts of data for large +sites, we have made a modest attempt to normalize the data tables to reduce +redundant information. While reducing the size of the database significantly, +it does, unfortunately, add some complications to the structures. + +In simple terms, the Catalog database must contain a record of all Jobs run by +Bacula, and for each Job, it must maintain a list of all files saved, with +their File Attributes (permissions, create date, ...), and the location and +Media on which the file is stored. This is seemingly a simple task, but it +represents a huge amount interlinked data. Note: the list of files and their +attributes is not maintained when using the internal Bacula database. The data +stored in the File records, which allows the user or administrator to obtain a +list of all files backed up during a job, is by far the largest volume of +information put into the Catalog database. + +Although the Catalog database has been designed to handle backup data for +multiple clients, some users may want to maintain multiple databases, one for +each machine to be backed up. This reduces the risk of confusion of accidental +restoring a file to the wrong machine as well as reducing the amount of data +in a single database, thus increasing efficiency and reducing the impact of a +lost or damaged database. + +\subsection*{Sequence of Creation of Records for a Save Job} +\index[general]{Sequence of Creation of Records for a Save Job } +\index[general]{Job!Sequence of Creation of Records for a Save } +\addcontentsline{toc}{subsection}{Sequence of Creation of Records for a Save +Job} + +Start with StartDate, ClientName, Filename, Path, Attributes, MediaName, +MediaCoordinates. (PartNumber, NumParts). In the steps below, ``Create new'' +means to create a new record whether or not it is unique. 
``Create unique'' +means each record in the database should be unique. Thus, one must first +search to see if the record exists, and only if not should a new one be +created, otherwise the existing RecordId should be used. + +\begin{enumerate} +\item Create new Job record with StartDate; save JobId +\item Create unique Media record; save MediaId +\item Create unique Client record; save ClientId +\item Create unique Filename record; save FilenameId +\item Create unique Path record; save PathId +\item Create unique Attribute record; save AttributeId + store ClientId, FilenameId, PathId, and Attributes +\item Create new File record + store JobId, AttributeId, MediaCoordinates, etc +\item Repeat steps 4 through 8 for each file +\item Create a JobMedia record; save MediaId +\item Update Job record filling in EndDate and other Job statistics + \end{enumerate} + +\subsection*{Database Tables} +\index[general]{Database Tables } +\index[general]{Tables!Database } +\addcontentsline{toc}{subsection}{Database Tables} + +\addcontentsline{lot}{table}{Filename Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Filename } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{l| }{\bf Data Type } +& \multicolumn{1}{l| }{\bf Remark } \\ + \hline +{FilenameId } & {integer } & {Primary Key } \\ + \hline +{Name } & {Blob } & {Filename } +\\ \hline + +\end{longtable} + +The {\bf Filename} table shown above contains the name of each file backed up +with the path removed. If different directories or machines contain the same +filename, only one copy will be saved in this table. 
+ +\ + +\addcontentsline{lot}{table}{Path Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Path } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{PathId } & {integer } & {Primary Key } \\ + \hline +{Path } & {Blob } & {Full Path } +\\ \hline + +\end{longtable} + +The {\bf Path} table contains shown above the path or directory names of all +directories on the system or systems. The filename and any MSDOS disk name are +stripped off. As with the filename, only one copy of each directory name is +kept regardless of how many machines or drives have the same directory. These +path names should be stored in Unix path name format. + +Some simple testing on a Linux file system indicates that separating the +filename and the path may be more complication than is warranted by the space +savings. For example, this system has a total of 89,097 files, 60,467 of which +have unique filenames, and there are 4,374 unique paths. + +Finding all those files and doing two stats() per file takes an average wall +clock time of 1 min 35 seconds on a 400MHz machine running RedHat 6.1 Linux. + +Finding all those files and putting them directly into a MySQL database with +the path and filename defined as TEXT, which is variable length up to 65,535 +characters takes 19 mins 31 seconds and creates a 27.6 MByte database. + +Doing the same thing, but inserting them into Blob fields with the filename +indexed on the first 30 characters and the path name indexed on the 255 (max) +characters takes 5 mins 18 seconds and creates a 5.24 MB database. Rerunning +the job (with the database already created) takes about 2 mins 50 seconds. + +Running the same as the last one (Path and Filename Blob), but Filename +indexed on the first 30 characters and the Path on the first 50 characters +(linear search done there after) takes 5 mins on the average and creates a 3.4 +MB database. 
Rerunning with the data already in the DB takes 3 mins 35 +seconds. + +Finally, saving only the full path name rather than splitting the path and the +file, and indexing it on the first 50 characters takes 6 mins 43 seconds and +creates a 7.35 MB database. + +\ + +\addcontentsline{lot}{table}{File Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf File } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{FileId } & {integer } & {Primary Key } \\ + \hline +{FileIndex } & {integer } & {The sequential file number in the Job } \\ + \hline +{JobId } & {integer } & {Link to Job Record } \\ + \hline +{PathId } & {integer } & {Link to Path Record } \\ + \hline +{FilenameId } & {integer } & {Link to Filename Record } \\ + \hline +{MarkId } & {integer } & {Used to mark files during Verify Jobs } \\ + \hline +{LStat } & {tinyblob } & {File attributes in base64 encoding } \\ + \hline +{MD5 } & {tinyblob } & {MD5 signature in base64 encoding } +\\ \hline + +\end{longtable} + +The {\bf File} table shown above contains one entry for each file backed up by +Bacula. Thus a file that is backed up multiple times (as is normal) will have +multiple entries in the File table. This will probably be the table with the +most number of records. Consequently, it is essential to keep the size of this +record to an absolute minimum. At the same time, this table must contain all +the information (or pointers to the information) about the file and where it +is backed up. Since a file may be backed up many times without having changed, +the path and filename are stored in separate tables. + +This table contains by far the largest amount of information in the Catalog +database, both from the stand point of number of records, and the stand point +of total database size. 
As a consequence, the user must take care to +periodically reduce the number of File records using the {\bf retention} +command in the Console program. + +\ + +\addcontentsline{lot}{table}{Job Table Layout} +\begin{longtable}{|l|l|p{2.5in}|} + \hline +\multicolumn{3}{|l| }{\bf Job } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{JobId } & {integer } & {Primary Key } \\ + \hline +{Job } & {tinyblob } & {Unique Job Name } \\ + \hline +{Name } & {tinyblob } & {Job Name } \\ + \hline +{PurgedFiles } & {tinyint } & {Used by Bacula for purging/retention periods +} \\ + \hline +{Type } & {binary(1) } & {Job Type: Backup, Copy, Clone, Archive, Migration +} \\ + \hline +{Level } & {binary(1) } & {Job Level } \\ + \hline +{ClientId } & {integer } & {Client index } \\ + \hline +{JobStatus } & {binary(1) } & {Job Termination Status } \\ + \hline +{SchedTime } & {datetime } & {Time/date when Job scheduled } \\ + \hline +{StartTime } & {datetime } & {Time/date when Job started } \\ + \hline +{EndTime } & {datetime } & {Time/date when Job ended } \\ + \hline +{JobTDate } & {bigint } & {Start day in Unix format but 64 bits; used for +Retention period. 
} \\ + \hline +{VolSessionId } & {integer } & {Unique Volume Session ID } \\ + \hline +{VolSessionTime } & {integer } & {Unique Volume Session Time } \\ + \hline +{JobFiles } & {integer } & {Number of files saved in Job } \\ + \hline +{JobBytes } & {bigint } & {Number of bytes saved in Job } \\ + \hline +{JobErrors } & {integer } & {Number of errors during Job } \\ + \hline +{JobMissingFiles } & {integer } & {Number of files not saved (not yet used) } +\\ + \hline +{PoolId } & {integer } & {Link to Pool Record } \\ + \hline +{FileSetId } & {integer } & {Link to FileSet Record } \\ + \hline +{PurgedFiles } & {tiny integer } & {Set when all File records purged } \\ + \hline +{HasBase } & {tiny integer } & {Set when Base Job run } +\\ \hline + +\end{longtable} + +The {\bf Job} table contains one record for each Job run by Bacula. Thus +normally, there will be one per day per machine added to the database. Note, +the JobId is used to index Job records in the database, and it often is shown +to the user in the Console program. However, care must be taken with its use +as it is not unique from database to database. For example, the user may have +a database for Client data saved on machine Rufus and another database for +Client data saved on machine Roxie. In this case, the two database will each +have JobIds that match those in another database. For a unique reference to a +Job, see Job below. + +The Name field of the Job record corresponds to the Name resource record given +in the Director's configuration file. Thus it is a generic name, and it will +be normal to find many Jobs (or even all Jobs) with the same Name. + +The Job field contains a combination of the Name and the schedule time of the +Job by the Director. Thus for a given Director, even with multiple Catalog +databases, the Job will contain a unique name that represents the Job. + +For a given Storage daemon, the VolSessionId and VolSessionTime form a unique +identification of the Job. 
This will be the case even if multiple Directors +are using the same Storage daemon. + +The Job Type (or simply Type) can have one of the following values: + +\addcontentsline{lot}{table}{Job Types} +\begin{longtable}{|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\ + \hline +{B } & {Backup Job } \\ + \hline +{V } & {Verify Job } \\ + \hline +{R } & {Restore Job } \\ + \hline +{C } & {Console program (not in database) } \\ + \hline +{D } & {Admin Job } \\ + \hline +{A } & {Archive Job (not implemented) } +\\ \hline + +\end{longtable} + +The JobStatus field specifies how the job terminated, and can be one of the +following: + +\addcontentsline{lot}{table}{Job Statuses} +\begin{longtable}{|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\ + \hline +{C } & {Created but not yet running } \\ + \hline +{R } & {Running } \\ + \hline +{B } & {Blocked } \\ + \hline +{T } & {Terminated normally } \\ + \hline +{E } & {Terminated in Error } \\ + \hline +{e } & {Non-fatal error } \\ + \hline +{f } & {Fatal error } \\ + \hline +{D } & {Verify Differences } \\ + \hline +{A } & {Canceled by the user } \\ + \hline +{F } & {Waiting on the File daemon } \\ + \hline +{S } & {Waiting on the Storage daemon } \\ + \hline +{m } & {Waiting for a new Volume to be mounted } \\ + \hline +{M } & {Waiting for a Mount } \\ + \hline +{s } & {Waiting for Storage resource } \\ + \hline +{j } & {Waiting for Job resource } \\ + \hline +{c } & {Waiting for Client resource } \\ + \hline +{d } & {Wating for Maximum jobs } \\ + \hline +{t } & {Waiting for Start Time } \\ + \hline +{p } & {Waiting for higher priority job to finish } +\\ \hline + +\end{longtable} + +\ + +\addcontentsline{lot}{table}{File Sets Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf FileSet } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark 
} \\ + \hline +{FileSetId } & {integer } & {Primary Key } \\ + \hline +{FileSet } & {tinyblob } & {FileSet name } \\ + \hline +{MD5 } & {tinyblob } & {MD5 checksum of FileSet } \\ + \hline +{CreateTime } & {datetime } & {Time and date Fileset created } +\\ \hline + +\end{longtable} + +The {\bf FileSet} table contains one entry for each FileSet that is used. The +MD5 signature is kept to ensure that if the user changes anything inside the +FileSet, it will be detected and the new FileSet will be used. This is +particularly important when doing an incremental update. If the user deletes a +file or adds a file, we need to ensure that a Full backup is done prior to the +next incremental. + +\ + +\addcontentsline{lot}{table}{JobMedia Table Layout} +\begin{longtable}{|l|l|p{2.5in}|} + \hline +\multicolumn{3}{|l| }{\bf JobMedia } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{JobMediaId } & {integer } & {Primary Key } \\ + \hline +{JobId } & {integer } & {Link to Job Record } \\ + \hline +{MediaId } & {integer } & {Link to Media Record } \\ + \hline +{FirstIndex } & {integer } & {The index (sequence number) of the first file +written for this Job to the Media } \\ + \hline +{LastIndex } & {integer } & {The index of the last file written for this +Job to the Media } \\ + \hline +{StartFile } & {integer } & {The physical media (tape) file number of the +first block written for this Job } \\ + \hline +{EndFile } & {integer } & {The physical media (tape) file number of the +last block written for this Job } \\ + \hline +{StartBlock } & {integer } & {The number of the first block written for +this Job } \\ + \hline +{EndBlock } & {integer } & {The number of the last block written for this +Job } \\ + \hline +{VolIndex } & {integer } & {The Volume use sequence number within the Job } +\\ \hline + +\end{longtable} + +The {\bf JobMedia} table contains one entry for each volume 
written for the +current Job. If the Job spans 3 tapes, there will be three JobMedia records, +each containing the information to find all the files for the given JobId on +the tape. + +\ + +\addcontentsline{lot}{table}{Media Table Layout} +\begin{longtable}{|l|l|p{2.4in}|} + \hline +\multicolumn{3}{|l| }{\bf Media } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{MediaId } & {integer } & {Primary Key } \\ + \hline +{VolumeName } & {tinyblob } & {Volume name } \\ + \hline +{Slot } & {integer } & {Autochanger Slot number or zero } \\ + \hline +{PoolId } & {integer } & {Link to Pool Record } \\ + \hline +{MediaType } & {tinyblob } & {The MediaType supplied by the user } \\ + \hline +{FirstWritten } & {datetime } & {Time/date when first written } \\ + \hline +{LastWritten } & {datetime } & {Time/date when last written } \\ + \hline +{LabelDate } & {datetime } & {Time/date when tape labeled } \\ + \hline +{VolJobs } & {integer } & {Number of jobs written to this media } \\ + \hline +{VolFiles } & {integer } & {Number of files written to this media } \\ + \hline +{VolBlocks } & {integer } & {Number of blocks written to this media } \\ + \hline +{VolMounts } & {integer } & {Number of time media mounted } \\ + \hline +{VolBytes } & {bigint } & {Number of bytes saved in Job } \\ + \hline +{VolErrors } & {integer } & {Number of errors during Job } \\ + \hline +{VolWrites } & {integer } & {Number of writes to media } \\ + \hline +{MaxVolBytes } & {bigint } & {Maximum bytes to put on this media } \\ + \hline +{VolCapacityBytes } & {bigint } & {Capacity estimate for this volume } \\ + \hline +{VolStatus } & {enum } & {Status of media: Full, Archive, Append, Recycle, +Read-Only, Disabled, Error, Busy } \\ + \hline +{Recycle } & {tinyint } & {Whether or not Bacula can recycle the Volumes: +Yes, No } \\ + \hline +{VolRetention } & {bigint } & {64 bit seconds until expiration } \\ + 
\hline +{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\ + \hline +{MaxVolJobs } & {integer } & {maximum jobs to put on Volume } \\ + \hline +{MaxVolFiles } & {integer } & {maximume EOF marks to put on Volume } +\\ \hline + +\end{longtable} + +The {\bf Volume} table (internally referred to as the Media table) contains +one entry for each volume, that is each tape, cassette (8mm, DLT, DAT, ...), +or file on which information is or was backed up. There is one Volume record +created for each of the NumVols specified in the Pool resource record. + +\ + +\addcontentsline{lot}{table}{Pool Table Layout} +\begin{longtable}{|l|l|p{2.4in}|} + \hline +\multicolumn{3}{|l| }{\bf Pool } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{PoolId } & {integer } & {Primary Key } \\ + \hline +{Name } & {Tinyblob } & {Pool Name } \\ + \hline +{NumVols } & {Integer } & {Number of Volumes in the Pool } \\ + \hline +{MaxVols } & {Integer } & {Maximum Volumes in the Pool } \\ + \hline +{UseOnce } & {tinyint } & {Use volume once } \\ + \hline +{UseCatalog } & {tinyint } & {Set to use catalog } \\ + \hline +{AcceptAnyVolume } & {tinyint } & {Accept any volume from Pool } \\ + \hline +{VolRetention } & {bigint } & {64 bit seconds to retain volume } \\ + \hline +{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\ + \hline +{MaxVolJobs } & {integer } & {max jobs on volume } \\ + \hline +{MaxVolFiles } & {integer } & {max EOF marks to put on Volume } \\ + \hline +{MaxVolBytes } & {bigint } & {max bytes to write on Volume } \\ + \hline +{AutoPrune } & {tinyint } & {yes|no for autopruning } \\ + \hline +{Recycle } & {tinyint } & {yes|no for allowing auto recycling of Volume } +\\ + \hline +{PoolType } & {enum } & {Backup, Copy, Cloned, Archive, Migration } \\ + \hline +{LabelFormat } & {Tinyblob } & {Label format } +\\ \hline + +\end{longtable} + +The {\bf Pool} 
table contains one entry for each media pool controlled by +Bacula in this database. One media record exists for each of the NumVols +contained in the Pool. The PoolType is a Bacula defined keyword. The MediaType +is defined by the administrator, and corresponds to the MediaType specified in +the Director's Storage definition record. The CurrentVol is the sequence +number of the Media record for the current volume. + +\ + +\addcontentsline{lot}{table}{Client Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Client } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{ClientId } & {integer } & {Primary Key } \\ + \hline +{Name } & {TinyBlob } & {File Services Name } \\ + \hline +{UName } & {TinyBlob } & {uname -a from Client (not yet used) } \\ + \hline +{AutoPrune } & {tinyint } & {yes|no for autopruning } \\ + \hline +{FileRetention } & {bigint } & {64 bit seconds to retain Files } \\ + \hline +{JobRetention } & {bigint } & {64 bit seconds to retain Job } +\\ \hline + +\end{longtable} + +The {\bf Client} table contains one entry for each machine backed up by Bacula +in this database. Normally the Name is a fully qualified domain name. + +\ + +\addcontentsline{lot}{table}{Unsaved Files Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf UnsavedFiles } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{UnsavedId } & {integer } & {Primary Key } \\ + \hline +{JobId } & {integer } & {JobId corresponding to this record } \\ + \hline +{PathId } & {integer } & {Id of path } \\ + \hline +{FilenameId } & {integer } & {Id of filename } +\\ \hline + +\end{longtable} + +The {\bf UnsavedFiles} table contains one entry for each file that was not +saved. Note! This record is not yet implemented. 
+ +\ + +\addcontentsline{lot}{table}{Counter Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Counter } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{Counter } & {tinyblob } & {Counter name } \\ + \hline +{MinValue } & {integer } & {Start/Min value for counter } \\ + \hline +{MaxValue } & {integer } & {Max value for counter } \\ + \hline +{CurrentValue } & {integer } & {Current counter value } \\ + \hline +{WrapCounter } & {tinyblob } & {Name of another counter } +\\ \hline + +\end{longtable} + +The {\bf Counter} table contains one entry for each permanent counter defined +by the user. + +\ + +\addcontentsline{lot}{table}{Version Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Version } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{VersionId } & {integer } & {Primary Key } +\\ \hline + +\end{longtable} + +The {\bf Version} table defines the Bacula database version number. Bacula +checks this number before reading the database to ensure that it is compatible +with the Bacula binary file. + +\ + +\addcontentsline{lot}{table}{Base Files Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf BaseFiles } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{BaseId } & {integer } & {Primary Key } \\ + \hline +{BaseJobId } & {integer } & {JobId of Base Job } \\ + \hline +{JobId } & {integer } & {Reference to Job } \\ + \hline +{FileId } & {integer } & {Reference to File } \\ + \hline +{FileIndex } & {integer } & {File Index number } +\\ \hline + +\end{longtable} + +The {\bf BaseFiles} table contains all the File references for a particular +JobId that point to a Base file -- i.e. 
they were previously saved and hence +were not saved in the current JobId but in BaseJobId under FileId. FileIndex +is the index of the file, and is used for optimization of Restore jobs to +prevent the need to read the FileId record when creating the in memory tree. +This record is not yet implemented. + +\ + +\subsubsection*{MySQL Table Definition} +\index[general]{MySQL Table Definition } +\index[general]{Definition!MySQL Table } +\addcontentsline{toc}{subsubsection}{MySQL Table Definition} + +The commands used to create the MySQL tables are as follows: + +\footnotesize +\begin{verbatim} +USE bacula; +CREATE TABLE Filename ( + FilenameId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name BLOB NOT NULL, + PRIMARY KEY(FilenameId), + INDEX (Name(30)) + ); +CREATE TABLE Path ( + PathId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Path BLOB NOT NULL, + PRIMARY KEY(PathId), + INDEX (Path(50)) + ); +CREATE TABLE File ( + FileId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + FileIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + PathId INTEGER UNSIGNED NOT NULL REFERENCES Path, + FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename, + MarkId INTEGER UNSIGNED NOT NULL DEFAULT 0, + LStat TINYBLOB NOT NULL, + MD5 TINYBLOB NOT NULL, + PRIMARY KEY(FileId), + INDEX (JobId), + INDEX (PathId), + INDEX (FilenameId) + ); +CREATE TABLE Job ( + JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Job TINYBLOB NOT NULL, + Name TINYBLOB NOT NULL, + Type BINARY(1) NOT NULL, + Level BINARY(1) NOT NULL, + ClientId INTEGER NOT NULL REFERENCES Client, + JobStatus BINARY(1) NOT NULL, + SchedTime DATETIME NOT NULL, + StartTime DATETIME NOT NULL, + EndTime DATETIME NOT NULL, + JobTDate BIGINT UNSIGNED NOT NULL, + VolSessionId INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolSessionTime INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobBytes BIGINT UNSIGNED NOT NULL, + JobErrors INTEGER UNSIGNED NOT NULL DEFAULT 0, + 
JobMissingFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool, + FileSetId INTEGER UNSIGNED NOT NULL REFERENCES FileSet, + PurgedFiles TINYINT NOT NULL DEFAULT 0, + HasBase TINYINT NOT NULL DEFAULT 0, + PRIMARY KEY(JobId), + INDEX (Name(128)) + ); +CREATE TABLE FileSet ( + FileSetId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + FileSet TINYBLOB NOT NULL, + MD5 TINYBLOB NOT NULL, + CreateTime DATETIME NOT NULL, + PRIMARY KEY(FileSetId) + ); +CREATE TABLE JobMedia ( + JobMediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + MediaId INTEGER UNSIGNED NOT NULL REFERENCES Media, + FirstIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + LastIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + StartFile INTEGER UNSIGNED NOT NULL DEFAULT 0, + EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0, + StartBlock INTEGER UNSIGNED NOT NULL DEFAULT 0, + EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + PRIMARY KEY(JobMediaId), + INDEX (JobId, MediaId) + ); +CREATE TABLE Media ( + MediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + VolumeName TINYBLOB NOT NULL, + Slot INTEGER NOT NULL DEFAULT 0, + PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool, + MediaType TINYBLOB NOT NULL, + FirstWritten DATETIME NOT NULL, + LastWritten DATETIME NOT NULL, + LabelDate DATETIME NOT NULL, + VolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolBlocks INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolMounts INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + VolErrors INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolWrites INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolCapacityBytes BIGINT UNSIGNED NOT NULL, + VolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged', + 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning') NOT NULL, + Recycle TINYINT NOT NULL DEFAULT 0, + VolRetention BIGINT UNSIGNED NOT NULL DEFAULT 
0, + VolUseDuration BIGINT UNSIGNED NOT NULL DEFAULT 0, + MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + InChanger TINYINT NOT NULL DEFAULT 0, + MediaAddressing TINYINT NOT NULL DEFAULT 0, + VolReadTime BIGINT UNSIGNED NOT NULL DEFAULT 0, + VolWriteTime BIGINT UNSIGNED NOT NULL DEFAULT 0, + PRIMARY KEY(MediaId), + INDEX (PoolId) + ); +CREATE TABLE Pool ( + PoolId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + NumVols INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVols INTEGER UNSIGNED NOT NULL DEFAULT 0, + UseOnce TINYINT NOT NULL, + UseCatalog TINYINT NOT NULL, + AcceptAnyVolume TINYINT DEFAULT 0, + VolRetention BIGINT UNSIGNED NOT NULL, + VolUseDuration BIGINT UNSIGNED NOT NULL, + MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED NOT NULL, + AutoPrune TINYINT DEFAULT 0, + Recycle TINYINT DEFAULT 0, + PoolType ENUM('Backup', 'Copy', 'Cloned', 'Archive', 'Migration', 'Scratch') NOT NULL, + LabelFormat TINYBLOB, + Enabled TINYINT DEFAULT 1, + ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + UNIQUE (Name(128)), + PRIMARY KEY (PoolId) + ); +CREATE TABLE Client ( + ClientId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + Uname TINYBLOB NOT NULL, /* full uname -a of client */ + AutoPrune TINYINT DEFAULT 0, + FileRetention BIGINT UNSIGNED NOT NULL, + JobRetention BIGINT UNSIGNED NOT NULL, + UNIQUE (Name(128)), + PRIMARY KEY(ClientId) + ); +CREATE TABLE BaseFiles ( + BaseId INTEGER UNSIGNED AUTO_INCREMENT, + BaseJobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + FileId INTEGER UNSIGNED NOT NULL REFERENCES File, + FileIndex INTEGER UNSIGNED, + PRIMARY KEY(BaseId) + ); +CREATE TABLE UnsavedFiles ( + UnsavedId INTEGER UNSIGNED 
AUTO_INCREMENT, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + PathId INTEGER UNSIGNED NOT NULL REFERENCES Path, + FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename, + PRIMARY KEY (UnsavedId) + ); +CREATE TABLE Version ( + VersionId INTEGER UNSIGNED NOT NULL + ); +-- Initialize Version +INSERT INTO Version (VersionId) VALUES (7); +CREATE TABLE Counters ( + Counter TINYBLOB NOT NULL, + MinValue INTEGER, + MaxValue INTEGER, + CurrentValue INTEGER, + WrapCounter TINYBLOB NOT NULL, + PRIMARY KEY (Counter(128)) + ); +\end{verbatim} +\normalsize diff --git a/docs/manual-de/catmaintenance.tex b/docs/manual-de/catmaintenance.tex new file mode 100644 index 00000000..981d1d8a --- /dev/null +++ b/docs/manual-de/catmaintenance.tex @@ -0,0 +1,459 @@ +%% +%% + +\section*{Catalog Maintenance} +\label{_ChapterStart12} +\index[general]{Maintenance!Catalog } +\index[general]{Catalog Maintenance } +\addcontentsline{toc}{section}{Catalog Maintenance} + +Without proper setup and maintenance, your Catalog may continue to grow +indefinitely as you run Jobs and backup Files. How fast the size of your +Catalog grows depends on the number of Jobs you run and how many files they +backup. By deleting records within the database, you can make space available +for the new records that will be added during the next Job. By constantly +deleting old expired records (dates older than the Retention period), your +database size will remain constant. + +If you started with the default configuration files, they already contain +reasonable defaults for a small number of machines (less than 5), so if you +fall into that case, catalog maintenance will not be urgent if you have a few +hundred megabytes of disk space free. Whatever the case may be, some knowledge +of retention periods will be useful. 
+\label{Retention} + +\subsection*{Setting Retention Periods} +\index[general]{Setting Retention Periods } +\index[general]{Periods!Setting Retention } +\addcontentsline{toc}{subsection}{Setting Retention Periods} + +{\bf Bacula} uses three Retention periods: the {\bf File Retention} period, +the {\bf Job Retention} period, and the {\bf Volume Retention} period. Of +these three, the File Retention period is by far the most important in +determining how large your database will become. + +The {\bf File Retention} and the {\bf Job Retention} are specified in each +Client resource as is shown below. The {\bf Volume Retention} period is +specified in the Pool resource, and the details are given in the next chapter +of this manual. + +\begin{description} + +\item [File Retention = \lt{}time-period-specification\gt{}] + \index[dir]{File Retention } + The File Retention record defines the length of time that Bacula will keep +File records in the Catalog database. When this time period expires, and if +{\bf AutoPrune} is set to {\bf yes}, Bacula will prune (remove) File records +that are older than the specified File Retention period. The pruning will +occur at the end of a backup Job for the given Client. Note that the Client +database record contains a copy of the File and Job retention periods, but +Bacula uses the current values found in the Director's Client resource to do +the pruning. + +Since File records in the database account for probably 80 percent of the +size of the database, you should carefully determine exactly what File +Retention period you need. Once the File records have been removed from +the database, you will no longer be able to restore individual files +in a Job. However, with Bacula version 1.37 and later, as long as the +Job record still exists, you will be able to restore all files in the +job. 
+ +Retention periods are specified in seconds, but as a convenience, there are +a number of modifiers that permit easy specification in terms of minutes, +hours, days, weeks, months, quarters, or years on the record. See the +\ilink{ Configuration chapter}{Time} of this manual for additional details +of modifier specification. + +The default File retention period is 60 days. + +\item [Job Retention = \lt{}time-period-specification\gt{}] + \index[dir]{Job Retention } + The Job Retention record defines the length of time that {\bf Bacula} +will keep Job records in the Catalog database. When this time period +expires, and if {\bf AutoPrune} is set to {\bf yes} Bacula will prune +(remove) Job records that are older than the specified Job Retention +period. Note, if a Job record is selected for pruning, all associated File +and JobMedia records will also be pruned regardless of the File Retention +period set. As a consequence, you normally will set the File retention +period to be less than the Job retention period. + +As mentioned above, once the File records are removed from the database, +you will no longer be able to restore individual files from the Job. +However, as long as the Job record remains in the database, you will be +able to restore all the files backuped for the Job (on version 1.37 and +later). As a consequence, it is generally a good idea to retain the Job +records much longer than the File records. + +The retention period is specified in seconds, but as a convenience, there +are a number of modifiers that permit easy specification in terms of +minutes, hours, days, weeks, months, quarters, or years. See the \ilink{ +Configuration chapter}{Time} of this manual for additional details of +modifier specification. + +The default Job Retention period is 180 days. 
+ +\item [AutoPrune = \lt{}yes/no\gt{}] + \index[dir]{AutoPrune } + If AutoPrune is set to {\bf yes} (default), Bacula will automatically apply +the File retention period and the Job retention period for the Client at the +end of the Job. + +If you turn this off by setting it to {\bf no}, your Catalog will grow each +time you run a Job. +\end{description} + +\label{CompactingMySQL} + +\subsection*{Compacting Your MySQL Database} +\index[general]{Database!Compacting Your MySQL } +\index[general]{Compacting Your MySQL Database } +\addcontentsline{toc}{subsection}{Compacting Your MySQL Database} + +Over time, as noted above, your database will tend to grow. I've noticed that +even though Bacula regularly prunes files, {\bf MySQL} does not effectively +use the space, and instead continues growing. To avoid this, from time to +time, you must compact your database. Normally, large commercial database such +as Oracle have commands that will compact a database to reclaim wasted file +space. MySQL has the {\bf OPTIMIZE TABLE} command that you can use, and SQLite +version 2.8.4 and greater has the {\bf VACUUM} command. We leave it to you to +explore the utility of the {\bf OPTIMIZE TABLE} command in MySQL. + +All database programs have some means of writing the database out in ASCII +format and then reloading it. Doing so will re-create the database from +scratch producing a compacted result, so below, we show you how you can do +this for both MySQL and SQLite. + +For a {\bf MySQL} database, you could write the Bacula database as an ASCII +file (bacula.sql) then reload it by doing the following: + +\footnotesize +\begin{verbatim} +mysqldump -f --opt bacula > bacula.sql +mysql bacula < bacula.sql +rm -f bacula.sql +\end{verbatim} +\normalsize + +Depending on the size of your database, this will take more or less time and a +fair amount of disk space. 
For example, if I cd to the location of the MySQL +Bacula database (typically /opt/mysql/var or something similar) and enter: + +\footnotesize +\begin{verbatim} +du bacula +\end{verbatim} +\normalsize + +I get {\bf 620,644} which means there are that many blocks containing 1024 +bytes each or approximately 635 MB of data. After doing the {\bf msqldump}, I +had a bacula.sql file that had {\bf 174,356} blocks, and after doing the {\bf +mysql} command to recreate the database, I ended up with a total of {\bf +210,464} blocks rather than the original {\bf 629,644}. In other words, the +compressed version of the database took approximately one third of the space +of the database that had been in use for about a year. + +As a consequence, I suggest you monitor the size of your database and from +time to time (once every 6 months or year), compress it. +\label{RepairingMySQL} + +\subsection*{Repairing Your MySQL Database} +\index[general]{Database!Repairing Your MySQL } +\index[general]{Repairing Your MySQL Database } +\addcontentsline{toc}{subsection}{Repairing Your MySQL Database} + +If you find that you are getting errors writing to your MySQL database, or +Bacula hangs each time it tries to access the database, you should consider +running MySQL's database check and repair routines. The program you need to +run depends on the type of database indexing you are using. If you are using +the default, you will probably want to use {\bf myisamchk}. For more details +on how to do this, please consult the MySQL document at: +\elink{ +http://www.mysql.com/doc/en/Repair.html} +{http://www.mysql.com/doc/en/Repair.html}. + +If the errors you are getting are simply SQL warnings, then you might try +running dbcheck before (or possibly after) using the MySQL database repair +program. It can clean up many of the orphaned record problems, and certain +other inconsistencies in the Bacula database. 
+\label{RepairingPSQL} + +\subsection*{Repairing Your PostgreSQL Database} +\index[general]{Database!Repairing Your PostgreSQL } +\index[general]{Repairing Your PostgreSQL Database } +\addcontentsline{toc}{subsection}{Repairing Your PostgreSQL Database} + +The same considerations apply that are indicated above for MySQL. That is, +consult the PostgreSQL documents for how to repair the database, and also +consider using Bacula's dbcheck program if the conditions are reasonable for +using (see above). +\label{CompactingPostgres} + +\subsection*{Compacting Your PostgreSQL Database} +\index[general]{Database!Compacting Your PostgreSQL } +\index[general]{Compacting Your PostgreSQL Database } +\addcontentsline{toc}{subsection}{Compacting Your PostgreSQL Database} + +Over time, as noted above, your database will tend to grow. I've noticed that +even though Bacula regularly prunes files, PostgreSQL has a {\bf VACUUM} +command that will compact your database for you. Alternatively you may want to +use the {\bf vacuumdb} command, which can be run from a cron job. + +All database programs have some means of writing the database out in ASCII +format and then reloading it. Doing so will re-create the database from +scratch producing a compacted result, so below, we show you how you can do +this for PostgreSQL. + +For a {\bf PostgreSQL} database, you could write the Bacula database as an +ASCII file (bacula.sql) then reload it by doing the following: + +\footnotesize +\begin{verbatim} +pg_dump bacula > bacula.sql +cat bacula.sql | psql bacula +rm -f bacula.sql +\end{verbatim} +\normalsize + +Depending on the size of your database, this will take more or less time and a +fair amount of disk space. For example, you can {\bf cd} to the location of +the Bacula database (typically /usr/local/pgsql/data or possible +/var/lib/pgsql/data) and check the size. 
+ +\subsection*{Compacting Your SQLite Database} +\index[general]{Compacting Your SQLite Database } +\index[general]{Database!Compacting Your SQLite } +\addcontentsline{toc}{subsection}{Compacting Your SQLite Database} + +First please read the previous section that explains why it is necessary to +compress a database. SQLite version 2.8.4 and greater have the {\bf Vacuum} +command for compacting the database. + +\footnotesize +\begin{verbatim} +cd {\bf working-directory} +echo 'vacuum;' | sqlite bacula.db +\end{verbatim} +\normalsize + +As an alternative, you can use the following commands, adapted to your system: + + +\footnotesize +\begin{verbatim} +cd {\bf working-directory} +echo '.dump' | sqlite bacula.db > bacula.sql +rm -f bacula.db +sqlite bacula.db < bacula.sql +rm -f bacula.sql +\end{verbatim} +\normalsize + +Where {\bf working-directory} is the directory that you specified in the +Director's configuration file. Note, in the case of SQLite, it is necessary to +completely delete (rm) the old database before creating a new compressed +version. + +\subsection*{Migrating from SQLite to MySQL} +\index[general]{MySQL!Migrating from SQLite to } +\index[general]{Migrating from SQLite to MySQL } +\addcontentsline{toc}{subsection}{Migrating from SQLite to MySQL} + +You may begin using Bacula with SQLite then later find that you want to switch +to MySQL for any of a number of reasons: SQLite tends to use more disk than +MySQL, SQLite apparently does not handle database sizes greater than 2GBytes, +... Several users have done so by first producing an ASCII ``dump'' of the +SQLite database, then creating the MySQL tables with the {\bf +create\_mysql\_tables} script that comes with Bacula, and finally feeding the +SQLite dump into MySQL using the {\bf -f} command line option to continue past +the errors that are generated by the DDL statements that SQLite's dump +creates. Of course, you could edit the dump and remove the offending +statements. 
Otherwise, MySQL accepts the SQL produced by SQLite. +\label{BackingUpBacula} + +\subsection*{Backing Up Your Bacula Database} +\index[general]{Backing Up Your Bacula Database } +\index[general]{Database!Backing Up Your Bacula } +\addcontentsline{toc}{subsection}{Backing Up Your Bacula Database} + +If ever the machine on which your Bacula database crashes, and you need to +restore from backup tapes, one of your first priorities will probably be to +recover the database. Although Bacula will happily backup your catalog +database if it is specified in the FileSet, this is not a very good way to do +it, because the database will be saved while Bacula is modifying it. Thus the +database may be in an instable state. Worse yet, you will backup the database +before all the Bacula updates have been applied. + +To resolve these problems, you need to backup the database after all the backup +jobs have been run. In addition, you will want to make a copy while Bacula is +not modifying it. To do so, you can use two scripts provided in the release +{\bf make\_catalog\_backup} and {\bf delete\_catalog\_backup}. These files +will be automatically generated along with all the other Bacula scripts. The +first script will make an ASCII copy of your Bacula database into {\bf +bacula.sql} in the working directory you specified in your configuration, and +the second will delete the {\bf bacula.sql} file. 
+ +The basic sequence of events to make this work correctly is as follows: + +\begin{itemize} +\item Run all your nightly backups +\item After running your nightly backups, run a Catalog backup Job +\item The Catalog backup job must be scheduled after your last nightly backup + +\item You use {\bf RunBeforeJob} to create the ASCII backup file and {\bf + RunAfterJob} to clean up + \end{itemize} + +Assuming that you start all your nightly backup jobs at 1:05 am (and that they +run one after another), you can do the catalog backup with the following +additional Director configuration statements: + +\footnotesize +\begin{verbatim} +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + Type = Backup + Client=rufus-fd + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + Storage = DLTDrive + Messages = Standard + Pool = Default + RunBeforeJob = "/home/kern/bacula/bin/make_catalog_backup" + RunAfterJob = "/home/kern/bacula/bin/delete_catalog_backup" +} +# This schedule does the catalog. It starts after the WeeklyCycle +Schedule { + Name = "WeeklyCycleAfterBackup + Run = Full sun-sat at 1:10 +} +# This is the backup of the catalog +FileSet { + Name = "Catalog" + Include = signature=MD5 { + @working_directory@/bacula.sql + } +} +\end{verbatim} +\normalsize + +\label{BackingUPOtherDBs} + +\subsection*{Backing Up Third Party Databases} +\index[general]{Backing Up Third Party Databases } +\index[general]{Databases!Backing Up Third Party } +\addcontentsline{toc}{subsection}{Backing Up Third Party Databases} + +If you are running a database in production mode on your machine, Bacula will +happily backup the files, but if the database is in use while Bacula is +reading it, you may back it up in an unstable state. + +The best solution is to shutdown your database before backing it up, or use +some tool specific to your database to make a valid live copy perhaps by +dumping the database in ASCII format. 
I am not a database expert, so I cannot +provide you advice on how to do this, but if you are unsure about how to +backup your database, you might try visiting the Backup Central site, which +has been renamed Storage Mountain (www.backupcentral.com). In particular, +their +\elink{ Free Backup and Recovery +Software}{http://www.backupcentral.com/toc-free-backup-software.html} page has +links to scripts that show you how to shutdown and backup most major +databases. +\label{Size} + +\subsection*{Database Size} +\index[general]{Size!Database } +\index[general]{Database Size } +\addcontentsline{toc}{subsection}{Database Size} + +As mentioned above, if you do not do automatic pruning, your Catalog will grow +each time you run a Job. Normally, you should decide how long you want File +records to be maintained in the Catalog and set the {\bf File Retention} +period to that time. Then you can either wait and see how big your Catalog +gets or make a calculation assuming approximately 154 bytes for each File +saved and knowing the number of Files that are saved during each backup and +the number of Clients you backup. + +For example, suppose you do a backup of two systems, each with 100,000 files. +Suppose further that you do a Full backup weekly and an Incremental every day, +and that the Incremental backup typically saves 4,000 files. The size of your +database after a month can roughly be calculated as: + +\footnotesize +\begin{verbatim} + Size = 154 * No. Systems * (100,000 * 4 + 10,000 * 26) +\end{verbatim} +\normalsize + +where we have assumed 4 weeks in a month and 26 incremental backups per month. +This would give the following: + +\footnotesize +\begin{verbatim} + Size = 154 * 2 * (100,000 * 4 + 10,000 * 26) +or + Size = 308 * (400,000 + 260,000) +or + Size = 203,280,000 bytes +\end{verbatim} +\normalsize + +So for the above two systems, we should expect to have a database size of +approximately 200 Megabytes. 
Of course, this will vary according to how many +files are actually backed up. + +Below are some statistics for a MySQL database containing Job records for five +Clients beginning September 2001 through May 2002 (8.5 months) and File +records for the last 80 days. (Older File records have been pruned). For these +systems, only the user files and system files that change are backed up. The +core part of the system is assumed to be easily reloaded from the RedHat rpms. + + +In the list below, the files (corresponding to Bacula Tables) with the +extension .MYD contain the data records whereas files with the extension .MYI +contain indexes. + +You will note that the File records (containing the file attributes) make up +the large bulk of the number of records as well as the space used (459 Mega +Bytes including the indexes). As a consequence, the most important Retention +period will be the {\bf File Retention} period. A quick calculation shows that +for each File that is saved, the database grows by approximately 150 bytes. + +\footnotesize +\begin{verbatim} + Size in + Bytes Records File + ============ ========= =========== + 168 5 Client.MYD + 3,072 Client.MYI + 344,394,684 3,080,191 File.MYD + 115,280,896 File.MYI + 2,590,316 106,902 Filename.MYD + 3,026,944 Filename.MYI + 184 4 FileSet.MYD + 2,048 FileSet.MYI + 49,062 1,326 JobMedia.MYD + 30,720 JobMedia.MYI + 141,752 1,378 Job.MYD + 13,312 Job.MYI + 1,004 11 Media.MYD + 3,072 Media.MYI + 1,299,512 22,233 Path.MYD + 581,632 Path.MYI + 36 1 Pool.MYD + 3,072 Pool.MYI + 5 1 Version.MYD + 1,024 Version.MYI +\end{verbatim} +\normalsize + +This database has a total size of approximately 450 Megabytes. + +If we were using SQLite, the determination of the total database size would be +much easier since it is a single file, but we would have less insight to the +size of the individual tables as we have in this case. 
+ +Note, SQLite databases may be as much as 50\% larger than MySQL databases due +to the fact that all data is stored as ASCII strings. That is even binary +integers are stored as ASCII strings, and this seems to increase the space +needed. diff --git a/docs/manual-de/check_tex.pl b/docs/manual-de/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manual-de/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. 
+my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. 
A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. 
+get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manual-de/configure.tex b/docs/manual-de/configure.tex new file mode 100644 index 00000000..013f2148 --- /dev/null +++ b/docs/manual-de/configure.tex @@ -0,0 +1,395 @@ +%% +%% + +\section*{Customizing the Configuration Files} +\label{_ChapterStart16} +\index[general]{Files!Customizing the Configuration } +\index[general]{Customizing the Configuration Files } +\addcontentsline{toc}{section}{Customizing the Configuration Files} + +When each of the Bacula programs starts, it reads a configuration file +specified on the command line or the default {\bf bacula-dir.conf}, {\bf +bacula-fd.conf}, {\bf bacula-sd.conf}, or {\bf console.conf} for the Director +daemon, the File daemon, the Storage daemon, and the Console program +respectively. + +Each service (Director, Client, Storage, Console) has its own configuration +file containing a set of Resource definitions. These resources are very +similar from one service to another, but may contain different directives +(records) depending on the service. For example, in the Director's resource +file, the {\bf Director} resource defines the name of the Director, a number +of global Director parameters and his password. In the File daemon +configuration file, the {\bf Director} resource specifies which Directors are +permitted to use the File daemon. + +Before running Bacula for the first time, you must customize the configuration +files for each daemon. Default configuration files will have been created by +the installation process, but you will need to modify them to correspond to +your system. 
An overall view of the resources can be seen in the following: + +\addcontentsline{lof}{figure}{Bacula Objects} +\includegraphics{./bacula-objects.eps} +\\ +(thanks to Aristides Maniatis for the above graphic) +\label{ResFormat} + +\subsection*{Resource Directive Format} +\index[general]{Resource Directive Format } +\index[general]{Format!Resource Directive } +\addcontentsline{toc}{subsection}{Resource Directive Format} + +Although, you won't need to know the details of all the directives a basic +knowledge of Bacula resource directives is essential. Each directive contained +within the resource (within the braces) is composed of a keyword followed by +an equal sign (=) followed by one or more values. The keywords must be one of +the known Bacula resource record keywords, and it may be composed of upper or +lower case characters and spaces. + +Each resource definition MUST contain a Name directive, and may optionally +contain a Description directive (or record). The Name directive is used to +uniquely identify the resource. The Description directive is (will be) used +during display of the Resource to provide easier human recognition. For +example: + +\footnotesize +\begin{verbatim} +Director { + Name = "MyDir" + Description = "Main Bacula Director" + WorkingDirectory = "$HOME/bacula/bin/working" +} +\end{verbatim} +\normalsize + +Defines the Director resource with the name ``MyDir'' and a working directory +\$HOME/bacula/bin/working. In general, if you want spaces in a name to the +right of the first equal sign (=), you must enclose that name within double +quotes. Otherwise quotes are not generally necessary because once defined, +quoted strings and unquoted strings are all equal. +\label{Comments} + +\subsubsection*{Comments} +\index[general]{Comments } +\addcontentsline{toc}{subsubsection}{Comments} + +When reading the configuration file, blank lines are ignored and everything +after a hash sign (\#) until the end of the line is taken to be a comment. 
A +semicolon (;) is a logical end of line, and anything after the semicolon is +considered as the next statement. If a statement appears on a line by itself, +a semicolon is not necessary to terminate it, so generally in the examples in +this manual, you will not see many semicolons. +\label{Case1} + +\subsubsection*{Upper and Lower Case and Spaces} +\index[general]{Spaces!Upper and Lower Case and } +\index[general]{Upper and Lower Case and Spaces } +\addcontentsline{toc}{subsubsection}{Upper and Lower Case and Spaces} + +Case (upper/lower) and spaces are totally ignored in the resource directive +keywords (the part before the equal sign). + +Within the keyword (i.e. before the equal sign), spaces are not significant. +Thus the keywords: {\bf name}, {\bf Name}, and {\bf N a m e} are all +identical. + +Spaces after the equal sign and before the first character of the value are +ignored. + +In general, spaces within a value are significant (not ignored), and if the +value is a name, you must enclose the name in double quotes for the spaces to +be accepted. Names may contain up to 127 characters. Currently, a name may +contain any ASCII character. Within a quoted string, any character following a +backslash (\textbackslash{}) is taken as itself (handy for inserting +blackslashes and double quotes (``). + +Please note, however, that Bacula resource names as well as certain other +names (e.g. Volume names) will in the future be severely limited to permit +only letters (including ISO accented letters), numbers, and a few special +characters (space, underscore, ...). All other characters and punctuation will +be illegal. 
+\label{Includes} + +\subsubsection*{Including other Configuration Files} +\index[general]{Including other Configuration Files } +\index[general]{Files!Including other Configuration } +\addcontentsline{toc}{subsubsection}{Including other Configuration Files} + +If you wish to break your configuration file into smaller pieces, you can do +so by including other files using the syntax @{\bf filename} where {\bf +filename} is the full path and filename of another file. The @filename +specification can be given anywhere a primitive token would appear. +\label{DataTypes} + +\subsubsection*{Recognized Primitive Data Types} +\index[general]{Types!Recognized Primitive Data } +\index[general]{Recognized Primitive Data Types } +\addcontentsline{toc}{subsubsection}{Recognized Primitive Data Types} + +When parsing the resource directives, Bacula classifies the data according to +the types listed below. The first time you read this, it may appear a bit +overwhelming, but in reality, it is all pretty logical and straightforward. + +\begin{description} + +\item [name] + \index[fd]{name } + A keyword or name consisting of alphanumeric characters, including the +hyphen, underscore, and dollar characters. The first character of a {\bf +name} must be a letter. A name has a maximum length currently set to 127 +bytes. Typically keywords appear on the left side of an equal (i.e. they are +Bacula keywords -- i.e. Resource names or directive names). Keywords may not +be quoted. + +\item [name-string] + \index[fd]{name-string } + A name-string is similar to a name, except that the name may be quoted and +can thus contain additional characters including spaces. Name strings are +limited to 127 characters in length. Name strings are typically used on the +right side of an equal (i.e. they are values to be associated with a keyword. + + +\item [string] + \index[fd]{string } + A quoted string containing virtually any character including spaces, or a +non-quoted string. 
A string may be of any length. Strings are typically +values that correspond to filenames, directories, or system command names. A +backslash (\textbackslash{}) turns the next character into itself, so to +include a double quote in a string, you precede the double quote with a +backslash. Likewise to include a backslash. + +\item [directory] + \index[dir]{directory } + A directory is either a quoted or non-quoted string. A directory will be +passed to your standard shell for expansion when it is scanned. Thus +constructs such as {\bf \$HOME} are interpreted to be their correct values. + +\item [password] + \index[dir]{password } + This is a Bacula password and it is stored internally in MD5 hashed format. + +\item [integer] + \index[dir]{integer } + A 32 bit integer value. It may be positive or negative. + +\item [positive integer] + \index[dir]{positive integer } + A 32 bit positive integer value. + +\item [long integer] + \index[dir]{long integer } + A 64 bit integer value. Typically these are values such as bytes that can +exceed 4 billion and thus require a 64 bit value. + +\item [yes|no] + \index[dir]{yes|no } + Either a {\bf yes} or a {\bf no}. + +\item [ + \label{Size1} + size] +\index[dir]{a name } +A size specified as bytes. Typically, this is a floating point scientific +input format followed by an optional modifier. The floating point input is +stored as a 64 bit integer value. If a modifier is present, it must +immediately follow the value with no intervening spaces. The following +modifiers are permitted: + +\begin{description} + +\item [k] + 1,024 (kilobytes) + +\item [kb] + 1,000 (kilobytes) + +\item [m] + 1,048,576 (megabytes) + +\item [mb] + 1,000,000 (megabytes) + +\item [g] + 1,073,741,824 (gigabytes) + +\item [gb] + 1,000,000,000 (gigabytes) + \end{description} + +\item {\bf + \label{Time} + time} +\index[dir]{a name } +A time or duration specified in seconds. 
The time is stored internally as a +64 bit integer value, but it is specified in two parts: a number part and a +modifier part. The number can be an integer or a floating point number. If it +is entered in floating point notation, it will be rounded to the nearest +integer. The modifer is mandatory and follows the number part, either with +or without intervening spaces. The following modifiers are permitted: + +\begin{description} + +\item [seconds] + \index[dir]{seconds } + seconds + +\item [minutes] + \index[dir]{minutes } + minutes (60 seconds) + +\item [hours] + \index[dir]{hours } + hours (3600 seconds) + +\item [days] + \index[dir]{days } + days (3600*24 seconds) + +\item [weeks] + \index[dir]{weeks } + weeks (3600*24*7 seconds) + +\item [months] + \index[dir]{months } + months (3600*24*30 seconds) + +\item [quarters] + \index[dir]{quarters } + quarters (3600*24*91 seconds) + +\item [years] + \index[dir]{years } + years (3600*24*365 seconds) +\end{description} + +Any abbreviation of these modifiers is also permitted (i.e. {\bf seconds} may +be specified as {\bf sec} or {\bf s}. A specification of {\bf m} will be +taken as months. + +The specification of a time may have as many number/modifier parts as you wish. +For example: + +\footnotesize +\begin{verbatim} +1 week 2 days 3 hours 10 mins +1 month 2 days 30 sec + +\end{verbatim} +\normalsize + +are valid date specifications (beginning with version 1.35.1). + +Note! in Bacula version 1.31 and below, the modifier was optional. It is now +mandatory. +\end{description} + +\label{ResTypes} + +\subsection*{Resource Types} +\index[general]{Types!Resource } +\index[general]{Resource Types } +\addcontentsline{toc}{subsection}{Resource Types} + +The following table lists all current Bacula resource types. It shows what +resources must be defined for each service (daemon). 
The default configuration +files will already contain at least one example of each permitted resource, so +you need not worry about creating all these kinds of resources from scratch. + +\addcontentsline{lot}{table}{Resource Types} +\begin{longtable}{|l|l|l|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Resource } & \multicolumn{1}{c| }{\bf Director } & +\multicolumn{1}{c| }{\bf Client } & \multicolumn{1}{c| }{\bf Storage } & +\multicolumn{1}{c| }{\bf Console } \\ + \hline +{Catalog } & {Yes } & {No } & {No } & {No } \\ + \hline +{Client } & {Yes } & {Yes } & {No } & {No } \\ + \hline +{Console } & {Yes } & {No } & {No } & {Yes } \\ + \hline +{Device } & {No } & {No } & {Yes } & {No } \\ + \hline +{Director } & {Yes } & {Yes } & {Yes } & {Yes } \\ + \hline +{FileSet } & {Yes } & {No } & {No } & {No } \\ + \hline +{Job } & {Yes } & {No } & {No } & {No } \\ + \hline +{JobDefs } & {Yes } & {No } & {No } & {No } \\ + \hline +{Message } & {Yes } & {Yes } & {Yes } & {No } \\ + \hline +{Pool } & {Yes } & {No } & {No } & {No } \\ + \hline +{Schedule } & {Yes } & {No } & {No } & {No } \\ + \hline +{Storage } & {Yes } & {No } & {Yes } & {No } +\\ \hline + +\end{longtable} + +\subsection*{Names, Passwords and Authorization} +\label{Names} +\index[general]{Authorization!Names Passwords and } +\index[general]{Names, Passwords and Authorization } +\addcontentsline{toc}{subsection}{Names, Passwords and Authorization} + +In order for one daemon to contact another daemon, it must authorize itself +with a password. In most cases, the password corresponds to a particular name, +so both the name and the password must match to be authorized. + +The default configuration files are automatically defined for correct +authorization with random passwords. If you add to or modify these files, you +will need to take care to keep them consistent. 
+
+Here is sort of a picture of what names/passwords in which files/Resources
+must match up:
+
+\includegraphics{./Conf-Diagram.eps}
+
+In the left column, you will find the Director, Storage, and Client resources,
+with their names and passwords -- these are all in {\bf bacula-dir.conf}. In
+the right column are where the corresponding values should be found in the
+Console, Storage daemon (SD), and File daemon (FD) configuration files.
+
+Please note that the Address, {\bf fd-sd}, that appears in the Storage
+resource of the Director, preceded with an asterisk in the above example, is
+passed to the File daemon in symbolic form. The File daemon then resolves it
+to an IP address. For this reason, you must use either an IP address or a
+fully qualified name. A name such as {\bf localhost}, not being a fully
+qualified name, will resolve in the File daemon to the localhost of the File
+daemon, which is most likely not what is desired. The password used for the
+File daemon to authorize with the Storage daemon is a temporary password
+unique to each Job created by the daemons and is not specified in any .conf
+file.
+
+\subsection*{Detailed Information for each Daemon}
+\index[general]{Detailed Information for each Daemon }
+\index[general]{Daemon!Detailed Information for each }
+\addcontentsline{toc}{subsection}{Detailed Information for each Daemon}
+
+The details of each Resource and the directives permitted therein are
+described in the following chapters.
+
+The following configuration files must be defined:
+
+\begin{itemize}
+\item
+ \ilink{Console}{_ChapterStart36} -- to define the resources for
+ the Console program (user interface to the Director). It defines which
+Directors are available so that you may interact with them.
+\item
+ \ilink{Director}{_ChapterStart40} -- to define the resources
+ necessary for the Director. You define all the Clients and Storage daemons
+that you use in this configuration file. 
+\item + \ilink{Client}{_ChapterStart25} -- to define the resources for + each client to be backed up. That is, you will have a separate Client +resource file on each machine that runs a File daemon. +\item + \ilink{Storage}{_ChapterStart31} -- to define the resources to + be used by each Storage daemon. Normally, you will have a single Storage +daemon that controls your tape drive or tape drives. However, if you have +tape drives on several machines, you will have at least one Storage daemon +per machine. +\end{itemize} diff --git a/docs/manual-de/console.tex b/docs/manual-de/console.tex new file mode 100644 index 00000000..78fef582 --- /dev/null +++ b/docs/manual-de/console.tex @@ -0,0 +1,1161 @@ +%% +%% + +\section*{Bacula Console} +\label{_ChapterStart23} +\index[general]{Console!Bacula } +\index[general]{Bacula Console } +\addcontentsline{toc}{section}{Bacula Console} + +\subsection*{General} +\index[general]{General } +\addcontentsline{toc}{subsection}{General} + +The {\bf Bacula Console} (sometimes called the User Agent) is a program that +allows the user or the System Administrator, to interact with the Bacula +Director daemon while the daemon is running. + +The current Bacula Console comes in two versions: a shell interface (TTY +style), and a GNOME GUI interface. Both permit the administrator or authorized +users to interact with Bacula. You can determine the status of a particular +job, examine the contents of the Catalog as well as perform certain tape +manipulations with the Console program. + +In addition, there is a wx-console built with wxWidgets that allows a graphic +restore of files. As of version 1.34.1 it is in an early stage of development, +but it already is quite useful. + +Since the Console program interacts with the Director through the network, your +Console and Director programs do not necessarily need to run on the same +machine. 
+ +In fact, a certain minimal knowledge of the Console program is needed in order +for Bacula to be able to write on more than one tape, because when Bacula +requests a new tape, it waits until the user, via the Console program, +indicates that the new tape is mounted. + +\subsection*{Configuration} +\index[general]{Configuration } +\addcontentsline{toc}{subsection}{Configuration} + +When the Console starts, it reads a standard Bacula configuration file named +{\bf bconsole.conf} or {\bf gnome-console.conf} in the case of the GNOME +Console version. This file allows default configuration of the Console, and at +the current time, the only Resource Record defined is the Director resource, +which gives the Console the name and address of the Director. For more +information on configuration of the Console program, please see the +\ilink{Console Configuration File}{_ChapterStart36} Chapter of +this document. + +\subsection*{Running the Console Program} +\index[general]{Running the Console Program } +\index[general]{Program!Running the Console } +\addcontentsline{toc}{subsection}{Running the Console Program} + +After launching the Console program (bconsole), it will prompt you for the +next command with an asterisk (*). (Note, in the GNOME version, the prompt is +not present; you simply enter the commands you want in the command text box at +the bottom of the screen.) Generally, for all commands, you can simply enter +the command name and the Console program will prompt you for the necessary +arguments. Alternatively, in most cases, you may enter the command followed by +arguments. The general format is: + +\footnotesize +\begin{verbatim} + [=] [=] ... +\end{verbatim} +\normalsize + +where {\bf command} is one of the commands listed below; {\bf keyword} is one +of the keywords listed below (usually followed by an argument); and {\bf +argument} is the value. The command may be abbreviated to the shortest unique +form. 
If two commands have the same starting letters, the one that will be +selected is the one that appears first in the {\bf help} listing. If you want +the second command, simply spell out the full command. None of the keywords +following the command may be abbreviated. + +For example: + +\footnotesize +\begin{verbatim} +list files jobid=23 +\end{verbatim} +\normalsize + +will list all files saved for JobId 23. Or: + +\footnotesize +\begin{verbatim} +show pools +\end{verbatim} +\normalsize + +will display all the Pool resource records. + +\subsection*{Stopping the Console Program} +\index[general]{Program!Stopping the Console } +\index[general]{Stopping the Console Program } +\addcontentsline{toc}{subsection}{Stopping the Console Program} + +Normally, you simply enter {\bf quit} or {\bf exit} and the Console program +will terminate. However, it waits until the Director acknowledges the command. +If the Director is already doing a lengthy command (e.g. prune), it may take +some time. If you want to immediately terminate the Console program, enter the +{\bf .quit} command. + +There is currently no way to interrupt a Console command once issued (i.e. +Ctrl-C does not work). However, if you are at a prompt that is asking you to +select one of several possibilities and you would like to abort the command, +you can enter a period ({\bf .}), and in most cases, you will either be +returned to the main command prompt or if appropriate the previous prompt (in +the case of nested prompts). In a few places such as where it is asking for a +Volume name, the period will be taken to be the Volume name. In that case, you +will most likely be able to cancel at the next prompt. 
+\label{list} + +\subsection*{Alphabetic List of Console Commands} +\index[general]{Commands!Alphabetic List of Console } +\index[general]{Alphabetic List of Console Commands } +\addcontentsline{toc}{subsection}{Alphabetic List of Console Commands} + +The following commands are currently implemented: + +\begin{description} +\item [{add [pool=\lt{}pool-name\gt{} storage=\lt{}storage\gt{} + jobid=\lt{}JobId\gt{}]} ] + \index[console]{add [pool } +This command is used to add Volumes to an existing Pool. The Volume names +entered are placed in the Catalog and thus become available for backup +operations. Normally, the {\bf label} command is used rather than this +command because the {\bf label} command labels the physical media (tape) and +does the equivalent of the {\bf add} command. This command affects only the +Catalog and not the physical media (data on Volumes). The physical media must +exist and be labeled before use (usually with the {\bf label} command). This +command can, however, be useful if you wish to add a number of Volumes to the +Pool that will be physically labeled at a later time. It can also be useful +if you are importing a tape from another site. Please see the {\bf label} +command below for the list of legal characters in a Volume name. + +\item [autodisplay on/off] + \index[console]{autodisplay on/off } + This command accepts {\bf on} or {\bf off} as an argument, and turns +auto-display of messages on or off respectively. The default for the console +program is {\bf off}, which means that you will be notified when there are +console messages pending, but they will not automatically be displayed. The +default for the gnome-console program is {\bf on}, which means that messages +will be displayed when they are received (usually within 5 seconds of them +being generated). + +When autodisplay is turned off, you must explicitly retrieve the messages +with the {\bf messages} command. 
When autodisplay is turned on, the messages +will be displayed on the console as they are received. + +\item [automount on/off] + \index[console]{automount on/off } + This command accepts {\bf on} or {\bf off} as the argument, and turns +auto-mounting of the tape after a {\bf label} command on or off respectively. +The default is {\bf on}. If {\bf automount} is turned off, you must +explicitly {\bf mount} the tape after a label command to use it. + +\item [{cancel [jobid=\lt{}number\gt{} job=\lt{}job-name\gt{}]}] + \index[console]{cancel [jobid } + This command is used to cancel a job and accepts {\bf jobid=nnn} or {\bf +job=xxx} as an argument where nnn is replaced by the JobId and xxx is +replaced by the job name. If you do not specify a keyword, the Console +program will prompt you with the names of all the active jobs allowing you to +choose one. + +Once a Job is marked to be canceled, it may take a bit of time (generally +within a minute) before it actually terminates, depending on what operations +it is doing. + +\item [{ create [pool=\lt{}pool-name\gt{}]}] + \index[console]{create [pool } + This command is used to create a Pool record in the database using the Pool +resource record defined in the Director's configuration file. So in a sense, +this command simply transfers the information from the Pool resource in the +configuration file into the Catalog. Normally this command is done +automatically for you when the Director starts providing the Pool is +referenced within a Job resource. If you use this command on an existing +Pool, it will automatically update the Catalog to have the same information +as the Pool resource. After creating a Pool, you will most likely use the +{\bf label} command to label one or more volumes and add their names to the +Media database. + +When starting a Job, if Bacula determines that there is no Pool record in the +database, but there is a Pool resource of the appropriate name, it will +create it for you. 
If you want the Pool record to appear in the database +immediately, simply use this command to force it to be created. + +\item [{ delete [volume=\lt{}vol-name\gt{} pool=\lt{}pool-name\gt{} job + jobid=\lt{}id\gt{}] }] + \index[console]{delete } +The delete command is used to delete a Volume, Pool or Job record from the +Catalog as well as all associated Volume records that were created. This +command operates only on the Catalog database and has no effect on the actual +data written to a Volume. This command can be dangerous and we strongly +recommend that you do not use it unless you know what you are doing. + +If the keyword {\bf Volume} appears on the command line, the named Volume +will be deleted from the catalog, if the keyword {\bf Pool} appears on the +command line, a Pool will be deleted, and if the keyword {\bf Job} appears on +the command line, a Job and all its associated records (File and JobMedia) +will be deleted from the catalog. The full form of this command is: + +delete pool=\lt{}pool-name\gt{} + +or + +delete volume=\lt{}volume-name\gt{} pool=\lt{}pool-name\gt{} or + +delete JobId=\lt{}job-id\gt{} JobId=\lt{}job-id2\gt{} ... or + +delete Job JobId=n,m,o-r,t ... + +The first form deletes a Pool record from the catalog database. The second +form deletes a Volume record from the specified pool in the catalog database. +The third form deletes the specified Job record from the catalog database. +The last form deletes JobId records for JobIds n,m,o,p, q,r, and t. Where each +one of the n,m,... is, of course, a number. +\label{estimate} + +\item [estimate] + \index[console]{estimate } + Using this command, you can get an idea how many files will be backed up, or +if you are unsure about your Include statements in your FileSet, you can test +them without doing an actual backup. The default is to assume a Full backup. +However, you can override this by specifying a {\bf level=Incremental} or +{\bf level=Differential} on the command line. 
A Job name must be specified +or you will be prompted for one, and optionally a Client and FileSet may be +specified on the command line. It then contacts the client which computes +the number of files and bytes that would be backed up. Please note that this +is an estimate calculated from the number of blocks in the file rather than +by reading the actual bytes. As such, the estimated backup size will +generally be larger than an actual backup. + +Optionally you may specify the keyword {\bf listing} in which case, all the +files to be backed up will be listed. Note, it could take quite some time to +display them if the backup is large. The full form is: + +estimate job=\lt{}job-name\gt{} listing client=\lt{}client-name\gt{} +fileset=\lt{}fileset-name\gt{} level=\lt{}level-name\gt{} + +Specification of the {\bf job} is sufficient, but you can also override the +client, fileset and/or level by specifying them on the estimate command line. + + +As an example, you might do: + +\footnotesize +\begin{verbatim} + @output /tmp/listing + estimate job=NightlySave listing level=Incremental + @output + +\end{verbatim} +\normalsize + +which will do a full listing of all files to be backed up for the Job {\bf +NightlySave} during an Incremental save and put it in the file {\bf +/tmp/listing}. + +\item [help] + \index[console]{help } + This command displays the list of commands available. + +\item [label] + \index[console]{label } + This command is used to label physical volumes. The full form of this command +is: + +label storage=\lt{}storage-name\gt{} volume=\lt{}volume-name\gt{} +slot=\lt{}slot\gt{} + +If you leave out any part, you will be prompted for it. The media type is +automatically taken from the Storage resource definition that you supply. +Once the necessary information is obtained, the Console program contacts the +specified Storage daemon and requests that the tape be labeled. 
If the tape
+labeling is successful, the Console program will create a Volume record in
+the appropriate Pool.
+
+The Volume name is restricted to letters, numbers, and the special characters
+hyphen ({\bf -}), underscore ({\bf \_}), colon ({\bf :}), and period ({\bf
+.}). All other characters including a space are illegal. This restriction is
+to ensure good readability of Volume names to reduce operator errors.
+
+Please note, when labeling a blank tape, Bacula will get {\bf read I/O error} when
+it attempts to ensure that the tape is already labeled. If you wish to avoid
+getting these messages, please write an EOF mark on your tape before
+attempting to label it:
+
+\footnotesize
+\begin{verbatim}
+ mt rewind
+ mt weof
+
+\end{verbatim}
+\normalsize
+
+The label command can fail for a number of reasons:
+
+ \begin{enumerate}
+ \item The Volume name you specify is already in the Volume database.
+ \item The Storage daemon has a tape already mounted on the device, in which
+ case you must {\bf unmount} the device, insert a blank tape, then do the
+ {\bf label} command.
+ \item The tape in the device is already a Bacula labeled tape. (Bacula will
+ never relabel a Bacula labeled tape unless it is recycled and you use the
+ {\bf relabel} command).
+ \item There is no tape in the drive.
+ \end{enumerate}
+
+There are two ways to relabel a volume that already has a Bacula label. The
+brute force method is to write an end of file mark on the tape using the
+system {\bf mt} program, something like the following:
+
+\footnotesize
+\begin{verbatim}
+ mt -f /dev/st0 rewind
+ mt -f /dev/st0 weof
+
+\end{verbatim}
+\normalsize
+
+Then you use the {\bf label} command to add a new label. However, this could
+leave traces of the old volume in the catalog.
+
+The preferable method to relabel a tape is to first {\bf purge} the volume,
+either automatically, or explicitly with the {\bf purge} command, then use
+the {\bf relabel} command described below. 
+ +If your autochanger has barcode labels, you can label all the Volumes in your +autochanger one after another by using the {\bf label barcodes} command. For +each tape in the changer containing a barcode, Bacula will mount the tape and +then label it with the same name as the barcode. An appropriate Media record +will also be created in the catalog. Any barcode that begins with the same +characters as specified on the ``CleaningPrefix=xxx'' command, will be +treated as a cleaning tape, and will not be labeled. For example with: + +\footnotesize +\begin{verbatim} + Pool { + Name ... + Cleaning Prefix = "CLN" + } + +\end{verbatim} +\normalsize + +Any slot containing a barcode of CLNxxxx will be treated as a cleaning tape +and will not be mounted. Note, the full form of the command is: + +\footnotesize +\begin{verbatim} + +update storage=xxx pool=yyy slots=1-5,10 barcodes +\end{verbatim} +\normalsize + +\item [list] + \index[console]{list } + The list command lists the requested contents of the Catalog. The various +fields of each record are listed on a single line. The various forms +of the list command are: +\footnotesize +\begin{verbatim} + list jobs + + list jobid=\lt{}id\gt{} + + list job=\lt{}job-name\gt{} + + list jobmedia + + list jobmedia jobid=\lt{}id\gt{} + + list jobmedia job=\lt{}job-name\gt{} + + list files jobid=\lt{}id\gt{} + + list files job=\lt{}job-name\gt{} + + list pools + + list clients + + list jobtotals + + list volumes + + list volumes jobid=\lt{}id\gt{} + + list volumes pool=\lt{}pool-name\gt{} + + list volumes job=\lt{}job-name\gt{} + + list volume=\lt{}volume-name\gt{} list nextvolume job=\lt{}job-name\gt{} + + list nextvol job=\lt{}job-name\gt{} +\end{verbatim} +\normalsize +What most of the above commands do should be more or less obvious. In general +if you do not specify all the command line arguments, the command will prompt +you for what is needed. 
+ +The {\bf list nextvol} command will print the Volume name to be used by the +specified job. You should be aware that exactly what Volume will be used +depends on a lot of factors including the time and what a prior job will do. +It may fill a tape that is not full when you issue this command. As a +consequence, this command will give you a good estimate of what Volume will +be used but not a definitive answer. In addition, this command may have +certain side effect because it runs through the same algorithm as a job, +which means it may automatically purge or recycle a Volume. + +If you wish to add specialized commands that list the contents of the +catalog, you can do so by adding them to the {\bf query.sql} file. However, +this takes some knowledge of programming SQL. Please see the {\bf query} +command below for additional information. See below for listing the full +contents of a catalog record with the {\bf llist} command. + +As an example, the command {\bf list pools} might produce the following +output: + +\footnotesize +\begin{verbatim} ++------+---------+---------+---------+----------+-------------+ +| PoId | Name | NumVols | MaxVols | PoolType | LabelFormat | ++------+---------+---------+---------+----------+-------------+ +| 1 | Default | 0 | 0 | Backup | * | +| 2 | Recycle | 0 | 8 | Backup | File | ++------+---------+---------+---------+----------+-------------+ +\end{verbatim} +\normalsize + +As mentioned above, the {\bf list} command lists what is in the database. +Some things are put into the database immediately when Bacula starts up, but +in general, most things are put in only when they are first used, which is +the case for a Client as with Job records, etc. + +Bacula should create a client record in the database the first time you run a +job for that client. Doing a {\bf status} will not cause a database record to +be created. The client database record will be created whether or not the job +fails, but it must at least start. 
When the Client is actually contacted, +additional info from the client will be added to the client record (a ``uname +-a'' output). + +If you want to see what Client resources you have available in your conf +file, you use the Console command {\bf show clients}. + +\item [llist] + \index[console]{llist } + The llist or ``long list'' command takes all the same arguments that the list +command described above does. The difference is that the llist command list +the full contents of each database record selected. It does so by listing the +various fields of the record vertically, with one field per line. It is +possible to produce a very large number of output lines with this command. + +If instead of the {\bf list pools} as in the example above, you enter {\bf +llist pools} you might get the following output: + +\footnotesize +\begin{verbatim} + PoolId: 1 + Name: Default + NumVols: 0 + MaxVols: 0 + UseOnce: 0 + UseCatalog: 1 + AcceptAnyVolume: 1 + VolRetention: 1,296,000 + VolUseDuration: 86,400 + MaxVolJobs: 0 + MaxVolBytes: 0 + AutoPrune: 0 + Recycle: 1 + PoolType: Backup + LabelFormat: * + PoolId: 2 + Name: Recycle + NumVols: 0 + MaxVols: 8 + UseOnce: 0 + UseCatalog: 1 + AcceptAnyVolume: 1 + VolRetention: 3,600 + VolUseDuration: 3,600 + MaxVolJobs: 1 + MaxVolBytes: 0 + AutoPrune: 0 + Recycle: 1 + PoolType: Backup + LabelFormat: File + +\end{verbatim} +\normalsize + +\item [messages] + \index[console]{messages } + This command causes any pending console messages to be immediately displayed. + + +\item [mount] + \index[console]{mount } + The mount command is used to get Bacula to read a volume on a physical +device. It is a way to tell Bacula that you have mounted a tape and that +Bacula should examine the tape. This command is used only after there was no +Volume in a drive and Bacula requests you to mount a new Volume or when you +have specifically unmounted a Volume with the {\bf unmount} console command, +which causes Bacula to close the drive. 
If you have an autoloader, the mount +command will not cause Bacula to operate the autoloader. The various forms of +the mount command are: + +mount storage=\lt{}storage-name\gt{} + +mount [ jobid=\lt{}id\gt{} | job=\lt{}job-name\gt{} ] + +If you have specified {\bf Automatic Mount = yes} in the Storage daemon's +Device resource, under most circumstances, Bacula will automatically access +the Volume unless you have explicitly {\bf unmount}ed it in the Console +program. +\label{ManualPruning} + +\item [prune] + \index[console]{prune } + The Prune command allows you to safely remove expired database records from +Jobs and Volumes. This command works only on the Catalog database and does +not affect data written to Volumes. In all cases, the Prune command applies +a retention period to the specified records. You can Prune expired File +entries from Job records; you can Prune expired Job records from the +database, and you can Prune both expired Job and File records from specified +Volumes. + +prune files|jobs|volume client=\lt{}client-name\gt{} +volume=\lt{}volume-name\gt{} + +For a Volume to be pruned, the {\bf VolStatus} must be Full, Used, or Append, +otherwise the pruning will not take place. + +\item [purge] + \index[console]{purge } + The Purge command will delete associated Catalog database records from Jobs +and Volumes without considering the retention period. {\bf Purge} works only +on the Catalog database and does not affect data written to Volumes. This +command can be dangerous because you can delete catalog records associated +with current backups of files, and we recommend that you do not use it +unless you know what you are doing. 
The permitted forms of {\bf purge} are:
+purge files
+jobid=\lt{}jobid\gt{}|job=\lt{}job-name\gt{}|client=\lt{}client-name\gt{}
+
+purge jobs client=\lt{}client-name\gt{} (of all jobs)
+
+purge volume|volume=\lt{}vol-name\gt{} (of all jobs)
+
+For the {\bf purge} command to work on Volume Catalog database records the
+{\bf VolStatus} must be Append, Full, Used, or Error.
+
+The actual data written to the Volume will be unaffected by this command.
+
+\item [relabel]
+ \index[console]{relabel }
+ This command is used to label physical volumes. The full form of this command
+is:
+
+relabel storage=\lt{}storage-name\gt{} volume=\lt{}newvolume-name\gt{}
+name=\lt{}old-volume-name\gt{}
+
+If you leave out any part, you will be prompted for it. In order for the
+Volume (old-volume-name) to be relabeled, it must be in the catalog, and the
+volume status must be marked {\bf Purged} or {\bf Recycle}. This happens
+automatically as a result of applying retention periods, or you may
+explicitly purge the volume using the {\bf purge} command.
+
+Once the volume is physically relabeled, the old data written on the Volume
+is lost and cannot be recovered.
+
+\item [release]
+ \index[console]{release }
+ This command is used to cause the Storage daemon to rewind (release) the
+current tape in the drive, and to re-read the Volume label the next time the
+tape is used.
+
+release storage=\lt{}storage-name\gt{}
+
+After a release command, the device is still kept open by Bacula (unless
+Always Open is set to No in the Storage Daemon's configuration) so it cannot
+be used by another program. However, with some tape drives, the operator can
+remove the current tape and insert a different one, and when the next Job
+starts, Bacula will know to re-read the tape label to find out what tape is
+mounted. If you want to be able to use the drive with another program (e.g.
+{\bf mt}), you must use the {\bf unmount} command to cause Bacula to
+completely release (close) the device. 
+ +\item [restore] + \index[console]{restore } + The restore command allows you to select one or more Jobs (JobIds) to be +restored using various methods. Once the JobIds are selected, the File +records for those Jobs are placed in an internal Bacula directory tree, and +the restore enters a file selection mode that allows you to interactively +walk up and down the file tree selecting individual files to be restored. +This mode is somewhat similar to the standard Unix {\bf restore} program's +interactive file selection mode. + +restore storage=\lt{}storage-name\gt{} client=\lt{}client-name\gt{} +where=\lt{}path\gt{} pool=\lt{}pool-name\gt{} fileset=\lt{}fileset-name\gt{} +select current all done + +Where {\bf current}, if specified, tells the restore command to automatically +select a restore to the most current backup. If not specified, you will be +prompted. The {\bf all} specification tells the restore command to restore +all files. If it is not specified, you will be prompted for the files to +restore. For details of the {\bf restore} command, please see the +\ilink{Restore Chapter}{_ChapterStart13} of this manual. + +\item [run] + \index[console]{run } + This command allows you to schedule jobs to be run immediately. The full form +of the command is: + +run job=\lt{}job-name\gt{} client=\lt{}client-name\gt{} +fileset=\lt{}FileSet-name\gt{} level=\lt{}level-keyword\gt{} +storage=\lt{}storage-name\gt{} where=\lt{}directory-prefix\gt{} +when=\lt{}universal-time-specification\gt{} yes + +Any information that is needed but not specified will be listed for +selection, and before starting the job, you will be prompted to accept, +reject, or modify the parameters of the job to be run, unless you have +specified {\bf yes}, in which case the job will be immediately sent to the +scheduler. + +On my system, when I enter a run command, I get the following prompt: + +\footnotesize +\begin{verbatim} +A job name must be specified. 
+The defined Job resources are: + 1: Matou + 2: Polymatou + 3: Rufus + 4: Minimatou + 5: Minou + 6: PmatouVerify + 7: MatouVerify + 8: RufusVerify + 9: Watchdog +Select Job resource (1-9): + +\end{verbatim} +\normalsize + +If I then select number 5, I am prompted with: + +\footnotesize +\begin{verbatim} +Run Backup job +JobName: Minou +FileSet: Minou Full Set +Level: Incremental +Client: Minou +Storage: DLTDrive +Pool: Default +When: 2003-04-23 17:08:18 +OK to run? (yes/mod/no): + +\end{verbatim} +\normalsize + +If I now enter {\bf yes}, the Job will be run. If I enter {\bf mod}, I will +be presented with the following prompt. + +\footnotesize +\begin{verbatim} +Parameters to modify: + 1: Level + 2: Storage + 3: Job + 4: FileSet + 5: Client + 6: When + 7: Pool +Select parameter to modify (1-7): + +\end{verbatim} +\normalsize + +If you wish to start a job at a later time, you can do so by setting the When +time. Use the {\bf mod} option and select {\bf When} (no. 6). Then enter the +desired start time in YYYY-MM-DD HH:MM:SS format. + +\item [setdebug] + \index[dir]{setdebug } + This command is used to set the debug level in each daemon. The form of this +command is: + +setdebug level=nn [trace=0/1 client=\lt{}client-name\gt{} | dir | director | +storage=\lt{}storage-name\gt{} | all] + +If trace=1 is set, then the tracing will be enabled, and the daemon where the +setdebug applies will be placed in trace mode, and all debug output will go +to the file {\bf bacula.trace} in the current directory of the daemon. +Normally, tracing is used only for Win32 clients where the debug output +cannot be written to a terminal or redirected to a file. When tracing, each +debug output message is appended to the trace file. You must explicitly +delete the file when you are done. + +\item [show] + \index[console]{show } + The show command will list the Director's resource records as defined in the +Director's configuration file (normally {\bf bacula-dir.conf}). 
This command +is used mainly for debugging purposes by developers. The following keywords +are accepted on the show command line: directors, clients, counters, jobs, +storages, catalogs, schedules, filesets, groups, pools, messages, all, help. +Please don't confuse this command with the {\bf list}, which displays the +contents of the catalog. + +\item [sqlquery] + \index[dir]{sqlquery } + The sqlquery command puts the Console program into SQL query mode where each +line you enter is concatenated to the previous line until a semicolon (;) is +seen. The semicolon terminates the command, which is then passed directly to +the SQL database engine. When the output from the SQL engine is displayed, +the formation of a new SQL command begins. To terminate SQL query mode and +return to the Console command prompt, you enter a period (.) in column 1. + +Using this command, you can query the SQL catalog database directly. Note you +should really know what you are doing otherwise you could damage the catalog +database. See the {\bf query} command below for simpler and safer way of +entering SQL queries. + +Depending on what database engine you are using (MySQL, PostgreSQL or SQLite), you will +have somewhat different SQL commands available. For more detailed +information, please refer to the MySQL, PostgreSQL or SQLite documentation. + +\item [status] + \index[dir]{status } + This command will display the status of the next jobs that are scheduled +during the next twenty-four hours as well as the status of currently running +jobs. The full form of this command is: + +status [all | dir=\lt{}dir-name\gt{} | director | +client=\lt{}client-name\gt{} | storage=\lt{}storage-name\gt{}] + +If you do a {\bf status dir}, the console will list any currently running +jobs, a summary of all jobs scheduled to be run in the next 24 hours, and a +listing of the last 10 terminated jobs with their statuses. The scheduled +jobs summary will include the Volume name to be used. 
You should be aware of +two things: 1. to obtain the volume name, the code goes through the same code +that will be used when the job runs, which means that it may prune or recycle +a Volume; 2. The Volume listed is only a best guess. The Volume actually +used may be different because of the time difference (more durations may +expire when the job runs) and another job could completely fill the Volume +requiring a new one. + +In the Running Jobs listing, you may find the following types of information: + + +\footnotesize +\begin{verbatim} +2507 Catalog MatouVerify.2004-03-13_05.05.02 is waiting execution +5349 Full CatalogBackup.2004-03-13_01.10.00 is waiting for higher + priority jobs to finish +5348 Differe Minou.2004-03-13_01.05.09 is waiting on max Storage jobs +5343 Full Rufus.2004-03-13_01.05.04 is running +\end{verbatim} +\normalsize + +Looking at the above listing from bottom to top, obviously JobId 5343 (Rufus) +is running. JobId 5348 (Minou) is waiting for JobId 5343 to finish because it +is using the Storage resource, hence the ``waiting on max Storage jobs''. +JobId 5349 has a lower priority than all the other jobs so it is waiting for +higher priority jobs to finish, and finally, JobId 2508 (MatouVerify) is +waiting because only one job can run at a time, hence it is simply ``waiting +execution\". + +\item [unmount] + \index[console]{unmount } + This command causes the indicated Bacula Storage daemon to unmount the + specified device. The forms of the command are the same as the mount command: +\footnotesize +\begin{verbatim} +unmount storage=\lt{}storage-name\gt{} + +unmount [ jobid=\lt{}id\gt{} | job=\lt{}job-name\gt{} ] +\end{verbatim} +\normalsize + +\label{UpdateCommand} +\item [update] + \index[console]{update } + This command will update the catalog for either a specific Pool record, a Volume + record, or the Slots in an autochanger with barcode capability. 
In the case + of updating a Pool record, the new information will be automatically taken + from the corresponding Director's configuration resource record. It can be + used to increase the maximum number of volumes permitted or to set a maximum + number of volumes. The following main keywords may be specified: +\footnotesize +\begin{verbatim} + media, volume, pool, slots +\end{verbatim} +\normalsize + +In the case of updating a Volume, you will be prompted for which value you +wish to change. The following Volume parameters may be changed: + +\footnotesize +\begin{verbatim} + + Volume Status + Volume Retention Period + Volume Use Duration + Maximum Volume Jobs + Maximum Volume Files + Maximum Volume Bytes + Recycle Flag + Slot + InChanger Flag + Pool + Volume Files + Volume from Pool + All Volumes from Pool + +\end{verbatim} +\normalsize + +For slots {\bf update slots}, Bacula will obtain a list of slots and their +barcodes from the Storage daemon, and for each barcode found, it will +automatically update the slot in the catalog Media record to correspond to +the new value. This is very useful if you have moved cassettes in the +magazine, or if you have removed the magazine and inserted a different one. +As the slot of each Volume is updated, the InChanger flag for that Volume +will also be set, and any other Volumes in the Pool will have their InChanger +flag turned off. This permits Bacula to know what magazine (tape holder) is +currently in the autochanger. + +If you do not have barcodes, you can accomplish the same thing in version +1.33 and later by using the {\bf update slots scan} command. The {\bf scan} +keyword tells Bacula to physically mount each tape and to read its +VolumeName. + +For Pool {\bf update pool}, Bacula will move the Volume record from its +existing pool to the pool specified. 
+ +For {\bf Volume from Pool} and {\bf All Volumes from Pool}, the following +values are updated from the Pool record: Recycle, VolRetention, +VolUseDuration, MaxVolJobs, MaxVolFiles, and MaxVolBytes. + +The full form of the update command with all command line arguments is: + +\footnotesize +\begin{verbatim} + update volume=xxx pool=yyy slots volstatus=xxx VolRetention=ddd + VolUse=ddd MaxVolJobs=nnn MaxVolBytes=nnn Recycle=yes|no + slot=nnn + +\end{verbatim} +\normalsize + +\item [use] + \index[console]{use } + This command allows you to specify which Catalog database to use. Normally, +you will be using only one database so this will be done automatically. In +the case that you are using more than one database, you can use this command +to switch from one to another. + +use \lt{}database-name\gt{} + +\item [var] + \label{var} + \index[console]{var name } + This command takes a string or quoted string and does variable expansion on + it the same way variable expansion is done on the {\bf LabelFormat} string. + Thus, for the most part, you can test your LabelFormat strings. The + difference between the {\bf var} command and the actual LabelFormat process + is that during the var command, no job is running so ''dummy`` values are + used in place of Job specific variables. Generally, however, you will get a + good idea of what is going to happen in the real case. + +\item [version] + \index[console]{version } + The command prints the Director's version. + +\item [quit] + \index[console]{quit } + This command terminates the console program. The console program sends the +{\bf quit} request to the Director and waits for acknowledgment. If the +Director is busy doing a previous command for you that has not terminated, it +may take some time. You may quit immediately by issuing the {\bf .quit} +command (i.e. quit preceded by a period). 
+ +\item [query] + \index[console]{query } + This command reads a predefined SQL query from the query file (the name and + location of the query file is defined with the QueryFile resource record in + the Director's configuration file). You are prompted to select a query from + the file, and possibly enter one or more parameters, then the command is + submitted to the Catalog database SQL engine. + +The following queries are currently available (version 1.24): + +\footnotesize +\begin{verbatim} +Available queries: + 1: List Job totals: + 2: List where a file is saved: + 3: List where the most recent copies of a file are saved: + 4: List total files/bytes by Job: + 5: List total files/bytes by Volume: + 6: List last 20 Full Backups for a Client: + 7: List Volumes used by selected JobId: + 8: List Volumes to Restore All Files: + 9: List where a File is saved: +Choose a query (1-9): + +\end{verbatim} +\normalsize + +\item [exit] + \index[console]{exit } + This command terminates the console program. + +\item [wait] + \index[console]{wait } + The wait command causes the Director to pause until there are no jobs +running. This command is useful in a batch situation such as regression +testing where you wish to start a job and wait until that job completes +before continuing. +\end{description} + +\label{dotcommands} + +\subsection*{Special dot Commands} +\index[general]{Commands!Special dot } +\index[general]{Special dot Commands } +\addcontentsline{toc}{subsection}{Special dot Commands} + +There is a list of commands that are prefixed with a period (.). These +commands are intended to be used either by batch programs or graphical user +interface front-ends. They are not normally used by interactive users. Once +GUI development begins, this list will be considerably expanded. 
The following +is the list of dot commands: + +\footnotesize +\begin{verbatim} +.die cause the Director to segment fault (for debugging) +.jobs list all job names +.filesets list all fileset names +.clients list all client names +.msgs return any queued messages +.quit quit +.exit quit +\end{verbatim} +\normalsize + +\label{atcommands} + +\subsection*{Special At (@) Commands} +\index[general]{Commands!Special At @ } +\index[general]{Special At (@) Commands } +\addcontentsline{toc}{subsection}{Special At (@) Commands} + +Normally, all commands entered to the Console program are immediately +forwarded to the Director, which may be on another machine, to be executed. +However, there is a small list of {\bf at} commands, all beginning with an at +character (@), that will not be sent to the Director, but rather interpreted +by the Console program directly. Note, these commands are implemented only in +the tty console program and not in the GNOME Console. These commands are: + +\begin{description} + +\item [@input \lt{}filename\gt{}] + \index[console]{@input \lt{}filename\gt{} } + Read and execute the commands contained in the file specified. + +\item [@output \lt{}filename\gt{} w/a] + \index[console]{@output \lt{}filename\gt{} w/a } + Send all following output to the filename specified either overwriting the +file (w) or appending to the file (a). To redirect the output to the +terminal, simply enter {\bf @output} without a filename specification. +WARNING: be careful not to overwrite a valid file. A typical example during a +regression test might be: + +\footnotesize +\begin{verbatim} + @output /dev/null + commands ... + @output + +\end{verbatim} +\normalsize + +\item [@tee \lt{}filename\gt{} w/a] + \index[console]{@tee \lt{}filename\gt{} w/a } + Send all subsequent output to both the specified file and the terminal. It is + turned off by specifying {\bf @tee} or {\bf @output} without a filename. 
+ +\item [@sleep \lt{}seconds\gt{}] + \index[console]{@sleep \lt{}seconds\gt{} } + Sleep the specified number of seconds. + +\item [@time] + \index[console]{@time } + Print the current time and date. + +\item [@version] + \index[console]{@version } + Print the console's version. + +\item [@quit] + \index[console]{@quit } + quit + +\item [@exit] + \index[console]{@exit } + quit + +\item [@\# anything] + \index[console]{anything } + Comment +\end{description} + +\label{scripting} + +\subsection*{Running the Console Program from a Shell Script} +\index[general]{Script!Running the Console Program from a Shell } +\index[general]{Running the Console Program from a Shell Script } +\addcontentsline{toc}{subsection}{Running the Console Program from a Shell +Script} + +You can automate many Console tasks by running the console program from a +shell script. For example, if you have created a file containing the following +commands: + +\footnotesize +\begin{verbatim} + ./bconsole -c ./bconsole.conf < $DIR/backup.log 2>&1 < /dev/null & +\end{verbatim} +\normalsize + +It is important to redirect the input and outputs of a backgrounded command to +/dev/null to prevent the script from blocking. + + +\item [Client Run After Job = \lt{}command\gt{}] + \index[dir]{Client Run After Job } + This command is the same as {\bf Run After Job} except that it is run on the + client machine. Note, please see the notes above in {\bf Client Run Before + Job} concerning Windows clients. + +\item [Rerun Failed Levels = \lt{}yes|no\gt{}] + \index[dir]{Rerun Failed Levels } + If this directive is set to {\bf yes} (default no), and Bacula detects that a + previous job at a higher level (i.e. Full or Differential) has failed, the + current job level will be upgraded to the higher level. This is particularly + useful for Laptops where they may often be unreachable, and if a prior Full + save has failed, you wish the very next backup to be a Full save rather than + whatever level it is started as. 
+ +\item [Spool Data = \lt{}yes|no\gt{}] + \index[dir]{Spool Data } + If this directive is set to {\bf yes} (default no), the Storage daemon will +be requested to spool the data for this Job to disk rather than write it +directly to tape. Once all the data arrives or the spool files' maximum sizes +are reached, the data will be despooled and written to tape. When this +directive is set to yes, the Spool Attributes is also automatically set to +yes. Spooling data prevents tape shoe-shine (start and stop) during +Incremental saves. This option should not be used if you are writing to a +disk file. + +\item [Spool Attributes = \lt{}yes|no\gt{}] + \index[dir]{Spool Attributes } + The default is set to {\bf no}, which means that the File attributes are sent +by the Storage daemon to the Director as they are stored on tape. However, +if you want to avoid the possibility that database updates will slow down +writing to the tape, you may want to set the value to {\bf yes}, in which +case the Storage daemon will buffer the File attributes and Storage +coordinates to a temporary file in the Working Directory, then when writing +the Job data to the tape is completed, the attributes and storage coordinates +will be sent to the Director. The default is {\bf no}. + +\item [Where = \lt{}directory\gt{}] + \index[dir]{Where } + This directive applies only to a Restore job and specifies a prefix to the +directory name of all files being restored. This permits files to be restored +in a different location from which they were saved. If {\bf Where} is not +specified or is set to backslash ({\bf /}), the files will be restored to +their original location. By default, we have set {\bf Where} in the example +configuration files to be {\bf /tmp/bacula-restores}. This is to prevent +accidental overwriting of your files. 
+ +\item [Replace = \lt{}replace-option\gt{}] + \index[dir]{Replace } + This directive applies only to a Restore job and specifies what happens when +Bacula wants to restore a file or directory that already exists. You have the + following options for {\bf replace-option}: + +\begin{description} + +\item [always] + \index[dir]{always } + when the file to be restored already exists, it is deleted and then replaced by + the copy that was backed up. + +\item [ifnewer] + \index[dir]{ifnewer } + if the backed up file (on tape) is newer than the existing file, the existing + file is deleted and replaced by the back up. + +\item [ifolder] + \index[dir]{ifolder } + if the backed up file (on tape) is older than the existing file, the existing + file is deleted and replaced by the back up. + +\item [never] + \index[dir]{never } + if the backed up file already exists, Bacula skips restoring this file. +\end{description} + +\item [Prefix Links=\lt{}yes|no\gt{}] + \index[dir]{Prefix Links } + If a {\bf Where} path prefix is specified for a recovery job, apply it + to absolute links as well. The default is {\bf No}. When set to {\bf + Yes} then while restoring files to an alternate directory, any absolute + soft links will also be modified to point to the new alternate + directory. Normally this is what is desired -- i.e. everything is self + consistent. However, if you wish to later move the files to their + original locations, all files linked with absolute names will be broken. + +\item [Maximum Concurrent Jobs = \lt{}number\gt{}] + \index[dir]{Maximum Concurrent Jobs } + where \lt{}number\gt{} is the maximum number of Jobs from the current + Job resource that can run concurrently. Note, this directive limits + only Jobs with the same name as the resource in which it appears. Any + other restrictions on the maximum concurrent jobs such as in the + Director, Client, or Storage resources will also apply in addition to + the limit specified here. 
The default is set to 1, but you may set it + to a larger number. We strongly recommend that you read the WARNING + documented under \ilink{ Maximum Concurrent Jobs}{DirMaxConJobs} in the + Director's resource. + +\item [Reschedule On Error = \lt{}yes|no\gt{}] + \index[dir]{Reschedule On Error } + If this directive is enabled, and the job terminates in error, the job + will be rescheduled as determined by the {\bf Reschedule Interval} and + {\bf Reschedule Times} directives. If you cancel the job, it will not + be rescheduled. The default is {\bf no} (i.e. the job will not be + rescheduled). + + + This specification can be useful for portables, laptops, or other + machines that are not always connected to the network or switched on. + +\item [Reschedule Interval = \lt{}time-specification\gt{}] + \index[dir]{Reschedule Interval } + If you have specified {\bf Reschedule On Error = yes} and the job + terminates in error, it will be rescheduled after the interval of time + specified by {\bf time-specification}. See \ilink{ the time + specification formats}{Time} in the Configure chapter for details of + time specifications. If no interval is specified, the job will not be + rescheduled on error. + +\item [Reschedule Times = \lt{}count\gt{}] + \index[dir]{Reschedule Times } + This directive specifies the maximum number of times to reschedule the + job. If it is set to zero (the default) the job will be rescheduled an + indefinite number of times. + +\label{Priority} +\item [Priority = \lt{}number\gt{}] + \index[dir]{Priority } + This directive permits you to control the order in which your jobs run + by specifying a positive non-zero number. The higher the number, the + lower the job priority. Assuming you are not running concurrent jobs, + all queued jobs of priority 1 will run before queued jobs of priority 2 + and so on, regardless of the original scheduling order. + + The priority only affects waiting jobs that are queued to run, not jobs + that are already running. 
If one or more jobs of priority 2 are already + running, and a new job is scheduled with priority 1, the currently + running priority 2 jobs must complete before the priority 1 job is run. + + The default priority is 10. + + If you want to run concurrent jobs, which is not recommended, you should keep + these points in mind: + +\begin{itemize} +\item To run concurrent jobs, you must set Maximum Concurrent Jobs = 2 in 5 + or 6 distinct places: in bacula-dir.conf in the Director, the Job, the + Client, the Storage resources; in bacula-fd in the FileDaemon (or Client) + resource, and in bacula-sd.conf in the Storage resource. If any one is + missing, it will throttle the jobs to one at a time. +\item Bacula concurrently runs jobs of only one priority at a time. It will + not simultaneously run a priority 1 and a priority 2 job. +\item If Bacula is running a priority 2 job and a new priority 1 job is + scheduled, it will wait until the running priority 2 job terminates even if + the Maximum Concurrent Jobs settings would otherwise allow two jobs to run + simultaneously. +\item Suppose that bacula is running a priority 2 job and a new priority 1 job + is scheduled and queued waiting for the running priority 2 job to terminate. + If you then start a second priority 2 job, the waiting priority 1 job will + prevent the new priority 2 job from running concurrently with the running + priority 2 job. That is: as long as there is a higher priority job waiting to + run, no new lower priority jobs will start even if the Maximum Concurrent + Jobs settings would normally allow them to run. This ensures that higher + priority jobs will be run as soon as possible. +\end{itemize} + +If you have several jobs of different priority, it is best not to start them +at exactly the same time, because Bacula must examine them one at a time. If +by chance Bacula treats a lower priority first, then it will run before your +high priority jobs. 
To avoid this, start any higher priority jobs a few seconds
+before lower priority ones. This ensures that Bacula will examine the jobs in
+the correct order, and that your priority scheme will be respected.
This permits you to +concisely define several nearly identical Jobs, each one referencing a JobDefs +resource which contains the defaults. Only the changes from the defaults need to +be mentioned in each Job. + +\subsection*{The Schedule Resource} +\label{ScheduleResource} +\index[general]{Resource!Schedule } +\index[general]{Schedule Resource } +\addcontentsline{toc}{subsection}{Schedule Resource} + +The Schedule resource provides a means of automatically scheduling a Job as +well as the ability to override the default Level, Pool, Storage and Messages +resources. If a Schedule resource is not referenced in a Job, the Job can only +be run manually. In general, you specify an action to be taken and when. + +\begin{description} + +\item [Schedule] + \index[dir]{Schedule } + Start of the Schedule directives. No {\bf Schedule} resource is required, but +you will need at least one if you want Jobs to be automatically started. + +\item [Name = \lt{}name\gt{}] + \index[dir]{Name } + The name of the schedule being defined. The Name directive is required. + +\item [Run = \lt{}Job-overrides\gt{} \lt{}Date-time-specification\gt{}] + \index[dir]{Run } + The Run directive defines when a Job is to be run, and what overrides if any +to apply. You may specify multiple {\bf run} directives within a {\bf +Schedule} resource. If you do, they will all be applied (i.e. multiple +schedules). If you have two {\bf Run} directives that start at the same time, +two Jobs will start at the same time (well, within one second of each +other). + +The {\bf Job-overrides} permit overriding the Level, the Storage, the +Messages, and the Pool specifications provided in the Job resource. In +addition, the FullPool, the IncrementalPool, and the DifferentialPool +specifications permit overriding the Pool specification according to what +backup Job Level is in effect. + +By the use of overrides, you may customize a particular Job. 
For example, you +may specify a Messages override for your Incremental backups that outputs +messages to a log file, but for your weekly or monthly Full backups, you may +send the output by email by using a different Messages override. + +{\bf Job-overrides} are specified as: {\bf keyword=value} where the keyword +is Level, Storage, Messages, Pool, FullPool, DifferentialPool, or +IncrementalPool, and the {\bf value} is as defined on the respective +directive formats for the Job resource. You may specify multiple {\bf +Job-overrides} on one {\bf Run} directive by separating them with one or more +spaces or by separating them with a trailing comma. For example: + +\begin{description} + +\item [Level=Full] + \index[dir]{Level } + is all files in the FileSet whether or not they have changed. + +\item [Level=Incremental] + \index[dir]{Level } + is all files that have changed since the last backup. + +\item [Pool=Weekly] + \index[dir]{Pool } + specifies to use the Pool named {\bf Weekly}. + +\item [Storage=DLT\_Drive] + \index[dir]{Storage } + specifies to use {\bf DLT\_Drive} for the storage device. + +\item [Messages=Verbose] + \index[dir]{Messages } + specifies to use the {\bf Verbose} message resource for the Job. + +\item [FullPool=Full] + \index[dir]{FullPool } + specifies to use the Pool named {\bf Full} if the job is a full backup, or is +upgraded from another type to a full backup. + +\item [DifferentialPool=Differential] + \index[dir]{DifferentialPool } + specifies to use the Pool named {\bf Differential} if the job is a +differential backup. + +\item [IncrementalPool=Incremental] + \index[dir]{IncrementalPool } + specifies to use the Pool named {\bf Incremental} if the job is an +incremental backup. + +\item [SpoolData=yes|no] + \index[dir]{SpoolData } + tells Bacula to request the Storage daemon to spool data to a disk file +before putting it on tape. 
+ +\item [WritePartAfterJob=yes|no] + \index[dir]{WritePartAfterJob } + tells Bacula to request the Storage daemon to write the current part file to + the device when the job is finished (see + \ilink{Write Part After Job directive in the Job + resource}{WritePartAfterJob}). Please note, this directive is implemented + only in version 1.37 and later. + +\end{description} + +{\bf Date-time-specification} determines when the Job is to be run. The +specification is a repetition, and as a default Bacula is set to run a job at +the beginning of the hour of every hour of every day of every week of every +month of every year. This is not normally what you want, so you must specify +or limit when you want the job to run. Any specification given is assumed to +be repetitive in nature and will serve to override or limit the default +repetition. This is done by specifying masks or times for the hour, day of the +month, day of the week, week of the month, week of the year, and month when +you want the job to run. By specifying one or more of the above, you can +define a schedule to repeat at almost any frequency you want. + +Basically, you must supply a {\bf month}, {\bf day}, {\bf hour}, and {\bf +minute} the Job is to be run. Of these four items to be specified, {\bf day} +is special in that you may either specify a day of the month such as 1, 2, +... 31, or you may specify a day of the week such as Monday, Tuesday, ... +Sunday. Finally, you may also specify a week qualifier to restrict the +schedule to the first, second, third, fourth, or fifth week of the month. + +For example, if you specify only a day of the week, such as {\bf Tuesday} the +Job will be run every hour of every Tuesday of every Month. That is the {\bf +month} and {\bf hour} remain set to the defaults of every month and all +hours. + +Note, by default with no other specification, your job will run at the +beginning of every hour. 
If you wish your job to run more than once in any +given hour, you will need to specify multiple {\bf run} specifications each +with a different minute. + +The date/time to run the Job can be specified in the following way in +pseudo-BNF: + +\footnotesize +\begin{verbatim} + = on + = at + = 1st | 2nd | 3rd | 4th | 5th | first | + second | third | forth | fifth + = sun | mon | tue | wed | thu | fri | sat | + sunday | monday | tuesday | wednesday | + thursday | friday + = w00 | w01 | ... w52 | w53 + = jan | feb | mar | apr | may | jun | jul | + aug | sep | oct | nov | dec | january | + february | ... | december + = daily + = weekly + = monthly + = hourly + = 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 0 + = | +<12hour> = 0 | 1 | 2 | ... 12 + = 0 | 1 | 2 | ... 23 + = 0 | 1 | 2 | ... 59 + = 1 | 2 | ... 31 +